Merge branch 'dev'
Conflicts: ChangeLog include/jemalloc/internal/chunk.h src/chunk.c src/huge.c src/jemalloc.c test/rallocm.c
This commit is contained in:
commit
fc9b1dbf69
2
.gitignore
vendored
2
.gitignore
vendored
@ -11,6 +11,7 @@
|
|||||||
/lib/
|
/lib/
|
||||||
/Makefile
|
/Makefile
|
||||||
/include/jemalloc/internal/jemalloc_internal\.h
|
/include/jemalloc/internal/jemalloc_internal\.h
|
||||||
|
/include/jemalloc/internal/size_classes\.h
|
||||||
/include/jemalloc/jemalloc\.h
|
/include/jemalloc/jemalloc\.h
|
||||||
/include/jemalloc/jemalloc_defs\.h
|
/include/jemalloc/jemalloc_defs\.h
|
||||||
/test/jemalloc_test\.h
|
/test/jemalloc_test\.h
|
||||||
@ -21,3 +22,4 @@
|
|||||||
!test/*.c
|
!test/*.c
|
||||||
!test/*.exp
|
!test/*.exp
|
||||||
/VERSION
|
/VERSION
|
||||||
|
/bin/jemalloc.sh
|
||||||
|
32
COPYING
32
COPYING
@ -1,9 +1,10 @@
|
|||||||
Unless otherwise specified, files in the jemalloc source distribution are
|
Unless otherwise specified, files in the jemalloc source distribution are
|
||||||
subject to the following licenses:
|
subject to the following license:
|
||||||
--------------------------------------------------------------------------------
|
--------------------------------------------------------------------------------
|
||||||
Copyright (C) 2002-2010 Jason Evans <jasone@canonware.com>.
|
Copyright (C) 2002-2012 Jason Evans <jasone@canonware.com>.
|
||||||
All rights reserved.
|
All rights reserved.
|
||||||
Copyright (C) 2007-2010 Mozilla Foundation. All rights reserved.
|
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
|
||||||
|
Copyright (C) 2009-2012 Facebook, Inc. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are met:
|
modification, are permitted provided that the following conditions are met:
|
||||||
@ -24,28 +25,3 @@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
|||||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
--------------------------------------------------------------------------------
|
--------------------------------------------------------------------------------
|
||||||
Copyright (C) 2009-2010 Facebook, Inc.
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without modification,
|
|
||||||
are permitted provided that the following conditions are met:
|
|
||||||
* Redistributions of source code must retain the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
|
||||||
list of conditions and the following disclaimer in the documentation and/or
|
|
||||||
other materials provided with the distribution.
|
|
||||||
* Neither the name of Facebook, Inc. nor the names of its contributors may be
|
|
||||||
used to endorse or promote products derived from this software without
|
|
||||||
specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
|
||||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
|
||||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
||||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
|
||||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
||||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
--------------------------------------------------------------------------------
|
|
||||||
|
89
ChangeLog
89
ChangeLog
@ -6,6 +6,95 @@ found in the git revision history:
|
|||||||
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
|
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
|
||||||
git://canonware.com/jemalloc.git
|
git://canonware.com/jemalloc.git
|
||||||
|
|
||||||
|
* 3.0.0 (May 11, 2012)
|
||||||
|
|
||||||
|
Although this version adds some major new features, the primary focus is on
|
||||||
|
internal code cleanup that facilitates maintainability and portability, most
|
||||||
|
of which is not reflected in the ChangeLog. This is the first release to
|
||||||
|
incorporate substantial contributions from numerous other developers, and the
|
||||||
|
result is a more broadly useful allocator (see the git revision history for
|
||||||
|
contribution details). Note that the license has been unified, thanks to
|
||||||
|
Facebook granting a license under the same terms as the other copyright
|
||||||
|
holders (see COPYING).
|
||||||
|
|
||||||
|
New features:
|
||||||
|
- Implement Valgrind support, redzones, and quarantine.
|
||||||
|
- Add support for additional platforms:
|
||||||
|
+ FreeBSD
|
||||||
|
+ Mac OS X Lion
|
||||||
|
+ MinGW
|
||||||
|
+ Windows (no support yet for replacing the system malloc)
|
||||||
|
- Add support for additional architectures:
|
||||||
|
+ MIPS
|
||||||
|
+ SH4
|
||||||
|
+ Tilera
|
||||||
|
- Add support for cross compiling.
|
||||||
|
- Add nallocm(), which rounds a request size up to the nearest size class
|
||||||
|
without actually allocating.
|
||||||
|
- Implement aligned_alloc() (blame C11).
|
||||||
|
- Add the "thread.tcache.enabled" mallctl.
|
||||||
|
- Add the "opt.prof_final" mallctl.
|
||||||
|
- Update pprof (from gperftools 2.0).
|
||||||
|
- Add the --with-mangling option.
|
||||||
|
- Add the --disable-experimental option.
|
||||||
|
- Add the --disable-munmap option, and make it the default on Linux.
|
||||||
|
- Add the --enable-mremap option, which disables use of mremap(2) by default.
|
||||||
|
|
||||||
|
Incompatible changes:
|
||||||
|
- Enable stats by default.
|
||||||
|
- Enable fill by default.
|
||||||
|
- Disable lazy locking by default.
|
||||||
|
- Rename the "tcache.flush" mallctl to "thread.tcache.flush".
|
||||||
|
- Rename the "arenas.pagesize" mallctl to "arenas.page".
|
||||||
|
- Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
|
||||||
|
- Change the "opt.prof_accum" default from true to false.
|
||||||
|
|
||||||
|
Removed features:
|
||||||
|
- Remove the swap feature, including the "config.swap", "swap.avail",
|
||||||
|
"swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
|
||||||
|
- Remove highruns statistics, including the
|
||||||
|
"stats.arenas.<i>.bins.<j>.highruns" and
|
||||||
|
"stats.arenas.<i>.lruns.<j>.highruns" mallctls.
|
||||||
|
- As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
|
||||||
|
"arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
|
||||||
|
"arenas.[tqcs]bins" mallctls.
|
||||||
|
- Remove the "arenas.chunksize" mallctl.
|
||||||
|
- Remove the "opt.lg_prof_tcmax" option.
|
||||||
|
- Remove the "opt.lg_prof_bt_max" option.
|
||||||
|
- Remove the "opt.lg_tcache_gc_sweep" option.
|
||||||
|
- Remove the --disable-tiny option, including the "config.tiny" mallctl.
|
||||||
|
- Remove the --enable-dynamic-page-shift configure option.
|
||||||
|
- Remove the --enable-sysv configure option.
|
||||||
|
|
||||||
|
Bug fixes:
|
||||||
|
- Fix a statistics-related bug in the "thread.arena" mallctl that could cause
|
||||||
|
invalid statistics and crashes.
|
||||||
|
- Work around TLS deallocation via free() on Linux. This bug could cause
|
||||||
|
write-after-free memory corruption.
|
||||||
|
- Fix a potential deadlock that could occur during interval- and
|
||||||
|
growth-triggered heap profile dumps.
|
||||||
|
- Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
|
||||||
|
- Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
|
||||||
|
cause memory corruption and crashes with --enable-dss specified.
|
||||||
|
- Fix fork-related bugs that could cause deadlock in children between fork
|
||||||
|
and exec.
|
||||||
|
- Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
|
||||||
|
- Fix realloc(p, 0) to act like free(p).
|
||||||
|
- Do not enforce minimum alignment in memalign().
|
||||||
|
- Check for NULL pointer in malloc_usable_size().
|
||||||
|
- Fix an off-by-one heap profile statistics bug that could be observed in
|
||||||
|
interval- and growth-triggered heap profiles.
|
||||||
|
- Fix the "epoch" mallctl to update cached stats even if the passed in epoch
|
||||||
|
is 0.
|
||||||
|
- Fix bin->runcur management to fix a layout policy bug. This bug did not
|
||||||
|
affect correctness.
|
||||||
|
- Fix a bug in choose_arena_hard() that potentially caused more arenas to be
|
||||||
|
initialized than necessary.
|
||||||
|
- Add missing "opt.lg_tcache_max" mallctl implementation.
|
||||||
|
- Use glibc allocator hooks to make mixed allocator usage less likely.
|
||||||
|
- Fix build issues for --disable-tcache.
|
||||||
|
- Don't mangle pthread_create() when --with-private-namespace is specified.
|
||||||
|
|
||||||
* 2.2.5 (November 14, 2011)
|
* 2.2.5 (November 14, 2011)
|
||||||
|
|
||||||
Bug fixes:
|
Bug fixes:
|
||||||
|
93
INSTALL
93
INSTALL
@ -26,6 +26,19 @@ any of the following arguments (not a definitive list) to 'configure':
|
|||||||
Embed one or more library paths, so that libjemalloc can find the libraries
|
Embed one or more library paths, so that libjemalloc can find the libraries
|
||||||
it is linked to. This works only on ELF-based systems.
|
it is linked to. This works only on ELF-based systems.
|
||||||
|
|
||||||
|
--with-mangling=<map>
|
||||||
|
Mangle public symbols specified in <map> which is a comma-separated list of
|
||||||
|
name:mangled pairs.
|
||||||
|
|
||||||
|
For example, to use ld's --wrap option as an alternative method for
|
||||||
|
overriding libc's malloc implementation, specify something like:
|
||||||
|
|
||||||
|
--with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
|
||||||
|
|
||||||
|
Note that mangling happens prior to application of the prefix specified by
|
||||||
|
--with-jemalloc-prefix, and mangled symbols are then ignored when applying
|
||||||
|
the prefix.
|
||||||
|
|
||||||
--with-jemalloc-prefix=<prefix>
|
--with-jemalloc-prefix=<prefix>
|
||||||
Prefix all public APIs with <prefix>. For example, if <prefix> is
|
Prefix all public APIs with <prefix>. For example, if <prefix> is
|
||||||
"prefix_", API changes like the following occur:
|
"prefix_", API changes like the following occur:
|
||||||
@ -62,8 +75,8 @@ any of the following arguments (not a definitive list) to 'configure':
|
|||||||
Enable assertions and validation code. This incurs a substantial
|
Enable assertions and validation code. This incurs a substantial
|
||||||
performance hit, but is very useful during application development.
|
performance hit, but is very useful during application development.
|
||||||
|
|
||||||
--enable-stats
|
--disable-stats
|
||||||
Enable statistics gathering functionality. See the "opt.stats_print"
|
Disable statistics gathering functionality. See the "opt.stats_print"
|
||||||
option documentation for usage details.
|
option documentation for usage details.
|
||||||
|
|
||||||
--enable-prof
|
--enable-prof
|
||||||
@ -90,51 +103,50 @@ any of the following arguments (not a definitive list) to 'configure':
|
|||||||
Statically link against the specified libunwind.a rather than dynamically
|
Statically link against the specified libunwind.a rather than dynamically
|
||||||
linking with -lunwind.
|
linking with -lunwind.
|
||||||
|
|
||||||
--disable-tiny
|
|
||||||
Disable tiny (sub-quantum-sized) object support. Technically it is not
|
|
||||||
legal for a malloc implementation to allocate objects with less than
|
|
||||||
quantum alignment (8 or 16 bytes, depending on architecture), but in
|
|
||||||
practice it never causes any problems if, for example, 4-byte allocations
|
|
||||||
are 4-byte-aligned.
|
|
||||||
|
|
||||||
--disable-tcache
|
--disable-tcache
|
||||||
Disable thread-specific caches for small objects. Objects are cached and
|
Disable thread-specific caches for small objects. Objects are cached and
|
||||||
released in bulk, thus reducing the total number of mutex operations. See
|
released in bulk, thus reducing the total number of mutex operations. See
|
||||||
the "opt.tcache" option for usage details.
|
the "opt.tcache" option for usage details.
|
||||||
|
|
||||||
--enable-swap
|
--enable-mremap
|
||||||
Enable mmap()ed swap file support. When this feature is built in, it is
|
Enable huge realloc() via mremap(2). mremap() is disabled by default
|
||||||
possible to specify one or more files that act as backing store. This
|
because the flavor used is specific to Linux, which has a quirk in its
|
||||||
effectively allows for per application swap files.
|
virtual memory allocation algorithm that causes semi-permanent VM map holes
|
||||||
|
under normal jemalloc operation.
|
||||||
|
|
||||||
|
--disable-munmap
|
||||||
|
Disable virtual memory deallocation via munmap(2); instead keep track of
|
||||||
|
the virtual memory for later use. munmap() is disabled by default (i.e.
|
||||||
|
--disable-munmap is implied) on Linux, which has a quirk in its virtual
|
||||||
|
memory allocation algorithm that causes semi-permanent VM map holes under
|
||||||
|
normal jemalloc operation.
|
||||||
|
|
||||||
--enable-dss
|
--enable-dss
|
||||||
Enable support for page allocation/deallocation via sbrk(2), in addition to
|
Enable support for page allocation/deallocation via sbrk(2), in addition to
|
||||||
mmap(2).
|
mmap(2).
|
||||||
|
|
||||||
--enable-fill
|
--disable-fill
|
||||||
Enable support for junk/zero filling of memory. See the "opt.junk"/
|
Disable support for junk/zero filling of memory, quarantine, and redzones.
|
||||||
"opt.zero" option documentation for usage details.
|
See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
|
||||||
|
documentation for usage details.
|
||||||
|
|
||||||
|
--disable-valgrind
|
||||||
|
Disable support for Valgrind.
|
||||||
|
|
||||||
|
--disable-experimental
|
||||||
|
Disable support for the experimental API (*allocm()).
|
||||||
|
|
||||||
|
--enable-utrace
|
||||||
|
Enable utrace(2)-based allocation tracing. This feature is not broadly
|
||||||
|
portable (FreeBSD has it, but Linux and OS X do not).
|
||||||
|
|
||||||
--enable-xmalloc
|
--enable-xmalloc
|
||||||
Enable support for optional immediate termination due to out-of-memory
|
Enable support for optional immediate termination due to out-of-memory
|
||||||
errors, as is commonly implemented by "xmalloc" wrapper function for malloc.
|
errors, as is commonly implemented by "xmalloc" wrapper function for malloc.
|
||||||
See the "opt.xmalloc" option documentation for usage details.
|
See the "opt.xmalloc" option documentation for usage details.
|
||||||
|
|
||||||
--enable-sysv
|
--enable-lazy-lock
|
||||||
Enable support for System V semantics, wherein malloc(0) returns NULL
|
Enable code that wraps pthread_create() to detect when an application
|
||||||
rather than a minimal allocation. See the "opt.sysv" option documentation
|
|
||||||
for usage details.
|
|
||||||
|
|
||||||
--enable-dynamic-page-shift
|
|
||||||
Under most conditions, the system page size never changes (usually 4KiB or
|
|
||||||
8KiB, depending on architecture and configuration), and unless this option
|
|
||||||
is enabled, jemalloc assumes that page size can safely be determined during
|
|
||||||
configuration and hard-coded. Enabling dynamic page size determination has
|
|
||||||
a measurable impact on performance, since the compiler is forced to load
|
|
||||||
the page size from memory rather than embedding immediate values.
|
|
||||||
|
|
||||||
--disable-lazy-lock
|
|
||||||
Disable code that wraps pthread_create() to detect when an application
|
|
||||||
switches from single-threaded to multi-threaded mode, so that it can avoid
|
switches from single-threaded to multi-threaded mode, so that it can avoid
|
||||||
mutex locking/unlocking operations while in single-threaded mode. In
|
mutex locking/unlocking operations while in single-threaded mode. In
|
||||||
practice, this feature usually has little impact on performance unless
|
practice, this feature usually has little impact on performance unless
|
||||||
@ -181,11 +193,24 @@ PATH="?"
|
|||||||
|
|
||||||
=== Advanced compilation =======================================================
|
=== Advanced compilation =======================================================
|
||||||
|
|
||||||
|
To build only parts of jemalloc, use the following targets:
|
||||||
|
|
||||||
|
build_lib_shared
|
||||||
|
build_lib_static
|
||||||
|
build_lib
|
||||||
|
build_doc_html
|
||||||
|
build_doc_man
|
||||||
|
build_doc
|
||||||
|
|
||||||
To install only parts of jemalloc, use the following targets:
|
To install only parts of jemalloc, use the following targets:
|
||||||
|
|
||||||
install_bin
|
install_bin
|
||||||
install_include
|
install_include
|
||||||
|
install_lib_shared
|
||||||
|
install_lib_static
|
||||||
install_lib
|
install_lib
|
||||||
|
install_doc_html
|
||||||
|
install_doc_man
|
||||||
install_doc
|
install_doc
|
||||||
|
|
||||||
To clean up build results to varying degrees, use the following make targets:
|
To clean up build results to varying degrees, use the following make targets:
|
||||||
@ -248,10 +273,6 @@ directory, issue configuration and build commands:
|
|||||||
|
|
||||||
The manual page is generated in both html and roff formats. Any web browser
|
The manual page is generated in both html and roff formats. Any web browser
|
||||||
can be used to view the html manual. The roff manual page can be formatted
|
can be used to view the html manual. The roff manual page can be formatted
|
||||||
prior to installation via any of the following commands:
|
prior to installation via the following command:
|
||||||
|
|
||||||
nroff -man -t doc/jemalloc.3
|
nroff -man -t doc/jemalloc.3
|
||||||
|
|
||||||
groff -man -t -Tps doc/jemalloc.3 | ps2pdf - doc/jemalloc.3.pdf
|
|
||||||
|
|
||||||
(cd doc; groff -man -man-ext -t -Thtml jemalloc.3 > jemalloc.3.html)
|
|
||||||
|
330
Makefile.in
330
Makefile.in
@ -17,130 +17,185 @@ INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
|
|||||||
LIBDIR := $(DESTDIR)@LIBDIR@
|
LIBDIR := $(DESTDIR)@LIBDIR@
|
||||||
DATADIR := $(DESTDIR)@DATADIR@
|
DATADIR := $(DESTDIR)@DATADIR@
|
||||||
MANDIR := $(DESTDIR)@MANDIR@
|
MANDIR := $(DESTDIR)@MANDIR@
|
||||||
|
srcroot := @srcroot@
|
||||||
|
objroot := @objroot@
|
||||||
|
abs_srcroot := @abs_srcroot@
|
||||||
|
abs_objroot := @abs_objroot@
|
||||||
|
|
||||||
# Build parameters.
|
# Build parameters.
|
||||||
CPPFLAGS := @CPPFLAGS@ -I@srcroot@include -I@objroot@include
|
CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
|
||||||
CFLAGS := @CFLAGS@
|
CFLAGS := @CFLAGS@
|
||||||
ifeq (macho, @abi@)
|
|
||||||
CFLAGS += -dynamic
|
|
||||||
endif
|
|
||||||
LDFLAGS := @LDFLAGS@
|
LDFLAGS := @LDFLAGS@
|
||||||
|
EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
|
||||||
LIBS := @LIBS@
|
LIBS := @LIBS@
|
||||||
RPATH_EXTRA := @RPATH_EXTRA@
|
RPATH_EXTRA := @RPATH_EXTRA@
|
||||||
ifeq (macho, @abi@)
|
SO := @so@
|
||||||
SO := dylib
|
IMPORTLIB := @importlib@
|
||||||
WL_SONAME := dylib_install_name
|
O := @o@
|
||||||
|
A := @a@
|
||||||
|
EXE := @exe@
|
||||||
|
LIBPREFIX := @libprefix@
|
||||||
|
REV := @rev@
|
||||||
|
install_suffix := @install_suffix@
|
||||||
|
ABI := @abi@
|
||||||
|
XSLTPROC := @XSLTPROC@
|
||||||
|
AUTOCONF := @AUTOCONF@
|
||||||
|
_RPATH = @RPATH@
|
||||||
|
RPATH = $(if $(1),$(call _RPATH,$(1)))
|
||||||
|
cfghdrs_in := @cfghdrs_in@
|
||||||
|
cfghdrs_out := @cfghdrs_out@
|
||||||
|
cfgoutputs_in := @cfgoutputs_in@
|
||||||
|
cfgoutputs_out := @cfgoutputs_out@
|
||||||
|
enable_autogen := @enable_autogen@
|
||||||
|
enable_experimental := @enable_experimental@
|
||||||
|
DSO_LDFLAGS = @DSO_LDFLAGS@
|
||||||
|
SOREV = @SOREV@
|
||||||
|
PIC_CFLAGS = @PIC_CFLAGS@
|
||||||
|
CTARGET = @CTARGET@
|
||||||
|
LDTARGET = @LDTARGET@
|
||||||
|
MKLIB = @MKLIB@
|
||||||
|
CC_MM = @CC_MM@
|
||||||
|
|
||||||
|
ifeq (macho, $(ABI))
|
||||||
|
TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
|
||||||
else
|
else
|
||||||
SO := so
|
ifeq (pecoff, $(ABI))
|
||||||
WL_SONAME := soname
|
TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
|
||||||
endif
|
|
||||||
REV := 1
|
|
||||||
ifeq (macho, @abi@)
|
|
||||||
TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH=@objroot@lib
|
|
||||||
else
|
else
|
||||||
TEST_LIBRARY_PATH :=
|
TEST_LIBRARY_PATH :=
|
||||||
endif
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
|
LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
|
||||||
|
|
||||||
# Lists of files.
|
# Lists of files.
|
||||||
BINS := @srcroot@bin/pprof
|
BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh
|
||||||
CHDRS := @objroot@include/jemalloc/jemalloc@install_suffix@.h \
|
CHDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h \
|
||||||
@objroot@include/jemalloc/jemalloc_defs@install_suffix@.h
|
$(objroot)include/jemalloc/jemalloc_defs$(install_suffix).h
|
||||||
CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/atomic.c \
|
CSRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c $(srcroot)src/atomic.c \
|
||||||
@srcroot@src/base.c @srcroot@src/bitmap.c @srcroot@src/chunk.c \
|
$(srcroot)src/base.c $(srcroot)src/bitmap.c $(srcroot)src/chunk.c \
|
||||||
@srcroot@src/chunk_dss.c @srcroot@src/chunk_mmap.c \
|
$(srcroot)src/chunk_dss.c $(srcroot)src/chunk_mmap.c \
|
||||||
@srcroot@src/chunk_swap.c @srcroot@src/ckh.c @srcroot@src/ctl.c \
|
$(srcroot)src/ckh.c $(srcroot)src/ctl.c $(srcroot)src/extent.c \
|
||||||
@srcroot@src/extent.c @srcroot@src/hash.c @srcroot@src/huge.c \
|
$(srcroot)src/hash.c $(srcroot)src/huge.c $(srcroot)src/mb.c \
|
||||||
@srcroot@src/mb.c @srcroot@src/mutex.c @srcroot@src/prof.c \
|
$(srcroot)src/mutex.c $(srcroot)src/prof.c $(srcroot)src/quarantine.c \
|
||||||
@srcroot@src/rtree.c @srcroot@src/stats.c @srcroot@src/tcache.c
|
$(srcroot)src/rtree.c $(srcroot)src/stats.c $(srcroot)src/tcache.c \
|
||||||
ifeq (macho, @abi@)
|
$(srcroot)src/util.c $(srcroot)src/tsd.c
|
||||||
CSRCS += @srcroot@src/zone.c
|
ifeq (macho, $(ABI))
|
||||||
|
CSRCS += $(srcroot)src/zone.c
|
||||||
endif
|
endif
|
||||||
STATIC_LIBS := @objroot@lib/libjemalloc@install_suffix@.a
|
ifeq ($(IMPORTLIB),$(SO))
|
||||||
DSOS := @objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) \
|
STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
|
||||||
@objroot@lib/libjemalloc@install_suffix@.$(SO) \
|
endif
|
||||||
@objroot@lib/libjemalloc@install_suffix@_pic.a
|
ifdef PIC_CFLAGS
|
||||||
MAN3 := @objroot@doc/jemalloc@install_suffix@.3
|
STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
|
||||||
DOCS_XML := @objroot@doc/jemalloc@install_suffix@.xml
|
else
|
||||||
DOCS_HTML := $(DOCS_XML:@objroot@%.xml=@srcroot@%.html)
|
STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
|
||||||
DOCS_MAN3 := $(DOCS_XML:@objroot@%.xml=@srcroot@%.3)
|
endif
|
||||||
|
DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
|
||||||
|
ifneq ($(SOREV),$(SO))
|
||||||
|
DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
|
||||||
|
endif
|
||||||
|
MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
|
||||||
|
DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
|
||||||
|
DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
|
||||||
|
DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
|
||||||
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
|
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
|
||||||
CTESTS := @srcroot@test/allocated.c @srcroot@test/allocm.c \
|
CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \
|
||||||
@srcroot@test/bitmap.c @srcroot@test/mremap.c \
|
$(srcroot)test/bitmap.c $(srcroot)test/mremap.c \
|
||||||
@srcroot@test/posix_memalign.c @srcroot@test/rallocm.c \
|
$(srcroot)test/posix_memalign.c $(srcroot)test/thread_arena.c \
|
||||||
@srcroot@test/thread_arena.c
|
$(srcroot)test/thread_tcache_enabled.c
|
||||||
|
ifeq ($(enable_experimental), 1)
|
||||||
|
CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c
|
||||||
|
endif
|
||||||
|
|
||||||
|
COBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.$(O))
|
||||||
|
CPICOBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
|
||||||
|
CTESTOBJS := $(CTESTS:$(srcroot)%.c=$(objroot)%.$(O))
|
||||||
|
|
||||||
.PHONY: all dist doc_html doc_man doc
|
.PHONY: all dist doc_html doc_man doc
|
||||||
.PHONY: install_bin install_include install_lib
|
.PHONY: install_bin install_include install_lib
|
||||||
.PHONY: install_html install_man install_doc install
|
.PHONY: install_html install_man install_doc install
|
||||||
.PHONY: tests check clean distclean relclean
|
.PHONY: tests check clean distclean relclean
|
||||||
|
|
||||||
.SECONDARY : $(CTESTS:@srcroot@%.c=@objroot@%.o)
|
.SECONDARY : $(CTESTOBJS)
|
||||||
|
|
||||||
# Default target.
|
# Default target.
|
||||||
all: $(DSOS) $(STATIC_LIBS)
|
all: build
|
||||||
|
|
||||||
dist: doc
|
dist: build_doc
|
||||||
|
|
||||||
@srcroot@doc/%.html : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/html.xsl
|
$(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
|
||||||
@XSLTPROC@ -o $@ @objroot@doc/html.xsl $<
|
$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
|
||||||
|
|
||||||
@srcroot@doc/%.3 : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/manpages.xsl
|
$(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
|
||||||
@XSLTPROC@ -o $@ @objroot@doc/manpages.xsl $<
|
$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
|
||||||
|
|
||||||
doc_html: $(DOCS_HTML)
|
build_doc_html: $(DOCS_HTML)
|
||||||
doc_man: $(DOCS_MAN3)
|
build_doc_man: $(DOCS_MAN3)
|
||||||
doc: $(DOCS)
|
build_doc: $(DOCS)
|
||||||
|
|
||||||
#
|
#
|
||||||
# Include generated dependency files.
|
# Include generated dependency files.
|
||||||
#
|
#
|
||||||
-include $(CSRCS:@srcroot@%.c=@objroot@%.d)
|
ifdef CC_MM
|
||||||
-include $(CSRCS:@srcroot@%.c=@objroot@%.pic.d)
|
-include $(COBJS:%.$(O)=%.d)
|
||||||
-include $(CTESTS:@srcroot@%.c=@objroot@%.d)
|
-include $(CPICOBJS:%.$(O)=%.d)
|
||||||
|
-include $(CTESTOBJS:%.$(O)=%.d)
|
||||||
|
endif
|
||||||
|
|
||||||
@objroot@src/%.o: @srcroot@src/%.c
|
$(COBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
|
||||||
|
$(CPICOBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
|
||||||
|
$(CPICOBJS): CFLAGS += $(PIC_CFLAGS)
|
||||||
|
$(CTESTOBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
|
||||||
|
$(CTESTOBJS): CPPFLAGS += -I$(objroot)test
|
||||||
|
ifneq ($(IMPORTLIB),$(SO))
|
||||||
|
$(COBJS): CPPFLAGS += -DDLLEXPORT
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifndef CC_MM
|
||||||
|
# Dependencies
|
||||||
|
HEADER_DIRS = $(srcroot)include/jemalloc/internal \
|
||||||
|
$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
|
||||||
|
HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
|
||||||
|
$(COBJS) $(CPICOBJS) $(CTESTOBJS): $(HEADERS)
|
||||||
|
$(CTESTOBJS): $(objroot)test/jemalloc_test.h
|
||||||
|
endif
|
||||||
|
|
||||||
|
$(COBJS) $(CPICOBJS) $(CTESTOBJS): %.$(O):
|
||||||
@mkdir -p $(@D)
|
@mkdir -p $(@D)
|
||||||
$(CC) $(CFLAGS) -c $(CPPFLAGS) -o $@ $<
|
$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
|
||||||
@$(SHELL) -ec "$(CC) -MM $(CPPFLAGS) $< | sed \"s/\($(subst /,\/,$(notdir $(basename $@)))\)\.o\([ :]*\)/$(subst /,\/,$(strip $(dir $@)))\1.o \2/g\" > $(@:%.o=%.d)"
|
ifdef CC_MM
|
||||||
|
@$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
|
||||||
|
endif
|
||||||
|
|
||||||
@objroot@src/%.pic.o: @srcroot@src/%.c
|
ifneq ($(SOREV),$(SO))
|
||||||
@mkdir -p $(@D)
|
%.$(SO) : %.$(SOREV)
|
||||||
$(CC) $(CFLAGS) -fPIC -DPIC -c $(CPPFLAGS) -o $@ $<
|
|
||||||
@$(SHELL) -ec "$(CC) -MM $(CPPFLAGS) $< | sed \"s/\($(subst /,\/,$(notdir $(basename $(basename $@))))\)\.o\([ :]*\)/$(subst /,\/,$(strip $(dir $@)))\1.pic.o \2/g\" > $(@:%.o=%.d)"
|
|
||||||
|
|
||||||
%.$(SO) : %.$(SO).$(REV)
|
|
||||||
@mkdir -p $(@D)
|
@mkdir -p $(@D)
|
||||||
ln -sf $(<F) $@
|
ln -sf $(<F) $@
|
||||||
|
|
||||||
@objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) : $(CSRCS:@srcroot@%.c=@objroot@%.pic.o)
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
$(CC) -shared -Wl,-$(WL_SONAME),$(@F) $(RPATH_EXTRA:%=@RPATH@%) -o $@ $+ $(LDFLAGS) $(LIBS)
|
|
||||||
|
|
||||||
@objroot@lib/libjemalloc@install_suffix@_pic.a : $(CSRCS:@srcroot@%.c=@objroot@%.pic.o)
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
ar crus $@ $+
|
|
||||||
|
|
||||||
@objroot@lib/libjemalloc@install_suffix@.a : $(CSRCS:@srcroot@%.c=@objroot@%.o)
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
ar crus $@ $+
|
|
||||||
|
|
||||||
@objroot@test/%.o: @srcroot@test/%.c
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
$(CC) $(CFLAGS) -c $(CPPFLAGS) -I@objroot@test -o $@ $<
|
|
||||||
@$(SHELL) -ec "$(CC) -MM $(CPPFLAGS) -I@objroot@test $< | sed \"s/\($(subst /,\/,$(notdir $(basename $@)))\)\.o\([ :]*\)/$(subst /,\/,$(strip $(dir $@)))\1.o \2/g\" > $(@:%.o=%.d)"
|
|
||||||
|
|
||||||
# Automatic dependency generation misses #include "*.c".
|
|
||||||
@objroot@test/bitmap.o : @objroot@src/bitmap.o
|
|
||||||
|
|
||||||
@objroot@test/%: @objroot@test/%.o \
|
|
||||||
@objroot@lib/libjemalloc@install_suffix@.$(SO)
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
ifneq (@RPATH@, )
|
|
||||||
$(CC) -o $@ $< @RPATH@@objroot@lib -L@objroot@lib -ljemalloc@install_suffix@ -lpthread
|
|
||||||
else
|
|
||||||
$(CC) -o $@ $< -L@objroot@lib -ljemalloc@install_suffix@ -lpthread
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(CPICOBJS),$(COBJS))
|
||||||
|
@mkdir -p $(@D)
|
||||||
|
$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
|
||||||
|
|
||||||
|
$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(CPICOBJS)
|
||||||
|
$(objroot)lib/$(LIBJEMALLOC).$(A) : $(COBJS)
|
||||||
|
$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(COBJS)
|
||||||
|
|
||||||
|
$(STATIC_LIBS):
|
||||||
|
@mkdir -p $(@D)
|
||||||
|
$(MKLIB) $+
|
||||||
|
|
||||||
|
$(objroot)test/bitmap$(EXE): $(objroot)src/bitmap.$(O)
|
||||||
|
|
||||||
|
$(objroot)test/%$(EXE): $(objroot)test/%.$(O) $(objroot)src/util.$(O) $(DSOS)
|
||||||
|
@mkdir -p $(@D)
|
||||||
|
$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(filter -lpthread,$(LIBS)) $(EXTRA_LDFLAGS)
|
||||||
|
|
||||||
|
build_lib_shared: $(DSOS)
|
||||||
|
build_lib_static: $(STATIC_LIBS)
|
||||||
|
build: build_lib_shared build_lib_static
|
||||||
|
|
||||||
install_bin:
|
install_bin:
|
||||||
install -d $(BINDIR)
|
install -d $(BINDIR)
|
||||||
@for b in $(BINS); do \
|
@for b in $(BINS); do \
|
||||||
@ -155,46 +210,55 @@ install_include:
|
|||||||
install -m 644 $$h $(INCLUDEDIR)/jemalloc; \
|
install -m 644 $$h $(INCLUDEDIR)/jemalloc; \
|
||||||
done
|
done
|
||||||
|
|
||||||
install_lib: $(DSOS) $(STATIC_LIBS)
|
install_lib_shared: $(DSOS)
|
||||||
install -d $(LIBDIR)
|
install -d $(LIBDIR)
|
||||||
install -m 755 @objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) $(LIBDIR)
|
install -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
|
||||||
ln -sf libjemalloc@install_suffix@.$(SO).$(REV) $(LIBDIR)/libjemalloc@install_suffix@.$(SO)
|
ifneq ($(SOREV),$(SO))
|
||||||
install -m 755 @objroot@lib/libjemalloc@install_suffix@_pic.a $(LIBDIR)
|
ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
|
||||||
install -m 755 @objroot@lib/libjemalloc@install_suffix@.a $(LIBDIR)
|
endif
|
||||||
|
|
||||||
install_html:
|
install_lib_static: $(STATIC_LIBS)
|
||||||
install -d $(DATADIR)/doc/jemalloc@install_suffix@
|
install -d $(LIBDIR)
|
||||||
@for d in $(DOCS_HTML); do \
|
@for l in $(STATIC_LIBS); do \
|
||||||
echo "install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@"; \
|
echo "install -m 755 $$l $(LIBDIR)"; \
|
||||||
install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@; \
|
install -m 755 $$l $(LIBDIR); \
|
||||||
done
|
done
|
||||||
|
|
||||||
install_man:
|
install_lib: install_lib_shared install_lib_static
|
||||||
|
|
||||||
|
install_doc_html:
|
||||||
|
install -d $(DATADIR)/doc/jemalloc$(install_suffix)
|
||||||
|
@for d in $(DOCS_HTML); do \
|
||||||
|
echo "install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
|
||||||
|
install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
|
||||||
|
done
|
||||||
|
|
||||||
|
install_doc_man:
|
||||||
install -d $(MANDIR)/man3
|
install -d $(MANDIR)/man3
|
||||||
@for d in $(DOCS_MAN3); do \
|
@for d in $(DOCS_MAN3); do \
|
||||||
echo "install -m 644 $$d $(MANDIR)/man3"; \
|
echo "install -m 644 $$d $(MANDIR)/man3"; \
|
||||||
install -m 644 $$d $(MANDIR)/man3; \
|
install -m 644 $$d $(MANDIR)/man3; \
|
||||||
done
|
done
|
||||||
|
|
||||||
install_doc: install_html install_man
|
install_doc: install_doc_html install_doc_man
|
||||||
|
|
||||||
install: install_bin install_include install_lib install_doc
|
install: install_bin install_include install_lib install_doc
|
||||||
|
|
||||||
tests: $(CTESTS:@srcroot@%.c=@objroot@%)
|
tests: $(CTESTS:$(srcroot)%.c=$(objroot)%$(EXE))
|
||||||
|
|
||||||
check: tests
|
check: tests
|
||||||
@mkdir -p @objroot@test
|
@mkdir -p $(objroot)test
|
||||||
@$(SHELL) -c 'total=0; \
|
@$(SHELL) -c 'total=0; \
|
||||||
failures=0; \
|
failures=0; \
|
||||||
echo "========================================="; \
|
echo "========================================="; \
|
||||||
for t in $(CTESTS:@srcroot@%.c=@objroot@%); do \
|
for t in $(CTESTS:$(srcroot)%.c=$(objroot)%); do \
|
||||||
total=`expr $$total + 1`; \
|
total=`expr $$total + 1`; \
|
||||||
/bin/echo -n "$${t} ... "; \
|
/bin/echo -n "$${t} ... "; \
|
||||||
$(TEST_LIBRARY_PATH) $${t} @abs_srcroot@ @abs_objroot@ \
|
$(TEST_LIBRARY_PATH) $${t}$(EXE) $(abs_srcroot) \
|
||||||
> @objroot@$${t}.out 2>&1; \
|
$(abs_objroot) > $(objroot)$${t}.out 2>&1; \
|
||||||
if test -e "@srcroot@$${t}.exp"; then \
|
if test -e "$(srcroot)$${t}.exp"; then \
|
||||||
diff -u @srcroot@$${t}.exp \
|
diff -w -u $(srcroot)$${t}.exp \
|
||||||
@objroot@$${t}.out >/dev/null 2>&1; \
|
$(objroot)$${t}.out >/dev/null 2>&1; \
|
||||||
fail=$$?; \
|
fail=$$?; \
|
||||||
if test "$${fail}" -eq "1" ; then \
|
if test "$${fail}" -eq "1" ; then \
|
||||||
failures=`expr $${failures} + 1`; \
|
failures=`expr $${failures} + 1`; \
|
||||||
@ -211,49 +275,49 @@ check: tests
|
|||||||
echo "Failures: $${failures}/$${total}"'
|
echo "Failures: $${failures}/$${total}"'
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f $(CSRCS:@srcroot@%.c=@objroot@%.o)
|
rm -f $(COBJS)
|
||||||
rm -f $(CSRCS:@srcroot@%.c=@objroot@%.pic.o)
|
rm -f $(CPICOBJS)
|
||||||
rm -f $(CSRCS:@srcroot@%.c=@objroot@%.d)
|
rm -f $(COBJS:%.$(O)=%.d)
|
||||||
rm -f $(CSRCS:@srcroot@%.c=@objroot@%.pic.d)
|
rm -f $(CPICOBJS:%.$(O)=%.d)
|
||||||
rm -f $(CTESTS:@srcroot@%.c=@objroot@%)
|
rm -f $(CTESTOBJS:%.$(O)=%$(EXE))
|
||||||
rm -f $(CTESTS:@srcroot@%.c=@objroot@%.o)
|
rm -f $(CTESTOBJS)
|
||||||
rm -f $(CTESTS:@srcroot@%.c=@objroot@%.d)
|
rm -f $(CTESTOBJS:%.$(O)=%.d)
|
||||||
rm -f $(CTESTS:@srcroot@%.c=@objroot@%.out)
|
rm -f $(CTESTOBJS:%.$(O)=%.out)
|
||||||
rm -f $(DSOS) $(STATIC_LIBS)
|
rm -f $(DSOS) $(STATIC_LIBS)
|
||||||
|
|
||||||
distclean: clean
|
distclean: clean
|
||||||
rm -rf @objroot@autom4te.cache
|
rm -rf $(objroot)autom4te.cache
|
||||||
rm -f @objroot@config.log
|
rm -f $(objroot)config.log
|
||||||
rm -f @objroot@config.status
|
rm -f $(objroot)config.status
|
||||||
rm -f @objroot@config.stamp
|
rm -f $(objroot)config.stamp
|
||||||
rm -f @cfghdrs_out@
|
rm -f $(cfghdrs_out)
|
||||||
rm -f @cfgoutputs_out@
|
rm -f $(cfgoutputs_out)
|
||||||
|
|
||||||
relclean: distclean
|
relclean: distclean
|
||||||
rm -f @objroot@configure
|
rm -f $(objroot)configure
|
||||||
rm -f @srcroot@VERSION
|
rm -f $(srcroot)VERSION
|
||||||
rm -f $(DOCS_HTML)
|
rm -f $(DOCS_HTML)
|
||||||
rm -f $(DOCS_MAN3)
|
rm -f $(DOCS_MAN3)
|
||||||
|
|
||||||
#===============================================================================
|
#===============================================================================
|
||||||
# Re-configuration rules.
|
# Re-configuration rules.
|
||||||
|
|
||||||
ifeq (@enable_autogen@, 1)
|
ifeq ($(enable_autogen), 1)
|
||||||
@srcroot@configure : @srcroot@configure.ac
|
$(srcroot)configure : $(srcroot)configure.ac
|
||||||
cd ./@srcroot@ && @AUTOCONF@
|
cd ./$(srcroot) && $(AUTOCONF)
|
||||||
|
|
||||||
@objroot@config.status : @srcroot@configure
|
$(objroot)config.status : $(srcroot)configure
|
||||||
./@objroot@config.status --recheck
|
./$(objroot)config.status --recheck
|
||||||
|
|
||||||
@srcroot@config.stamp.in : @srcroot@configure.ac
|
$(srcroot)config.stamp.in : $(srcroot)configure.ac
|
||||||
echo stamp > @srcroot@config.stamp.in
|
echo stamp > $(srcroot)config.stamp.in
|
||||||
|
|
||||||
@objroot@config.stamp : @cfgoutputs_in@ @cfghdrs_in@ @srcroot@configure
|
$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
|
||||||
./@objroot@config.status
|
./$(objroot)config.status
|
||||||
@touch $@
|
@touch $@
|
||||||
|
|
||||||
# There must be some action in order for make to re-read Makefile when it is
|
# There must be some action in order for make to re-read Makefile when it is
|
||||||
# out of date.
|
# out of date.
|
||||||
@cfgoutputs_out@ @cfghdrs_out@ : @objroot@config.stamp
|
$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
|
||||||
@true
|
@true
|
||||||
endif
|
endif
|
||||||
|
12
README
12
README
@ -1,10 +1,10 @@
|
|||||||
jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
|
jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
|
||||||
This distribution is a stand-alone "portable" implementation that currently
|
This distribution is a "portable" implementation that currently targets
|
||||||
targets Linux and Apple OS X. jemalloc is included as the default allocator in
|
FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default
|
||||||
the FreeBSD and NetBSD operating systems, and it is used by the Mozilla Firefox
|
allocator in the FreeBSD and NetBSD operating systems, and it is used by the
|
||||||
web browser on Microsoft Windows-related platforms. Depending on your needs,
|
Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending
|
||||||
one of the other divergent versions may suit your needs better than this
|
on your needs, one of the other divergent versions may suit your needs better
|
||||||
distribution.
|
than this distribution.
|
||||||
|
|
||||||
The COPYING file contains copyright and licensing information.
|
The COPYING file contains copyright and licensing information.
|
||||||
|
|
||||||
|
9
bin/jemalloc.sh.in
Normal file
9
bin/jemalloc.sh.in
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
prefix=@prefix@
|
||||||
|
exec_prefix=@exec_prefix@
|
||||||
|
libdir=@libdir@
|
||||||
|
|
||||||
|
@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
|
||||||
|
export @LD_PRELOAD_VAR@
|
||||||
|
exec "$@"
|
1034
config.guess
vendored
1034
config.guess
vendored
File diff suppressed because it is too large
Load Diff
432
config.sub
vendored
432
config.sub
vendored
@ -1,9 +1,10 @@
|
|||||||
#! /bin/sh
|
#! /bin/sh
|
||||||
# Configuration validation subroutine script.
|
# Configuration validation subroutine script.
|
||||||
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
|
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
|
||||||
# 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
|
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||||
|
# 2011, 2012 Free Software Foundation, Inc.
|
||||||
|
|
||||||
timestamp='2004-02-23'
|
timestamp='2012-02-10'
|
||||||
|
|
||||||
# This file is (in principle) common to ALL GNU software.
|
# This file is (in principle) common to ALL GNU software.
|
||||||
# The presence of a machine in this file suggests that SOME GNU software
|
# The presence of a machine in this file suggests that SOME GNU software
|
||||||
@ -20,23 +21,25 @@ timestamp='2004-02-23'
|
|||||||
# GNU General Public License for more details.
|
# GNU General Public License for more details.
|
||||||
#
|
#
|
||||||
# You should have received a copy of the GNU General Public License
|
# You should have received a copy of the GNU General Public License
|
||||||
# along with this program; if not, write to the Free Software
|
# along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||||
# Foundation, Inc., 59 Temple Place - Suite 330,
|
#
|
||||||
# Boston, MA 02111-1307, USA.
|
|
||||||
|
|
||||||
# As a special exception to the GNU General Public License, if you
|
# As a special exception to the GNU General Public License, if you
|
||||||
# distribute this file as part of a program that contains a
|
# distribute this file as part of a program that contains a
|
||||||
# configuration script generated by Autoconf, you may include it under
|
# configuration script generated by Autoconf, you may include it under
|
||||||
# the same distribution terms that you use for the rest of that program.
|
# the same distribution terms that you use for the rest of that program.
|
||||||
|
|
||||||
|
|
||||||
# Please send patches to <config-patches@gnu.org>. Submit a context
|
# Please send patches to <config-patches@gnu.org>. Submit a context
|
||||||
# diff and a properly formatted ChangeLog entry.
|
# diff and a properly formatted GNU ChangeLog entry.
|
||||||
#
|
#
|
||||||
# Configuration subroutine to validate and canonicalize a configuration type.
|
# Configuration subroutine to validate and canonicalize a configuration type.
|
||||||
# Supply the specified configuration type as an argument.
|
# Supply the specified configuration type as an argument.
|
||||||
# If it is invalid, we print an error message on stderr and exit with code 1.
|
# If it is invalid, we print an error message on stderr and exit with code 1.
|
||||||
# Otherwise, we print the canonical config type on stdout and succeed.
|
# Otherwise, we print the canonical config type on stdout and succeed.
|
||||||
|
|
||||||
|
# You can get the latest version of this script from:
|
||||||
|
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
|
||||||
|
|
||||||
# This file is supposed to be the same for all GNU packages
|
# This file is supposed to be the same for all GNU packages
|
||||||
# and recognize all the CPU types, system types and aliases
|
# and recognize all the CPU types, system types and aliases
|
||||||
# that are meaningful with *any* GNU software.
|
# that are meaningful with *any* GNU software.
|
||||||
@ -70,7 +73,8 @@ Report bugs and patches to <config-patches@gnu.org>."
|
|||||||
version="\
|
version="\
|
||||||
GNU config.sub ($timestamp)
|
GNU config.sub ($timestamp)
|
||||||
|
|
||||||
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
|
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
|
||||||
|
2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
|
||||||
Free Software Foundation, Inc.
|
Free Software Foundation, Inc.
|
||||||
|
|
||||||
This is free software; see the source for copying conditions. There is NO
|
This is free software; see the source for copying conditions. There is NO
|
||||||
@ -83,11 +87,11 @@ Try \`$me --help' for more information."
|
|||||||
while test $# -gt 0 ; do
|
while test $# -gt 0 ; do
|
||||||
case $1 in
|
case $1 in
|
||||||
--time-stamp | --time* | -t )
|
--time-stamp | --time* | -t )
|
||||||
echo "$timestamp" ; exit 0 ;;
|
echo "$timestamp" ; exit ;;
|
||||||
--version | -v )
|
--version | -v )
|
||||||
echo "$version" ; exit 0 ;;
|
echo "$version" ; exit ;;
|
||||||
--help | --h* | -h )
|
--help | --h* | -h )
|
||||||
echo "$usage"; exit 0 ;;
|
echo "$usage"; exit ;;
|
||||||
-- ) # Stop option processing
|
-- ) # Stop option processing
|
||||||
shift; break ;;
|
shift; break ;;
|
||||||
- ) # Use stdin as input.
|
- ) # Use stdin as input.
|
||||||
@ -99,7 +103,7 @@ while test $# -gt 0 ; do
|
|||||||
*local*)
|
*local*)
|
||||||
# First pass through any local machine types.
|
# First pass through any local machine types.
|
||||||
echo $1
|
echo $1
|
||||||
exit 0;;
|
exit ;;
|
||||||
|
|
||||||
* )
|
* )
|
||||||
break ;;
|
break ;;
|
||||||
@ -118,11 +122,18 @@ esac
|
|||||||
# Here we must recognize all the valid KERNEL-OS combinations.
|
# Here we must recognize all the valid KERNEL-OS combinations.
|
||||||
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
|
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
|
||||||
case $maybe_os in
|
case $maybe_os in
|
||||||
nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \
|
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
|
||||||
kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*)
|
linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
|
||||||
|
knetbsd*-gnu* | netbsd*-gnu* | \
|
||||||
|
kopensolaris*-gnu* | \
|
||||||
|
storm-chaos* | os2-emx* | rtmk-nova*)
|
||||||
os=-$maybe_os
|
os=-$maybe_os
|
||||||
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
|
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
|
||||||
;;
|
;;
|
||||||
|
android-linux)
|
||||||
|
os=-linux-android
|
||||||
|
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
|
||||||
|
;;
|
||||||
*)
|
*)
|
||||||
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
|
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
|
||||||
if [ $basic_machine != $1 ]
|
if [ $basic_machine != $1 ]
|
||||||
@ -145,10 +156,13 @@ case $os in
|
|||||||
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
|
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
|
||||||
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
|
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
|
||||||
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
|
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
|
||||||
-apple | -axis)
|
-apple | -axis | -knuth | -cray | -microblaze)
|
||||||
os=
|
os=
|
||||||
basic_machine=$1
|
basic_machine=$1
|
||||||
;;
|
;;
|
||||||
|
-bluegene*)
|
||||||
|
os=-cnk
|
||||||
|
;;
|
||||||
-sim | -cisco | -oki | -wec | -winbond)
|
-sim | -cisco | -oki | -wec | -winbond)
|
||||||
os=
|
os=
|
||||||
basic_machine=$1
|
basic_machine=$1
|
||||||
@ -163,13 +177,17 @@ case $os in
|
|||||||
os=-chorusos
|
os=-chorusos
|
||||||
basic_machine=$1
|
basic_machine=$1
|
||||||
;;
|
;;
|
||||||
-chorusrdb)
|
-chorusrdb)
|
||||||
os=-chorusrdb
|
os=-chorusrdb
|
||||||
basic_machine=$1
|
basic_machine=$1
|
||||||
;;
|
;;
|
||||||
-hiux*)
|
-hiux*)
|
||||||
os=-hiuxwe2
|
os=-hiuxwe2
|
||||||
;;
|
;;
|
||||||
|
-sco6)
|
||||||
|
os=-sco5v6
|
||||||
|
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
||||||
|
;;
|
||||||
-sco5)
|
-sco5)
|
||||||
os=-sco3.2v5
|
os=-sco3.2v5
|
||||||
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
||||||
@ -186,6 +204,10 @@ case $os in
|
|||||||
# Don't forget version if it is 3.2v4 or newer.
|
# Don't forget version if it is 3.2v4 or newer.
|
||||||
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
||||||
;;
|
;;
|
||||||
|
-sco5v6*)
|
||||||
|
# Don't forget version if it is 3.2v4 or newer.
|
||||||
|
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
||||||
|
;;
|
||||||
-sco*)
|
-sco*)
|
||||||
os=-sco3.2v2
|
os=-sco3.2v2
|
||||||
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
|
||||||
@ -227,25 +249,36 @@ case $basic_machine in
|
|||||||
# Some are omitted here because they have special meanings below.
|
# Some are omitted here because they have special meanings below.
|
||||||
1750a | 580 \
|
1750a | 580 \
|
||||||
| a29k \
|
| a29k \
|
||||||
|
| aarch64 | aarch64_be \
|
||||||
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
|
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
|
||||||
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
|
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
|
||||||
| am33_2.0 \
|
| am33_2.0 \
|
||||||
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
|
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
|
||||||
|
| be32 | be64 \
|
||||||
|
| bfin \
|
||||||
| c4x | clipper \
|
| c4x | clipper \
|
||||||
| d10v | d30v | dlx | dsp16xx \
|
| d10v | d30v | dlx | dsp16xx \
|
||||||
| fr30 | frv \
|
| epiphany \
|
||||||
|
| fido | fr30 | frv \
|
||||||
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
|
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
|
||||||
|
| hexagon \
|
||||||
| i370 | i860 | i960 | ia64 \
|
| i370 | i860 | i960 | ia64 \
|
||||||
| ip2k | iq2000 \
|
| ip2k | iq2000 \
|
||||||
| m32r | m68000 | m68k | m88k | mcore \
|
| le32 | le64 \
|
||||||
|
| lm32 \
|
||||||
|
| m32c | m32r | m32rle | m68000 | m68k | m88k \
|
||||||
|
| maxq | mb | microblaze | mcore | mep | metag \
|
||||||
| mips | mipsbe | mipseb | mipsel | mipsle \
|
| mips | mipsbe | mipseb | mipsel | mipsle \
|
||||||
| mips16 \
|
| mips16 \
|
||||||
| mips64 | mips64el \
|
| mips64 | mips64el \
|
||||||
| mips64vr | mips64vrel \
|
| mips64octeon | mips64octeonel \
|
||||||
| mips64orion | mips64orionel \
|
| mips64orion | mips64orionel \
|
||||||
|
| mips64r5900 | mips64r5900el \
|
||||||
|
| mips64vr | mips64vrel \
|
||||||
| mips64vr4100 | mips64vr4100el \
|
| mips64vr4100 | mips64vr4100el \
|
||||||
| mips64vr4300 | mips64vr4300el \
|
| mips64vr4300 | mips64vr4300el \
|
||||||
| mips64vr5000 | mips64vr5000el \
|
| mips64vr5000 | mips64vr5000el \
|
||||||
|
| mips64vr5900 | mips64vr5900el \
|
||||||
| mipsisa32 | mipsisa32el \
|
| mipsisa32 | mipsisa32el \
|
||||||
| mipsisa32r2 | mipsisa32r2el \
|
| mipsisa32r2 | mipsisa32r2el \
|
||||||
| mipsisa64 | mipsisa64el \
|
| mipsisa64 | mipsisa64el \
|
||||||
@ -254,30 +287,65 @@ case $basic_machine in
|
|||||||
| mipsisa64sr71k | mipsisa64sr71kel \
|
| mipsisa64sr71k | mipsisa64sr71kel \
|
||||||
| mipstx39 | mipstx39el \
|
| mipstx39 | mipstx39el \
|
||||||
| mn10200 | mn10300 \
|
| mn10200 | mn10300 \
|
||||||
|
| moxie \
|
||||||
|
| mt \
|
||||||
| msp430 \
|
| msp430 \
|
||||||
|
| nds32 | nds32le | nds32be \
|
||||||
|
| nios | nios2 \
|
||||||
| ns16k | ns32k \
|
| ns16k | ns32k \
|
||||||
| openrisc | or32 \
|
| open8 \
|
||||||
|
| or32 \
|
||||||
| pdp10 | pdp11 | pj | pjl \
|
| pdp10 | pdp11 | pj | pjl \
|
||||||
| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
|
| powerpc | powerpc64 | powerpc64le | powerpcle \
|
||||||
| pyramid \
|
| pyramid \
|
||||||
| sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
|
| rl78 | rx \
|
||||||
|
| score \
|
||||||
|
| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
|
||||||
| sh64 | sh64le \
|
| sh64 | sh64le \
|
||||||
| sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv9 | sparcv9b \
|
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
|
||||||
| strongarm \
|
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
|
||||||
| tahoe | thumb | tic4x | tic80 | tron \
|
| spu \
|
||||||
| v850 | v850e \
|
| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
|
||||||
|
| ubicom32 \
|
||||||
|
| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
|
||||||
| we32k \
|
| we32k \
|
||||||
| x86 | xscale | xstormy16 | xtensa \
|
| x86 | xc16x | xstormy16 | xtensa \
|
||||||
| z8k)
|
| z8k | z80)
|
||||||
basic_machine=$basic_machine-unknown
|
basic_machine=$basic_machine-unknown
|
||||||
;;
|
;;
|
||||||
m6811 | m68hc11 | m6812 | m68hc12)
|
c54x)
|
||||||
# Motorola 68HC11/12.
|
basic_machine=tic54x-unknown
|
||||||
|
;;
|
||||||
|
c55x)
|
||||||
|
basic_machine=tic55x-unknown
|
||||||
|
;;
|
||||||
|
c6x)
|
||||||
|
basic_machine=tic6x-unknown
|
||||||
|
;;
|
||||||
|
m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
|
||||||
basic_machine=$basic_machine-unknown
|
basic_machine=$basic_machine-unknown
|
||||||
os=-none
|
os=-none
|
||||||
;;
|
;;
|
||||||
m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
|
m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
|
||||||
;;
|
;;
|
||||||
|
config.sub

+	ms1)
+		basic_machine=mt-unknown
+		;;
+	strongarm | thumb | xscale)
+		basic_machine=arm-unknown
+		;;
+	xgate)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	xscaleeb)
+		basic_machine=armeb-unknown
+		;;
+	xscaleel)
+		basic_machine=armel-unknown
+		;;
 	# We use `pc' rather than `unknown'
 	# because (1) that's what they normally are, and
@@ -293,32 +361,40 @@ case $basic_machine in
 	# Recognize the basic CPU types with company name.
+	| aarch64-* | aarch64_be-* \
-	| avr-* \
+	| avr-* | avr32-* \
-	| bs2000-* \
+	| be32-* | be64-* \
+	| bfin-* | bs2000-* \
-	| c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
+	| c[123]* | c30-* | [cjt]90-* | c4x-* \
-	| clipper-* | cydra-* \
+	| clipper-* | craynv-* | cydra-* \
-	| f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+	| hexagon-* \
-	| m32r-* \
+	| le32-* | le64-* \
+	| lm32-* \
+	| m32c-* | m32r-* | m32rle-* \
-	| m88110-* | m88k-* | mcore-* \
+	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
-	| mips64vr-* | mips64vrel-* \
+	| mips64octeon-* | mips64octeonel-* \
+	| mips64r5900-* | mips64r5900el-* \
+	| mips64vr-* | mips64vrel-* \
+	| mips64vr5900-* | mips64vr5900el-* \
@@ -326,26 +402,39 @@ case $basic_machine in
+	| mmix-* \
+	| mt-* \
-	| none-* | np1-* | nv1-* | ns16k-* | ns32k-* \
+	| nds32-* | nds32le-* | nds32be-* \
+	| nios-* | nios2-* \
+	| none-* | np1-* | ns16k-* | ns32k-* \
+	| open8-* \
-	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
-	| romp-* | rs6000-* \
+	| rl78-* | romp-* | rs6000-* | rx-* \
-	| sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
-	| sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
+	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+	| sparclite-* \
-	| sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
-	| tahoe-* | thumb-* \
+	| tahoe-* \
+	| tile*-* \
-	| v850-* | v850e-* | vax-* \
+	| ubicom32-* \
+	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+	| vax-* \
-	| x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
+	| x86-* | x86_64-* | xc16x-* | xps100-* \
-	| xtensa-* \
+	| xstormy16-* | xtensa*-* \
-	| z8k-*)
+	| z8k-* | z80-*)
 		;;
+	# Recognize the basic CPU types without company name, with glob match.
+	xtensa*)
+		basic_machine=$basic_machine-unknown
+		;;
 	# Recognize the various machine names and aliases which stand
 	# for a CPU type and a company and sometimes even an OS.
@@ -409,6 +498,10 @@ case $basic_machine in
+	aros)
+		basic_machine=i386-pc
+		os=-aros
+		;;
@@ -417,10 +510,35 @@ case $basic_machine in
+	blackfin)
+		basic_machine=bfin-unknown
+		os=-linux
+		;;
+	blackfin-*)
+		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	bluegene*)
+		basic_machine=powerpc-ibm
+		os=-cnk
+		;;
+	c54x-*)
+		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c55x-*)
+		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c6x-*)
+		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	cegcc)
+		basic_machine=arm-unknown
+		os=-cegcc
+		;;
@@ -445,13 +563,20 @@ case $basic_machine in
-	cr16c)
-		basic_machine=cr16c-unknown
+	craynv)
+		basic_machine=craynv-cray
+		os=-unicosmp
+		;;
+	cr16 | cr16-*)
+		basic_machine=cr16-unknown
 		os=-elf
 		;;
+	crisv32 | crisv32-* | etraxfs*)
+		basic_machine=crisv32-axis
+		;;
@@ -481,6 +606,14 @@ case $basic_machine in
+	dicos)
+		basic_machine=i686-pc
+		os=-dicos
+		;;
+	djgpp)
+		basic_machine=i586-pc
+		os=-msdosdjgpp
+		;;
@@ -592,7 +725,6 @@ case $basic_machine in
-# I'm not sure what "Sysv32" means.  Should this be sysv3.2?
@@ -631,6 +763,14 @@ case $basic_machine in
+	m68knommu)
+		basic_machine=m68k-unknown
+		os=-linux
+		;;
+	m68knommu-*)
+		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
@@ -642,10 +782,17 @@ case $basic_machine in
+	microblaze)
+		basic_machine=microblaze-xilinx
+		;;
+	mingw32ce)
+		basic_machine=arm-unknown
+		os=-mingw32ce
+		;;
@@ -659,10 +806,6 @@ case $basic_machine in
-	mmix*)
-		basic_machine=mmix-knuth
-		os=-mmixware
-		;;
@@ -675,10 +818,21 @@ case $basic_machine in
+	ms1-*)
+		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+		;;
+	msys)
+		basic_machine=i386-pc
+		os=-msys
+		;;
+	nacl)
+		basic_machine=le32-unknown
+		os=-nacl
+		;;
@@ -743,9 +897,11 @@ case $basic_machine in
-	nv1)
-		basic_machine=nv1-cray
-		os=-unicosmp
+	neo-tandem)
+		basic_machine=neo-tandem
+		;;
+	nse-tandem)
+		basic_machine=nse-tandem
 		;;
@@ -754,9 +910,8 @@ case $basic_machine in
-	or32 | or32-*)
+	openrisc | openrisc-*)
 		basic_machine=or32-unknown
-		os=-coff
 		;;
@@ -778,6 +933,14 @@ case $basic_machine in
+	parisc)
+		basic_machine=hppa-unknown
+		os=-linux
+		;;
+	parisc-*)
+		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
@@ -787,6 +950,12 @@ case $basic_machine in
+	pc98)
+		basic_machine=i386-pc
+		;;
+	pc98-*)
+		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
@@ -816,9 +985,10 @@ case $basic_machine in
-	ppc)	basic_machine=powerpc-unknown
+	ppc | ppcbe)	basic_machine=powerpc-unknown
 		;;
-	ppc-*)	basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+	ppc-* | ppcbe-*)
+		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
 		;;
@@ -843,6 +1013,10 @@ case $basic_machine in
+	rdos)
+		basic_machine=i386-pc
+		os=-rdos
+		;;
@@ -869,6 +1043,10 @@ case $basic_machine in
+	sde)
+		basic_machine=mipsisa32-sde
+		os=-elf
+		;;
@@ -880,6 +1058,9 @@ case $basic_machine in
+	sh5el)
+		basic_machine=sh5le-unknown
+		;;
@@ -901,6 +1082,9 @@ case $basic_machine in
+	strongarm-* | thumb-*)
+		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
@@ -957,17 +1141,9 @@ case $basic_machine in
-	tic54x | c54x*)
-		basic_machine=tic54x-unknown
-		os=-coff
-		;;
-	tic55x | c55x*)
-		basic_machine=tic55x-unknown
-		os=-coff
-		;;
-	tic6x | c6x*)
-		basic_machine=tic6x-unknown
-		os=-coff
+	tile*)
+		basic_machine=$basic_machine-unknown
+		os=-linux-gnu
 		;;
@@ -1029,9 +1205,16 @@ case $basic_machine in
+	xbox)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
+	xscale-* | xscalee[bl]-*)
+		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+		;;
@@ -1040,6 +1223,10 @@ case $basic_machine in
+	z80-*-coff)
+		basic_machine=z80-unknown
+		os=-sim
+		;;
@@ -1059,6 +1246,9 @@ case $basic_machine in
+	mmix)
+		basic_machine=mmix-knuth
+		;;
@@ -1075,13 +1265,10 @@ case $basic_machine in
-	sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele)
+	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
 		basic_machine=sh-unknown
 		;;
-	sh64)
-		basic_machine=sh64-unknown
-		;;
-	sparc | sparcv9 | sparcv9b)
+	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
 		basic_machine=sparc-sun
 		;;
@@ -1125,9 +1312,12 @@ esac
 	# First match some system type aliases
 	# that might get confused with valid system types.
 	# -solaris* is a basic system type, with this one exception.
+	-auroraux)
+		os=-auroraux
+		;;
@@ -1148,26 +1338,31 @@ case $os in
-	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
-	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+	      | -sym* | -kopensolaris* \
-	      | -aos* \
+	      | -aos* | -aros* \
-	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \
+	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+	      | -openbsd* | -solidbsd* \
-	      | -chorusos* | -chorusrdb* \
+	      | -chorusos* | -chorusrdb* | -cegcc* \
-	      | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
-	      | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \
+	      | -mingw32* | -linux-gnu* | -linux-android* \
+	      | -linux-newlib* | -linux-uclibc* \
+	      | -uxpv* | -beos* | -mpeix* | -udk* \
-	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*)
+	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
 	# Remember, each alternative MUST END IN *, to match a version number.
@@ -1185,7 +1380,7 @@ case $os in
-	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
@@ -1294,6 +1489,14 @@ case $os in
+	-dicos*)
+		os=-dicos
+		;;
+	-nacl*)
+		;;
@@ -1316,6 +1519,12 @@ else
+	score-*)
+		os=-elf
+		;;
+	spu-*)
+		os=-elf
+		;;
@@ -1325,9 +1534,18 @@ case $basic_machine in
+	tic54x-*)
+		os=-coff
+		;;
+	tic55x-*)
+		os=-coff
+		;;
+	tic6x-*)
+		os=-coff
+		;;
@@ -1346,13 +1564,13 @@ case $basic_machine in
 	m68000-sun)
 		os=-sunos3
-		# This also exists in the configure program, but was not the
-		# default.
-		# os=-sunos4
 		;;
+	mep-*)
+		os=-elf
+		;;
@@ -1371,9 +1589,15 @@ case $basic_machine in
+	*-haiku)
+		os=-haiku
+		;;
+	*-knuth)
+		os=-mmixware
+		;;
@@ -1476,7 +1700,7 @@ case $basic_machine in
-	-aix*)
+	-cnk*|-aix*)
 		vendor=ibm
 		;;
@@ -1539,7 +1763,7 @@ case $basic_machine in
 echo $basic_machine$os
-exit 0
+exit

 # Local variables:
 # eval: (add-hook 'write-file-hooks 'time-stamp)
722	configure.ac
File diff suppressed because it is too large	Load Diff
include/jemalloc/internal/atomic.h
@@ -11,22 +11,8 @@
 #define	atomic_read_uint64(p)	atomic_add_uint64(p, 0)
 #define	atomic_read_uint32(p)	atomic_add_uint32(p, 0)
-#if (LG_SIZEOF_PTR == 3)
-#  define atomic_read_z(p) \
-    (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)0)
-#  define atomic_add_z(p, x) \
-    (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)
-#  define atomic_sub_z(p, x) \
-    (size_t)atomic_sub_uint64((uint64_t *)p, (uint64_t)x)
-#elif (LG_SIZEOF_PTR == 2)
-#  define atomic_read_z(p) \
-    (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)0)
-#  define atomic_add_z(p, x) \
-    (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)
-#  define atomic_sub_z(p, x) \
-    (size_t)atomic_sub_uint32((uint32_t *)p, (uint32_t)x)
-#endif
+#define	atomic_read_z(p)	atomic_add_z(p, 0)
+#define	atomic_read_u(p)	atomic_add_u(p, 0)
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -37,12 +23,17 @@ uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
 uint64_t	atomic_sub_uint64(uint64_t *p, uint64_t x);
 uint32_t	atomic_add_uint32(uint32_t *p, uint32_t x);
 uint32_t	atomic_sub_uint32(uint32_t *p, uint32_t x);
+size_t	atomic_add_z(size_t *p, size_t x);
+size_t	atomic_sub_z(size_t *p, size_t x);
+unsigned	atomic_add_u(unsigned *p, unsigned x);
+unsigned	atomic_sub_u(unsigned *p, unsigned x);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
 /******************************************************************************/
 /* 64-bit operations. */
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+#  ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
@@ -56,6 +47,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 	return (__sync_sub_and_fetch(p, x));
 }
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (InterlockedExchangeAdd64(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (InterlockedExchangeAdd64(p, -((int64_t)x)));
+}
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
@@ -70,7 +75,7 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
 }
-#elif (defined(__amd64_) || defined(__x86_64__))
+#  elif (defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
@@ -97,8 +102,43 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 	return (x);
 }
-#else
+#  elif (defined(JEMALLOC_ATOMIC9))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	/*
+	 * atomic_fetchadd_64() doesn't exist, but we only ever use this
+	 * function on LP64 systems, so atomic_fetchadd_long() will do.
+	 */
+	assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+	return (atomic_fetchadd_long(p, (unsigned long)x) + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+	return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+}
+#  elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
+#  else
 #  error "Missing implementation for 64-bit atomic operations"
 #  endif
 #endif
@@ -119,6 +159,20 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 	return (__sync_sub_and_fetch(p, x));
 }
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (InterlockedExchangeAdd(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (InterlockedExchangeAdd(p, -((int32_t)x)));
+}
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
@@ -133,7 +187,7 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
 }
-#elif (defined(__i386__) || defined(__amd64_) || defined(__x86_64__))
+#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
@@ -160,9 +214,90 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 	return (x);
 }
+#elif (defined(JEMALLOC_ATOMIC9))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (atomic_fetchadd_32(p, x) + x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+}
+#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_add_and_fetch(p, x));
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+	return (__sync_sub_and_fetch(p, x));
+}
 #else
 #  error "Missing implementation for 32-bit atomic operations"
 #endif
+
+/******************************************************************************/
+/* size_t operations. */
+JEMALLOC_INLINE size_t
+atomic_add_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE size_t
+atomic_sub_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+	return ((size_t)atomic_add_uint64((uint64_t *)p,
+	    (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+	return ((size_t)atomic_add_uint32((uint32_t *)p,
+	    (uint32_t)-((int32_t)x)));
+#endif
+}
+
+/******************************************************************************/
+/* unsigned operations. */
+JEMALLOC_INLINE unsigned
+atomic_add_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+	return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_INT == 2)
+	return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE unsigned
+atomic_sub_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+	return ((unsigned)atomic_add_uint64((uint64_t *)p,
+	    (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_INT == 2)
+	return ((unsigned)atomic_add_uint32((uint32_t *)p,
+	    (uint32_t)-((int32_t)x)));
+#endif
+}
+/******************************************************************************/
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
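The size_t and unsigned wrappers introduced above only dispatch to the 32- or 64-bit primitives according to LG_SIZEOF_PTR and LG_SIZEOF_INT. A minimal sketch of how a caller might use them, assuming a GCC-style toolchain where the __sync builtins back the primitives; the counter name is hypothetical:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the atomic_*_z() wrappers declared above. */
    static size_t
    atomic_add_z(size_t *p, size_t x)
    {
    	return ((size_t)__sync_add_and_fetch(p, x));
    }

    static size_t
    atomic_sub_z(size_t *p, size_t x)
    {
    	return ((size_t)__sync_sub_and_fetch(p, x));
    }

    static size_t allocated;	/* hypothetical statistics counter */

    int
    main(void)
    {
    	atomic_add_z(&allocated, 4096);	/* account for a mapped chunk */
    	atomic_sub_z(&allocated, 4096);	/* and for unmapping it again */
    	printf("%zu\n", allocated);	/* prints 0 */
    	return (0);
    }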
include/jemalloc/internal/base.h
@@ -9,12 +9,14 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-extern malloc_mutex_t	base_mtx;
-
 void	*base_alloc(size_t size);
+void	*base_calloc(size_t number, size_t size);
 extent_node_t *base_node_alloc(void);
 void	base_node_dealloc(extent_node_t *node);
 bool	base_boot(void);
+void	base_prefork(void);
+void	base_postfork_parent(void);
+void	base_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
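base_calloc(number, size) is new in this branch. A hedged sketch of what such a wrapper over base_alloc() could look like; this is an illustration only, not the actual implementation, and the overflow check is elided:

    #include <stddef.h>
    #include <string.h>

    void	*base_alloc(size_t size);

    /* Illustrative only: a calloc-style wrapper over base_alloc(). */
    void *
    base_calloc_sketch(size_t number, size_t size)
    {
    	size_t total = number * size;	/* overflow check elided */
    	void *ret = base_alloc(total);

    	if (ret != NULL)
    		memset(ret, 0, total);
    	return (ret);
    }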
include/jemalloc/internal/chunk.h
@@ -28,20 +28,13 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 extern size_t		opt_lg_chunk;
-#ifdef JEMALLOC_SWAP
-extern bool		opt_overcommit;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t	chunks_mtx;
 /* Chunk statistics. */
 extern chunk_stats_t	stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 extern rtree_t		*chunks_rtree;
-#endif
 
 extern size_t		chunksize;
 extern size_t		chunksize_mask; /* (chunksize - 1). */
@@ -49,7 +42,7 @@ extern size_t chunk_npages;
 extern size_t		map_bias; /* Number of arena chunk header pages. */
 extern size_t		arena_maxclass; /* Max size class for arenas. */
 
-void	*chunk_alloc(size_t size, bool base, bool *zero);
+void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void	chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool	chunk_boot(void);
@@ -60,6 +53,5 @@ bool chunk_boot(void);
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
 
-#include "jemalloc/internal/chunk_swap.h"
 #include "jemalloc/internal/chunk_dss.h"
 #include "jemalloc/internal/chunk_mmap.h"
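chunk_alloc() now takes an explicit alignment argument. A hypothetical call site under that prototype, requesting one zeroed, chunksize-aligned chunk that is not accounted to the base allocator:

    #include <stdbool.h>
    #include <stddef.h>

    void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);

    /* Hypothetical call site for the updated prototype above. */
    void *
    grow_by_one_chunk(size_t chunksize)
    {
    	bool zero = true;

    	/* One chunk, aligned to the chunk size, not from the base allocator. */
    	return (chunk_alloc(chunksize, chunksize, false, &zero));
    }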
include/jemalloc/internal/chunk_dss.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
@@ -10,16 +9,12 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-/*
- * Protects sbrk() calls.  This avoids malloc races among threads, though it
- * does not protect against races with threads that call sbrk() directly.
- */
-extern malloc_mutex_t	dss_mtx;
-
-void	*chunk_alloc_dss(size_t size, bool *zero);
+void	*chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
 bool	chunk_in_dss(void *chunk);
-bool	chunk_dealloc_dss(void *chunk, size_t size);
 bool	chunk_dss_boot(void);
+void	chunk_dss_prefork(void);
+void	chunk_dss_postfork_parent(void);
+void	chunk_dss_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -27,4 +22,3 @@ bool chunk_dss_boot(void);
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
include/jemalloc/internal/chunk_mmap.h
@@ -9,11 +9,10 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void	*chunk_alloc_mmap(size_t size);
-void	*chunk_alloc_mmap_noreserve(size_t size);
-void	chunk_dealloc_mmap(void *chunk, size_t size);
+void	pages_purge(void *addr, size_t length);
 
-bool	chunk_mmap_boot(void);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
+bool	chunk_dealloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
include/jemalloc/internal/chunk_swap.h (deleted)
@@ -1,34 +0,0 @@
-#ifdef JEMALLOC_SWAP
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern malloc_mutex_t	swap_mtx;
-extern bool		swap_enabled;
-extern bool		swap_prezeroed;
-extern size_t		swap_nfds;
-extern int		*swap_fds;
-#ifdef JEMALLOC_STATS
-extern size_t		swap_avail;
-#endif
-
-void	*chunk_alloc_swap(size_t size, bool *zero);
-bool	chunk_in_swap(void *chunk);
-bool	chunk_dealloc_swap(void *chunk, size_t size);
-bool	chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
-bool	chunk_swap_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
include/jemalloc/internal/ckh.h
@@ -30,11 +30,6 @@ struct ckhc_s {
 };
 
 struct ckh_s {
-#ifdef JEMALLOC_DEBUG
-#define	CKH_MAGIC	0x3af2489d
-	uint32_t	magic;
-#endif
-
 #ifdef CKH_COUNT
 	/* Counters used to get an idea of performance. */
 	uint64_t	ngrows;
@@ -47,7 +42,7 @@ struct ckh_s {
 	/* Used for pseudo-random number generation. */
 #define	CKH_A		1103515241
 #define	CKH_C		12347
-	uint32_t	prn_state;
+	uint32_t	prng_state;
 
 	/* Total number of items. */
 	size_t		count;
include/jemalloc/internal/ctl.h
@@ -2,6 +2,8 @@
 #ifdef JEMALLOC_H_TYPES
 
 typedef struct ctl_node_s ctl_node_t;
+typedef struct ctl_named_node_s ctl_named_node_t;
+typedef struct ctl_indexed_node_s ctl_indexed_node_t;
 typedef struct ctl_arena_stats_s ctl_arena_stats_t;
 typedef struct ctl_stats_s ctl_stats_t;
@@ -11,20 +13,21 @@ typedef struct ctl_stats_s ctl_stats_t;
 struct ctl_node_s {
 	bool			named;
-	union {
-		struct {
-			const char	*name;
-			/* If (nchildren == 0), this is a terminal node. */
-			unsigned	nchildren;
-			const ctl_node_t *children;
-		} named;
-		struct {
-			const ctl_node_t *(*index)(const size_t *, size_t,
-			    size_t);
-		} indexed;
-	} u;
-	int	(*ctl)(const size_t *, size_t, void *, size_t *, void *,
-	    size_t);
+};
+
+struct ctl_named_node_s {
+	struct ctl_node_s	node;
+	const char		*name;
+	/* If (nchildren == 0), this is a terminal node. */
+	unsigned		nchildren;
+	const ctl_node_t	*children;
+	int			(*ctl)(const size_t *, size_t, void *, size_t *,
+	    void *, size_t);
+};
+
+struct ctl_indexed_node_s {
+	struct ctl_node_s	node;
+	const ctl_named_node_t	*(*index)(const size_t *, size_t, size_t);
 };
 
 struct ctl_arena_stats_s {
@@ -32,7 +35,6 @@ struct ctl_arena_stats_s {
 	unsigned		nthreads;
 	size_t			pactive;
 	size_t			pdirty;
-#ifdef JEMALLOC_STATS
 	arena_stats_t		astats;
 
 	/* Aggregate stats for small size classes, based on bin stats. */
@@ -41,13 +43,11 @@ struct ctl_arena_stats_s {
 	uint64_t		ndalloc_small;
 	uint64_t		nrequests_small;
 
-	malloc_bin_stats_t	*bstats;	/* nbins elements. */
+	malloc_bin_stats_t	bstats[NBINS];
 	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
-#endif
 };
 
 struct ctl_stats_s {
-#ifdef JEMALLOC_STATS
 	size_t			allocated;
 	size_t			active;
 	size_t			mapped;
@@ -61,11 +61,7 @@ struct ctl_stats_s {
 		uint64_t	nmalloc;	/* huge_nmalloc */
 		uint64_t	ndalloc;	/* huge_ndalloc */
 	} huge;
-#endif
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
-#ifdef JEMALLOC_SWAP
-	size_t			swap_avail;
-#endif
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
@@ -81,27 +77,25 @@ int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 bool	ctl_boot(void);
 
 #define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
-	if (JEMALLOC_P(mallctl)(name, oldp, oldlenp, newp, newlen)	\
+	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
 	    != 0) {							\
-		malloc_write("<jemalloc>: Failure in xmallctl(\"");	\
-		malloc_write(name);					\
-		malloc_write("\", ...)\n");				\
+		malloc_printf(						\
+		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
+		    name);						\
 		abort();						\
 	}								\
 } while (0)
 
 #define	xmallctlnametomib(name, mibp, miblenp) do {			\
-	if (JEMALLOC_P(mallctlnametomib)(name, mibp, miblenp) != 0) {	\
-		malloc_write(						\
-		    "<jemalloc>: Failure in xmallctlnametomib(\"");	\
-		malloc_write(name);					\
-		malloc_write("\", ...)\n");				\
+	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
+		malloc_printf("<jemalloc>: Failure in "			\
+		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
 		abort();						\
 	}								\
 } while (0)
 
 #define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
-	if (JEMALLOC_P(mallctlbymib)(mib, miblen, oldp, oldlenp, newp,	\
+	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
 	    newlen) != 0) {						\
 		malloc_write(						\
 		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
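The xmallctl() wrappers above abort on failure; ordinary callers go through the renamed public entry points such as je_mallctl(). A minimal sketch using jemalloc's documented mallctl namespace, assuming the library was built with statistics enabled:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int	je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
        size_t newlen);

    int
    main(void)
    {
    	uint64_t epoch = 1;
    	size_t allocated, sz = sizeof(allocated);

    	/* Refresh the statistics snapshot, then read it. */
    	je_mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
    	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
    		printf("allocated: %zu\n", allocated);
    	return (0);
    }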
include/jemalloc/internal/extent.h
@@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
 /* Tree of extents. */
 struct extent_node_s {
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 	/* Linkage for the size/address-ordered tree. */
 	rb_node(extent_node_t)	link_szad;
-#endif
 
 	/* Linkage for the address-ordered tree. */
 	rb_node(extent_node_t)	link_ad;
 
-#ifdef JEMALLOC_PROF
 	/* Profile counters, used for huge objects. */
 	prof_ctx_t		*prof_ctx;
-#endif
 
 	/* Pointer to the extent that this tree node is responsible for. */
 	void			*addr;
@@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
-#endif
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
include/jemalloc/internal/hash.h
@@ -26,7 +26,7 @@ uint64_t hash(const void *key, size_t len, uint64_t seed);
 JEMALLOC_INLINE uint64_t
 hash(const void *key, size_t len, uint64_t seed)
 {
-	const uint64_t m = 0xc6a4a7935bd1e995LLU;
+	const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
 	const int r = 47;
 	uint64_t h = seed ^ (len * m);
 	const uint64_t *data = (const uint64_t *)key;
@@ -48,14 +48,14 @@ hash(const void *key, size_t len, uint64_t seed)
 	data2 = (const unsigned char *)data;
 	switch(len & 7) {
 	case 7: h ^= ((uint64_t)(data2[6])) << 48;
 	case 6: h ^= ((uint64_t)(data2[5])) << 40;
 	case 5: h ^= ((uint64_t)(data2[4])) << 32;
 	case 4: h ^= ((uint64_t)(data2[3])) << 24;
 	case 3: h ^= ((uint64_t)(data2[2])) << 16;
 	case 2: h ^= ((uint64_t)(data2[1])) << 8;
 	case 1: h ^= ((uint64_t)(data2[0]));
 		h *= m;
 	}
 
 	h ^= h >> r;
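hash() above is a MurmurHash-style 64-bit mix; the only change here is spelling the multiplier with UINT64_C so the constant is well-typed on 32-bit compilers. A small hypothetical caller, with an arbitrary seed:

    #include <stdint.h>
    #include <string.h>

    uint64_t	hash(const void *key, size_t len, uint64_t seed);

    /* Hypothetical caller; the seed value is arbitrary. */
    uint64_t
    hash_cstring(const char *s)
    {
    	return (hash(s, strlen(s), UINT64_C(0x9e3779b97f4a7c13)));
    }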
include/jemalloc/internal/huge.h
@@ -9,12 +9,10 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#ifdef JEMALLOC_STATS
 /* Huge allocation statistics. */
 extern uint64_t		huge_nmalloc;
 extern uint64_t		huge_ndalloc;
 extern size_t		huge_allocated;
-#endif
 
 /* Protects chunk-related data structures. */
 extern malloc_mutex_t	huge_mtx;
@@ -27,11 +25,12 @@ void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero);
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);
 void	huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#endif
 bool	huge_boot(void);
+void	huge_prefork(void);
+void	huge_postfork_parent(void);
+void	huge_postfork_child(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/

File diff suppressed because it is too large	Load Diff
include/jemalloc/internal/mb.h
@@ -54,7 +54,7 @@ mb_write(void)
 	);
 #endif
 }
-#elif (defined(__amd64_) || defined(__x86_64__))
+#elif (defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE void
 mb_write(void)
 {
@@ -87,6 +87,13 @@ mb_write(void)
 	    : "memory" /* Clobbers. */
 	    );
 }
+#elif defined(__tile__)
+JEMALLOC_INLINE void
+mb_write(void)
+{
+
+	__sync_synchronize();
+}
 #else
 /*
  * This is much slower than a simple memory barrier, but the semantics of mutex
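The new __tile__ branch falls back to __sync_synchronize(), a full barrier. A tiny illustration of the publish pattern such a write barrier supports; the flag and payload names are hypothetical:

    /* Illustration of a full write barrier; names are hypothetical. */
    static int payload;
    static volatile int ready;

    void
    publish(int value)
    {
    	payload = value;
    	__sync_synchronize();	/* make payload visible before the flag */
    	ready = 1;
    }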
@ -1,22 +1,42 @@
|
|||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
#ifdef JEMALLOC_H_TYPES
|
#ifdef JEMALLOC_H_TYPES
|
||||||
|
|
||||||
-#ifdef JEMALLOC_OSSPIN
-typedef OSSpinLock malloc_mutex_t;
-#else
-typedef pthread_mutex_t malloc_mutex_t;
-#endif
+typedef struct malloc_mutex_s malloc_mutex_t;
 
-#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#ifdef _WIN32
+# define MALLOC_MUTEX_INITIALIZER
+#elif (defined(JEMALLOC_OSSPIN))
+# define MALLOC_MUTEX_INITIALIZER {0}
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
 #else
-# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
+     defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
+#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
+#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
+# else
+#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+#  define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
+# endif
 #endif
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+struct malloc_mutex_s {
+#ifdef _WIN32
+    CRITICAL_SECTION lock;
+#elif (defined(JEMALLOC_OSSPIN))
+    OSSpinLock lock;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+    pthread_mutex_t lock;
+    malloc_mutex_t *postponed_next;
+#else
+    pthread_mutex_t lock;
+#endif
+};
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
@@ -24,11 +44,15 @@ typedef pthread_mutex_t malloc_mutex_t;
 #ifdef JEMALLOC_LAZY_LOCK
 extern bool isthreaded;
 #else
+# undef isthreaded /* Undo private_namespace.h definition. */
 # define isthreaded true
 #endif
 
 bool malloc_mutex_init(malloc_mutex_t *mutex);
-void malloc_mutex_destroy(malloc_mutex_t *mutex);
+void malloc_mutex_prefork(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool mutex_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -36,7 +60,6 @@ void malloc_mutex_destroy(malloc_mutex_t *mutex);
 
 #ifndef JEMALLOC_ENABLE_INLINE
 void malloc_mutex_lock(malloc_mutex_t *mutex);
-bool malloc_mutex_trylock(malloc_mutex_t *mutex);
 void malloc_mutex_unlock(malloc_mutex_t *mutex);
 #endif
 
@@ -46,37 +69,27 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
 {
 
     if (isthreaded) {
-#ifdef JEMALLOC_OSSPIN
-        OSSpinLockLock(mutex);
+#ifdef _WIN32
+        EnterCriticalSection(&mutex->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+        OSSpinLockLock(&mutex->lock);
 #else
-        pthread_mutex_lock(mutex);
+        pthread_mutex_lock(&mutex->lock);
 #endif
     }
 }
 
-JEMALLOC_INLINE bool
-malloc_mutex_trylock(malloc_mutex_t *mutex)
-{
-
-    if (isthreaded) {
-#ifdef JEMALLOC_OSSPIN
-        return (OSSpinLockTry(mutex) == false);
-#else
-        return (pthread_mutex_trylock(mutex) != 0);
-#endif
-    } else
-        return (false);
-}
-
 JEMALLOC_INLINE void
 malloc_mutex_unlock(malloc_mutex_t *mutex)
 {
 
     if (isthreaded) {
-#ifdef JEMALLOC_OSSPIN
-        OSSpinLockUnlock(mutex);
+#ifdef _WIN32
+        LeaveCriticalSection(&mutex->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+        OSSpinLockUnlock(&mutex->lock);
 #else
-        pthread_mutex_unlock(mutex);
+        pthread_mutex_unlock(&mutex->lock);
 #endif
    }
 }
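The struct wrapper introduced above replaces the bare OSSpinLock/pthread_mutex_t typedefs so that Win32's CRITICAL_SECTION can sit behind the same malloc_mutex_lock()/malloc_mutex_unlock() call sites. A minimal standalone sketch of that pattern follows; it is not jemalloc's code, and the demo_* names are illustrative only.

#include <stdbool.h>

#ifdef _WIN32
#include <windows.h>
typedef struct { CRITICAL_SECTION lock; } demo_mutex_t;
static void demo_mutex_init(demo_mutex_t *m)   { InitializeCriticalSection(&m->lock); }
static void demo_mutex_lock(demo_mutex_t *m)   { EnterCriticalSection(&m->lock); }
static void demo_mutex_unlock(demo_mutex_t *m) { LeaveCriticalSection(&m->lock); }
#else
#include <pthread.h>
typedef struct { pthread_mutex_t lock; } demo_mutex_t;
static void demo_mutex_init(demo_mutex_t *m)   { pthread_mutex_init(&m->lock, NULL); }
static void demo_mutex_lock(demo_mutex_t *m)   { pthread_mutex_lock(&m->lock); }
static void demo_mutex_unlock(demo_mutex_t *m) { pthread_mutex_unlock(&m->lock); }
#endif

int main(void)
{
	demo_mutex_t m;

	demo_mutex_init(&m);
	demo_mutex_lock(&m);	/* Same call site regardless of backend. */
	demo_mutex_unlock(&m);
	return (0);
}

Wrapping the platform lock in a one-member struct (rather than typedef'ing the raw lock) is what lets the merged header add the JEMALLOC_MUTEX_INIT_CB case with its extra postponed_next field without changing any caller.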
@@ -1,36 +1,84 @@
+#define a0calloc JEMALLOC_N(a0calloc)
+#define a0free JEMALLOC_N(a0free)
+#define a0malloc JEMALLOC_N(a0malloc)
+#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
 #define arena_bin_index JEMALLOC_N(arena_bin_index)
+#define arena_bin_info JEMALLOC_N(arena_bin_info)
 #define arena_boot JEMALLOC_N(arena_boot)
 #define arena_dalloc JEMALLOC_N(arena_dalloc)
 #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
+#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
 #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
+#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
+#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
 #define arena_malloc JEMALLOC_N(arena_malloc)
 #define arena_malloc_large JEMALLOC_N(arena_malloc_large)
 #define arena_malloc_small JEMALLOC_N(arena_malloc_small)
+#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
+#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
+#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
+#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
+#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
+#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
+#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
+#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
+#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
+#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
+#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
+#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
+#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
+#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
+#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
+#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
+#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
+#define arena_maxclass JEMALLOC_N(arena_maxclass)
 #define arena_new JEMALLOC_N(arena_new)
 #define arena_palloc JEMALLOC_N(arena_palloc)
+#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
+#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
+#define arena_prefork JEMALLOC_N(arena_prefork)
 #define arena_prof_accum JEMALLOC_N(arena_prof_accum)
 #define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
 #define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
 #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
+#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
 #define arena_purge_all JEMALLOC_N(arena_purge_all)
 #define arena_ralloc JEMALLOC_N(arena_ralloc)
 #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
 #define arena_run_regind JEMALLOC_N(arena_run_regind)
 #define arena_salloc JEMALLOC_N(arena_salloc)
-#define arena_salloc_demote JEMALLOC_N(arena_salloc_demote)
 #define arena_stats_merge JEMALLOC_N(arena_stats_merge)
 #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
+#define arenas JEMALLOC_N(arenas)
 #define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
+#define arenas_booted JEMALLOC_N(arenas_booted)
+#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
 #define arenas_extend JEMALLOC_N(arenas_extend)
+#define arenas_initialized JEMALLOC_N(arenas_initialized)
+#define arenas_lock JEMALLOC_N(arenas_lock)
 #define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
+#define arenas_tls JEMALLOC_N(arenas_tls)
+#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
+#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
+#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
+#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
+#define atomic_add_u JEMALLOC_N(atomic_add_u)
 #define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
 #define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
+#define atomic_add_z JEMALLOC_N(atomic_add_z)
+#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
 #define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
 #define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
+#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
 #define base_alloc JEMALLOC_N(base_alloc)
 #define base_boot JEMALLOC_N(base_boot)
+#define base_calloc JEMALLOC_N(base_calloc)
 #define base_node_alloc JEMALLOC_N(base_node_alloc)
 #define base_node_dealloc JEMALLOC_N(base_node_dealloc)
+#define base_postfork_child JEMALLOC_N(base_postfork_child)
+#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
+#define base_prefork JEMALLOC_N(base_prefork)
 #define bitmap_full JEMALLOC_N(bitmap_full)
 #define bitmap_get JEMALLOC_N(bitmap_get)
 #define bitmap_info_init JEMALLOC_N(bitmap_info_init)
@@ -47,19 +95,19 @@
 #define chunk_alloc JEMALLOC_N(chunk_alloc)
 #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
 #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_alloc_mmap_noreserve JEMALLOC_N(chunk_alloc_mmap_noreserve)
-#define chunk_alloc_swap JEMALLOC_N(chunk_alloc_swap)
 #define chunk_boot JEMALLOC_N(chunk_boot)
 #define chunk_dealloc JEMALLOC_N(chunk_dealloc)
-#define chunk_dealloc_dss JEMALLOC_N(chunk_dealloc_dss)
 #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
-#define chunk_dealloc_swap JEMALLOC_N(chunk_dealloc_swap)
 #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
+#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
+#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
 #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_in_swap JEMALLOC_N(chunk_in_swap)
-#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
-#define chunk_swap_boot JEMALLOC_N(chunk_swap_boot)
-#define chunk_swap_enable JEMALLOC_N(chunk_swap_enable)
+#define chunk_npages JEMALLOC_N(chunk_npages)
+#define chunks_mtx JEMALLOC_N(chunks_mtx)
+#define chunks_rtree JEMALLOC_N(chunks_rtree)
+#define chunksize JEMALLOC_N(chunksize)
+#define chunksize_mask JEMALLOC_N(chunksize_mask)
 #define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
 #define ckh_count JEMALLOC_N(ckh_count)
 #define ckh_delete JEMALLOC_N(ckh_delete)
@@ -77,7 +125,6 @@
 #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
 #define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
 #define ckh_try_insert JEMALLOC_N(ckh_try_insert)
-#define create_zone JEMALLOC_N(create_zone)
 #define ctl_boot JEMALLOC_N(ctl_boot)
 #define ctl_bymib JEMALLOC_N(ctl_bymib)
 #define ctl_byname JEMALLOC_N(ctl_byname)
@@ -115,10 +162,17 @@
 #define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
 #define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
 #define hash JEMALLOC_N(hash)
+#define huge_allocated JEMALLOC_N(huge_allocated)
 #define huge_boot JEMALLOC_N(huge_boot)
 #define huge_dalloc JEMALLOC_N(huge_dalloc)
 #define huge_malloc JEMALLOC_N(huge_malloc)
+#define huge_mtx JEMALLOC_N(huge_mtx)
+#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
+#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
 #define huge_palloc JEMALLOC_N(huge_palloc)
+#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
+#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
+#define huge_prefork JEMALLOC_N(huge_prefork)
 #define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
 #define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
 #define huge_ralloc JEMALLOC_N(huge_ralloc)
@@ -129,21 +183,63 @@
 #define idalloc JEMALLOC_N(idalloc)
 #define imalloc JEMALLOC_N(imalloc)
 #define ipalloc JEMALLOC_N(ipalloc)
+#define iqalloc JEMALLOC_N(iqalloc)
 #define iralloc JEMALLOC_N(iralloc)
 #define isalloc JEMALLOC_N(isalloc)
+#define isthreaded JEMALLOC_N(isthreaded)
 #define ivsalloc JEMALLOC_N(ivsalloc)
-#define jemalloc_darwin_init JEMALLOC_N(jemalloc_darwin_init)
-#define jemalloc_postfork JEMALLOC_N(jemalloc_postfork)
+#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
+#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
 #define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
 #define malloc_cprintf JEMALLOC_N(malloc_cprintf)
-#define malloc_mutex_destroy JEMALLOC_N(malloc_mutex_destroy)
 #define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
 #define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
-#define malloc_mutex_trylock JEMALLOC_N(malloc_mutex_trylock)
+#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
+#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
+#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
 #define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
 #define malloc_printf JEMALLOC_N(malloc_printf)
+#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
+#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
+#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
+#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
+#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
+#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
+#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
+#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
+#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
 #define malloc_write JEMALLOC_N(malloc_write)
+#define map_bias JEMALLOC_N(map_bias)
 #define mb_write JEMALLOC_N(mb_write)
+#define mutex_boot JEMALLOC_N(mutex_boot)
+#define narenas JEMALLOC_N(narenas)
+#define ncpus JEMALLOC_N(ncpus)
+#define nhbins JEMALLOC_N(nhbins)
+#define opt_abort JEMALLOC_N(opt_abort)
+#define opt_junk JEMALLOC_N(opt_junk)
+#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
+#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
+#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
+#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
+#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
+#define opt_narenas JEMALLOC_N(opt_narenas)
+#define opt_prof JEMALLOC_N(opt_prof)
+#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
+#define opt_prof_active JEMALLOC_N(opt_prof_active)
+#define opt_prof_final JEMALLOC_N(opt_prof_final)
+#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
+#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
+#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
+#define opt_quarantine JEMALLOC_N(opt_quarantine)
+#define opt_redzone JEMALLOC_N(opt_redzone)
+#define opt_stats_print JEMALLOC_N(opt_stats_print)
+#define opt_tcache JEMALLOC_N(opt_tcache)
+#define opt_utrace JEMALLOC_N(opt_utrace)
+#define opt_valgrind JEMALLOC_N(opt_valgrind)
+#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
+#define opt_zero JEMALLOC_N(opt_zero)
+#define p2rz JEMALLOC_N(p2rz)
+#define pages_purge JEMALLOC_N(pages_purge)
 #define pow2_ceil JEMALLOC_N(pow2_ceil)
 #define prof_backtrace JEMALLOC_N(prof_backtrace)
 #define prof_boot0 JEMALLOC_N(prof_boot0)
@@ -154,14 +250,31 @@
 #define prof_free JEMALLOC_N(prof_free)
 #define prof_gdump JEMALLOC_N(prof_gdump)
 #define prof_idump JEMALLOC_N(prof_idump)
+#define prof_interval JEMALLOC_N(prof_interval)
 #define prof_lookup JEMALLOC_N(prof_lookup)
 #define prof_malloc JEMALLOC_N(prof_malloc)
 #define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_promote JEMALLOC_N(prof_promote)
 #define prof_realloc JEMALLOC_N(prof_realloc)
 #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
 #define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
+#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
+#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
+#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
 #define prof_tdata_init JEMALLOC_N(prof_tdata_init)
-#define pthread_create JEMALLOC_N(pthread_create)
+#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
+#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
+#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
+#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
+#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
+#define quarantine JEMALLOC_N(quarantine)
+#define quarantine_boot JEMALLOC_N(quarantine_boot)
+#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
+#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
+#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
+#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
+#define register_zone JEMALLOC_N(register_zone)
 #define rtree_get JEMALLOC_N(rtree_get)
 #define rtree_get_locked JEMALLOC_N(rtree_get_locked)
 #define rtree_new JEMALLOC_N(rtree_new)
@@ -171,25 +284,56 @@
 #define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
 #define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
 #define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
+#define stats_cactive JEMALLOC_N(stats_cactive)
 #define stats_cactive_add JEMALLOC_N(stats_cactive_add)
 #define stats_cactive_get JEMALLOC_N(stats_cactive_get)
 #define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
+#define stats_chunks JEMALLOC_N(stats_chunks)
 #define stats_print JEMALLOC_N(stats_print)
-#define szone2ozone JEMALLOC_N(szone2ozone)
 #define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
 #define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
 #define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
 #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
+#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
+#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
 #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
 #define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
-#define tcache_boot JEMALLOC_N(tcache_boot)
+#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
+#define tcache_boot0 JEMALLOC_N(tcache_boot0)
+#define tcache_boot1 JEMALLOC_N(tcache_boot1)
+#define tcache_booted JEMALLOC_N(tcache_booted)
 #define tcache_create JEMALLOC_N(tcache_create)
 #define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
 #define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
 #define tcache_destroy JEMALLOC_N(tcache_destroy)
+#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
+#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
+#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
+#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
+#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
+#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
+#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
+#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
+#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
 #define tcache_event JEMALLOC_N(tcache_event)
+#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
+#define tcache_flush JEMALLOC_N(tcache_flush)
 #define tcache_get JEMALLOC_N(tcache_get)
+#define tcache_initialized JEMALLOC_N(tcache_initialized)
+#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
+#define tcache_salloc JEMALLOC_N(tcache_salloc)
 #define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
-#define thread_allocated_get JEMALLOC_N(thread_allocated_get)
-#define thread_allocated_get_hard JEMALLOC_N(thread_allocated_get_hard)
-#define u2s JEMALLOC_N(u2s)
+#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
+#define tcache_tls JEMALLOC_N(tcache_tls)
+#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
+#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
+#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
+#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
+#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
+#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
+#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
+#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
+#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
+#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
+#define u2rz JEMALLOC_N(u2rz)
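Every entry above follows the same pattern: an internal symbol name is redefined to JEMALLOC_N(symbol), which prepends the configured private namespace prefix so that internals of a statically linked jemalloc cannot collide with application or libc symbols. A toy illustration of the mechanism follows; the DEMO_N macro and the demo_private_ prefix are assumptions for the example, not jemalloc's configure-generated names.

#include <stdio.h>

/* Hypothetical stand-in for the configure-generated mangling macro. */
#define DEMO_N(n) demo_private_##n

/* Internal symbols are routed through the mangler, as in the list above. */
#define internal_counter DEMO_N(internal_counter)

static int internal_counter = 0;  /* Actually emitted as demo_private_internal_counter. */

int main(void)
{
	internal_counter++;  /* Call sites keep using the short name. */
	printf("%d\n", demo_private_internal_counter);
	return (0);
}

The merged list also drops the old pthread_create interposition define and the swap-chunk symbols, and adds the tsd_*, opt_*, and quarantine families introduced elsewhere in this merge.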
@@ -4,7 +4,7 @@
 /*
  * Simple linear congruential pseudo-random number generator:
  *
- *   prn(y) = (a*x + c) % m
+ *   prng(y) = (a*x + c) % m
  *
  * where the following constants ensure maximal period:
  *
@@ -25,7 +25,7 @@
  *   uint32_t state       : Seed value.
  *   const uint32_t a, c  : See above discussion.
  */
-#define prn32(r, lg_range, state, a, c) do { \
+#define prng32(r, lg_range, state, a, c) do { \
     assert(lg_range > 0); \
     assert(lg_range <= 32); \
     \
@@ -34,8 +34,8 @@
     r >>= (32 - lg_range); \
 } while (false)
 
-/* Same as prn32(), but 64 bits of pseudo-randomness, using uint64_t. */
-#define prn64(r, lg_range, state, a, c) do { \
+/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
+#define prng64(r, lg_range, state, a, c) do { \
     assert(lg_range > 0); \
     assert(lg_range <= 64); \
     \
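The prn* to prng* change above is purely a rename; the macros still perform the linear congruential step described in the comment and keep only the top lg_range bits of the new state, since with a power-of-two modulus the high bits of an LCG have the longest period. A standalone sketch of what a prng64-style expansion computes, using the multiplier and increment that the profiling header passes to prng64 (see the diff below); the function form and the seed value are illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One LCG step; return the top lg_range bits, as the prng64() macro does. */
static uint64_t
lcg_next(uint64_t *state, unsigned lg_range, uint64_t a, uint64_t c)
{
	*state = (*state * a) + c;
	return (*state >> (64 - lg_range));
}

int main(void)
{
	uint64_t state = 42;  /* Seed; jemalloc seeds this per thread. */
	int i;

	for (i = 0; i < 4; i++) {
		uint64_t r = lcg_next(&state, 53,
		    UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
		printf("%" PRIu64 "\n", r);
	}
	return (0);
}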
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -10,28 +9,41 @@ typedef struct prof_tdata_s prof_tdata_t;
 
 /* Option defaults. */
 #define PROF_PREFIX_DEFAULT "jeprof"
-#define LG_PROF_BT_MAX_DEFAULT 7
-#define LG_PROF_SAMPLE_DEFAULT 0
+#define LG_PROF_SAMPLE_DEFAULT 19
 #define LG_PROF_INTERVAL_DEFAULT -1
-#define LG_PROF_TCMAX_DEFAULT -1
 
 /*
- * Hard limit on stack backtrace depth.  Note that the version of
- * prof_backtrace() that is based on __builtin_return_address() necessarily has
- * a hard-coded number of backtrace frame handlers.
+ * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
+ * is based on __builtin_return_address() necessarily has a hard-coded number
+ * of backtrace frame handlers, and should be kept in sync with this setting.
  */
-#if (defined(JEMALLOC_PROF_LIBGCC) || defined(JEMALLOC_PROF_LIBUNWIND))
-# define LG_PROF_BT_MAX ((ZU(1) << (LG_SIZEOF_PTR+3)) - 1)
-#else
-# define LG_PROF_BT_MAX 7 /* >= LG_PROF_BT_MAX_DEFAULT */
-#endif
-#define PROF_BT_MAX (1U << LG_PROF_BT_MAX)
+#define PROF_BT_MAX 128
+
+/* Maximum number of backtraces to store in each per thread LRU cache. */
+#define PROF_TCMAX 1024
 
 /* Initial hash table size. */
 #define PROF_CKH_MINITEMS 64
 
 /* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUF_SIZE 65536
+#define PROF_DUMP_BUFSIZE 65536
+
+/* Size of stack-allocated buffer used by prof_printf(). */
+#define PROF_PRINTF_BUFSIZE 128
+
+/*
+ * Number of mutexes shared among all ctx's.  No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NCTX_LOCKS 1024
+
+/*
+ * prof_tdata pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
+#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
+#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
@@ -109,8 +121,18 @@ struct prof_ctx_s {
     /* Associated backtrace. */
     prof_bt_t *bt;
 
-    /* Protects cnt_merged and cnts_ql. */
-    malloc_mutex_t lock;
+    /* Protects nlimbo, cnt_merged, and cnts_ql. */
+    malloc_mutex_t *lock;
+
+    /*
+     * Number of threads that currently cause this ctx to be in a state of
+     * limbo due to one of:
+     *   - Initializing per thread counters associated with this ctx.
+     *   - Preparing to destroy this ctx.
+     * nlimbo must be 1 (single destroyer) in order to safely destroy the
+     * ctx.
+     */
+    unsigned nlimbo;
 
     /* Temporary storage for summation during dump. */
     prof_cnt_t cnt_summed;
@@ -145,9 +167,14 @@ struct prof_tdata_s {
     void **vec;
 
     /* Sampling state. */
-    uint64_t prn_state;
+    uint64_t prng_state;
     uint64_t threshold;
     uint64_t accum;
+
+    /* State used to avoid dumping while operating on prof internals. */
+    bool enq;
+    bool enq_idump;
+    bool enq_gdump;
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
@@ -162,13 +189,12 @@ extern bool opt_prof;
  * to notice state changes.
  */
 extern bool opt_prof_active;
-extern size_t opt_lg_prof_bt_max;      /* Maximum backtrace depth. */
 extern size_t opt_lg_prof_sample;      /* Mean bytes between samples. */
 extern ssize_t opt_lg_prof_interval;   /* lg(prof_interval). */
 extern bool opt_prof_gdump;            /* High-water memory dumping. */
+extern bool opt_prof_final;            /* Final profile dumping. */
 extern bool opt_prof_leak;             /* Dump leak summary at exit. */
 extern bool opt_prof_accum;            /* Report cumulative bytes. */
-extern ssize_t opt_lg_prof_tcmax;      /* lg(max per thread backtrace cache) */
 extern char opt_prof_prefix[PATH_MAX + 1];
 
 /*
@@ -186,39 +212,14 @@ extern uint64_t prof_interval;
  */
 extern bool prof_promote;
 
-/* (1U << opt_lg_prof_bt_max). */
-extern unsigned prof_bt_max;
-
-/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
-#ifndef NO_TLS
-extern __thread prof_tdata_t *prof_tdata_tls
-    JEMALLOC_ATTR(tls_model("initial-exec"));
-# define PROF_TCACHE_GET() prof_tdata_tls
-# define PROF_TCACHE_SET(v) do { \
-    prof_tdata_tls = (v); \
-    pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
-} while (0)
-#else
-# define PROF_TCACHE_GET() \
-    ((prof_tdata_t *)pthread_getspecific(prof_tdata_tsd))
-# define PROF_TCACHE_SET(v) do { \
-    pthread_setspecific(prof_tdata_tsd, (void *)(v)); \
-} while (0)
-#endif
-/*
- * Same contents as b2cnt_tls, but initialized such that the TSD destructor is
- * called when a thread exits, so that prof_tdata_tls contents can be merged,
- * unlinked, and deallocated.
- */
-extern pthread_key_t prof_tdata_tsd;
-
 void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max);
+void prof_backtrace(prof_bt_t *bt, unsigned nignore);
 prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
 void prof_idump(void);
 bool prof_mdump(const char *filename);
 void prof_gdump(void);
 prof_tdata_t *prof_tdata_init(void);
+void prof_tdata_cleanup(void *arg);
 void prof_boot0(void);
 void prof_boot1(void);
 bool prof_boot2(void);
@@ -233,13 +234,13 @@ bool prof_boot2(void);
     \
     assert(size == s2u(size)); \
     \
-    prof_tdata = PROF_TCACHE_GET(); \
-    if (prof_tdata == NULL) { \
-        prof_tdata = prof_tdata_init(); \
-        if (prof_tdata == NULL) { \
-            ret = NULL; \
-            break; \
-        } \
-    } \
+    prof_tdata = prof_tdata_get(); \
+    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
+        if (prof_tdata != NULL) \
+            ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+        else \
+            ret = NULL; \
+        break; \
+    } \
     \
     if (opt_prof_active == false) { \
@@ -249,13 +250,13 @@ bool prof_boot2(void);
         /* Don't bother with sampling logic, since sampling */\
         /* interval is 1. */\
         bt_init(&bt, prof_tdata->vec); \
-        prof_backtrace(&bt, nignore, prof_bt_max); \
+        prof_backtrace(&bt, nignore); \
         ret = prof_lookup(&bt); \
     } else { \
         if (prof_tdata->threshold == 0) { \
             /* Initialize.  Seed the prng differently for */\
             /* each thread. */\
-            prof_tdata->prn_state = \
+            prof_tdata->prng_state = \
                 (uint64_t)(uintptr_t)&size; \
             prof_sample_threshold_update(prof_tdata); \
         } \
@@ -272,7 +273,7 @@ bool prof_boot2(void);
         if (size >= prof_tdata->threshold - \
             prof_tdata->accum) { \
             bt_init(&bt, prof_tdata->vec); \
-            prof_backtrace(&bt, nignore, prof_bt_max); \
+            prof_backtrace(&bt, nignore); \
             ret = prof_lookup(&bt); \
         } else \
             ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
@@ -280,6 +281,9 @@ bool prof_boot2(void);
 } while (0)
 
 #ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
+
+prof_tdata_t *prof_tdata_get(void);
 void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
 prof_ctx_t *prof_ctx_get(const void *ptr);
 void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
@@ -291,12 +295,35 @@ void prof_free(const void *ptr, size_t size);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
+/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
+malloc_tsd_externs(prof_tdata, prof_tdata_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
+    prof_tdata_cleanup)
+
+JEMALLOC_INLINE prof_tdata_t *
+prof_tdata_get(void)
+{
+    prof_tdata_t *prof_tdata;
+
+    cassert(config_prof);
+
+    prof_tdata = *prof_tdata_tsd_get();
+    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
+        if (prof_tdata == NULL)
+            prof_tdata = prof_tdata_init();
+    }
+
+    return (prof_tdata);
+}
+
 JEMALLOC_INLINE void
 prof_sample_threshold_update(prof_tdata_t *prof_tdata)
 {
     uint64_t r;
     double u;
 
+    cassert(config_prof);
+
     /*
      * Compute sample threshold as a geometrically distributed random
      * variable with mean (2^opt_lg_prof_sample).
@@ -315,8 +342,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
      *   pp 500
      *   (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
      */
-    prn64(r, 53, prof_tdata->prn_state,
-        (uint64_t)6364136223846793005LLU, (uint64_t)1442695040888963407LLU);
+    prng64(r, 53, prof_tdata->prng_state,
+        UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
     u = (double)r * (1.0/9007199254740992.0L);
     prof_tdata->threshold = (uint64_t)(log(u) /
         log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
@@ -329,13 +356,12 @@ prof_ctx_get(const void *ptr)
     prof_ctx_t *ret;
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
-
         ret = arena_prof_ctx_get(ptr);
     } else
         ret = huge_prof_ctx_get(ptr);
@@ -348,13 +374,12 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 {
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
-
         arena_prof_ctx_set(ptr, ctx);
     } else
         huge_prof_ctx_set(ptr, ctx);
@@ -365,11 +390,13 @@ prof_sample_accum_update(size_t size)
 {
     prof_tdata_t *prof_tdata;
 
+    cassert(config_prof);
     /* Sampling logic is unnecessary if the interval is 1. */
     assert(opt_lg_prof_sample != 0);
 
-    prof_tdata = PROF_TCACHE_GET();
-    assert(prof_tdata != NULL);
+    prof_tdata = *prof_tdata_tsd_get();
+    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+        return (true);
 
     /* Take care to avoid integer overflow. */
     if (size >= prof_tdata->threshold - prof_tdata->accum) {
@@ -391,8 +418,9 @@ JEMALLOC_INLINE void
 prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
 {
 
+    cassert(config_prof);
     assert(ptr != NULL);
-    assert(size == isalloc(ptr));
+    assert(size == isalloc(ptr, true));
 
     if (opt_lg_prof_sample != 0) {
         if (prof_sample_accum_update(size)) {
@@ -437,10 +465,11 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 {
     prof_thr_cnt_t *told_cnt;
 
+    cassert(config_prof);
     assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
 
     if (ptr != NULL) {
-        assert(size == isalloc(ptr));
+        assert(size == isalloc(ptr, true));
         if (opt_lg_prof_sample != 0) {
             if (prof_sample_accum_update(size)) {
                 /*
@@ -463,10 +492,10 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
              * It's too late to propagate OOM for this realloc(),
              * so operate directly on old_cnt->ctx->cnt_merged.
              */
-            malloc_mutex_lock(&old_ctx->lock);
+            malloc_mutex_lock(old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_size;
-            malloc_mutex_unlock(&old_ctx->lock);
+            malloc_mutex_unlock(old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
         }
     } else
@@ -510,9 +539,12 @@ prof_free(const void *ptr, size_t size)
 {
     prof_ctx_t *ctx = prof_ctx_get(ptr);
 
+    cassert(config_prof);
+
     if ((uintptr_t)ctx > (uintptr_t)1) {
-        assert(size == isalloc(ptr));
-        prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
+        prof_thr_cnt_t *tcnt;
+        assert(size == isalloc(ptr, true));
+        tcnt = prof_lookup(ctx->bt);
 
         if (tcnt != NULL) {
             tcnt->epoch++;
@@ -533,10 +565,10 @@ prof_free(const void *ptr, size_t size)
              * OOM during free() cannot be propagated, so operate
              * directly on cnt->ctx->cnt_merged.
              */
-            malloc_mutex_lock(&ctx->lock);
+            malloc_mutex_lock(ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
-            malloc_mutex_unlock(&ctx->lock);
+            malloc_mutex_unlock(ctx->lock);
         }
     }
 }
@@ -544,4 +576,3 @@ prof_free(const void *ptr, size_t size)
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
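prof_sample_threshold_update() above draws the next sample threshold as a geometric random variable with mean 2^opt_lg_prof_sample: u is a uniform deviate in (0, 1) built from 53 random bits, and the threshold follows log(u)/log(1 - p) with p = 1/2^lg_sample. A small standalone check of that formula; the +1 offset and the averaging loop are assumptions made for the demonstration, not quoted from the hunks above. Build with -lm.

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Draw one geometric(p) threshold from a uniform deviate u in (0, 1). */
static uint64_t
sample_threshold(double u, unsigned lg_sample)
{
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);

	return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
}

int main(void)
{
	unsigned lg_sample = 19;	/* LG_PROF_SAMPLE_DEFAULT in the merged header. */
	double sum = 0.0;
	int i, n = 100000;

	srand(1);
	for (i = 0; i < n; i++) {
		double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 2.0);
		sum += (double)sample_threshold(u, lg_sample);
	}
	/* The empirical mean should land near 2^19 = 524288 bytes between samples. */
	printf("mean threshold ~= %.0f\n", sum / n);
	return (0);
}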
@@ -1,9 +1,9 @@
-#ifndef JEMALLOC_ZONE
-# error "This source file is for zones on Darwin (OS X)."
-#endif
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
+/* Default per thread quarantine size if valgrind is enabled. */
+#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
@@ -12,8 +12,8 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-malloc_zone_t *create_zone(void);
-void szone2ozone(malloc_zone_t *zone);
+void quarantine(void *ptr);
+bool quarantine_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -21,3 +21,4 @@ void szone2ozone(malloc_zone_t *zone);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
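The new externs above only expose quarantine(ptr) and quarantine_boot(); the idea they serve is to delay reuse of freed memory (valuable under Valgrind) by parking pointers in a bounded per-thread buffer, with JEMALLOC_VALGRIND_QUARANTINE_DEFAULT as the default byte budget. A self-contained toy version of that idea follows; the ring size, the slot-based bound, and all names are illustrative assumptions, not jemalloc's implementation, which bounds by bytes and integrates with its own allocator.

#include <stdio.h>
#include <stdlib.h>

#define QUARANTINE_SLOTS 4	/* Illustrative; jemalloc bounds by bytes, not slots. */

static void *quarantine_ring[QUARANTINE_SLOTS];
static unsigned quarantine_head;

/* Park a freed pointer; actually release the pointer it evicts. */
static void
quarantine(void *ptr)
{
	void *evicted = quarantine_ring[quarantine_head];

	quarantine_ring[quarantine_head] = ptr;
	quarantine_head = (quarantine_head + 1) % QUARANTINE_SLOTS;
	free(evicted);	/* free(NULL) is a no-op while the ring fills. */
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		quarantine(malloc(16));	/* Each block is reused only after 4 later frees. */
	for (i = 0; i < QUARANTINE_SLOTS; i++)
		free(quarantine_ring[i]);	/* Drain at exit. */
	printf("done\n");
	return (0);
}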
@@ -223,88 +223,88 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
  * The following API is generated:
  *
  *   static void
- *   ex_new(ex_t *extree);
+ *   ex_new(ex_t *tree);
  *       Description: Initialize a red-black tree structure.
  *       Args:
- *         extree: Pointer to an uninitialized red-black tree object.
+ *         tree: Pointer to an uninitialized red-black tree object.
  *
  *   static ex_node_t *
- *   ex_first(ex_t *extree);
+ *   ex_first(ex_t *tree);
  *   static ex_node_t *
- *   ex_last(ex_t *extree);
- *       Description: Get the first/last node in extree.
+ *   ex_last(ex_t *tree);
+ *       Description: Get the first/last node in tree.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
- *       Ret: First/last node in extree, or NULL if extree is empty.
+ *         tree: Pointer to an initialized red-black tree object.
+ *       Ret: First/last node in tree, or NULL if tree is empty.
  *
  *   static ex_node_t *
- *   ex_next(ex_t *extree, ex_node_t *node);
+ *   ex_next(ex_t *tree, ex_node_t *node);
  *   static ex_node_t *
- *   ex_prev(ex_t *extree, ex_node_t *node);
+ *   ex_prev(ex_t *tree, ex_node_t *node);
  *       Description: Get node's successor/predecessor.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
- *         node  : A node in extree.
- *       Ret: node's successor/predecessor in extree, or NULL if node is
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: A node in tree.
+ *       Ret: node's successor/predecessor in tree, or NULL if node is
  *            last/first.
  *
  *   static ex_node_t *
- *   ex_search(ex_t *extree, ex_node_t *key);
+ *   ex_search(ex_t *tree, ex_node_t *key);
  *       Description: Search for node that matches key.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
+ *         tree: Pointer to an initialized red-black tree object.
  *         key : Search key.
- *       Ret: Node in extree that matches key, or NULL if no match.
+ *       Ret: Node in tree that matches key, or NULL if no match.
  *
  *   static ex_node_t *
- *   ex_nsearch(ex_t *extree, ex_node_t *key);
+ *   ex_nsearch(ex_t *tree, ex_node_t *key);
  *   static ex_node_t *
- *   ex_psearch(ex_t *extree, ex_node_t *key);
+ *   ex_psearch(ex_t *tree, ex_node_t *key);
  *       Description: Search for node that matches key.  If no match is found,
  *                    return what would be key's successor/predecessor, were
- *                    key in extree.
+ *                    key in tree.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
+ *         tree: Pointer to an initialized red-black tree object.
  *         key : Search key.
- *       Ret: Node in extree that matches key, or if no match, hypothetical
- *            node's successor/predecessor (NULL if no successor/predecessor).
+ *       Ret: Node in tree that matches key, or if no match, hypothetical node's
+ *            successor/predecessor (NULL if no successor/predecessor).
  *
  *   static void
- *   ex_insert(ex_t *extree, ex_node_t *node);
- *       Description: Insert node into extree.
+ *   ex_insert(ex_t *tree, ex_node_t *node);
+ *       Description: Insert node into tree.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
- *         node  : Node to be inserted into extree.
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: Node to be inserted into tree.
  *
  *   static void
- *   ex_remove(ex_t *extree, ex_node_t *node);
- *       Description: Remove node from extree.
+ *   ex_remove(ex_t *tree, ex_node_t *node);
+ *       Description: Remove node from tree.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
- *         node  : Node in extree to be removed.
+ *         tree: Pointer to an initialized red-black tree object.
+ *         node: Node in tree to be removed.
  *
  *   static ex_node_t *
- *   ex_iter(ex_t *extree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
+ *   ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
  *     ex_node_t *, void *), void *arg);
  *   static ex_node_t *
- *   ex_reverse_iter(ex_t *extree, ex_node_t *start, ex_node *(*cb)(ex_t *,
+ *   ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *,
  *     ex_node_t *, void *), void *arg);
- *       Description: Iterate forward/backward over extree, starting at node.
- *                    If extree is modified, iteration must be immediately
+ *       Description: Iterate forward/backward over tree, starting at node.  If
+ *                    tree is modified, iteration must be immediately
  *                    terminated by the callback function that causes the
  *                    modification.
  *       Args:
- *         extree: Pointer to an initialized red-black tree object.
- *         start : Node at which to start iteration, or NULL to start at
+ *         tree : Pointer to an initialized red-black tree object.
+ *         start: Node at which to start iteration, or NULL to start at
  *                 first/last node.
  *         cb    : Callback function, which is called for each node during
  *                 iteration.  Under normal circumstances the callback function
  *                 should return NULL, which causes iteration to continue.  If a
  *                 callback function returns non-NULL, iteration is immediately
  *                 terminated and the non-NULL return value is returned by the
  *                 iterator.  This is useful for re-starting iteration after
- *                 modifying extree.
+ *                 modifying tree.
  *         arg   : Opaque pointer passed to cb().
  *       Ret: NULL if iteration completed, or the non-NULL callback return value
  *            that caused termination of the iteration.
  */
include/jemalloc/internal/size_classes.sh (new executable file, 122 lines)
@@ -0,0 +1,122 @@
+#!/bin/sh
+
+# The following limits are chosen such that they cover all supported platforms.
+
+# Range of quanta.
+lg_qmin=3
+lg_qmax=4
+
+# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
+lg_tmin=3
+
+# Range of page sizes.
+lg_pmin=12
+lg_pmax=16
+
+pow2() {
+  e=$1
+  pow2_result=1
+  while [ ${e} -gt 0 ] ; do
+    pow2_result=$((${pow2_result} + ${pow2_result}))
+    e=$((${e} - 1))
+  done
+}
+
+cat <<EOF
+/* This file was automatically generated by size_classes.sh. */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+EOF
+
+lg_q=${lg_qmin}
+while [ ${lg_q} -le ${lg_qmax} ] ; do
+  lg_t=${lg_tmin}
+  while [ ${lg_t} -le ${lg_q} ] ; do
+    lg_p=${lg_pmin}
+    while [ ${lg_p} -le ${lg_pmax} ] ; do
+      echo "#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
+      echo "#define SIZE_CLASSES_DEFINED"
+      pow2 ${lg_q}; q=${pow2_result}
+      pow2 ${lg_t}; t=${pow2_result}
+      pow2 ${lg_p}; p=${pow2_result}
+      bin=0
+      psz=0
+      sz=${t}
+      delta=$((${sz} - ${psz}))
+      echo "/* SIZE_CLASS(bin, delta, sz) */"
+      echo "#define SIZE_CLASSES \\"
+
+      # Tiny size classes.
+      while [ ${sz} -lt ${q} ] ; do
+        echo "    SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
+        bin=$((${bin} + 1))
+        psz=${sz}
+        sz=$((${sz} + ${sz}))
+        delta=$((${sz} - ${psz}))
+      done
+      # Quantum-multiple size classes.  For each doubling of sz, as many as 4
+      # size classes exist.  Their spacing is the greater of:
+      # - q
+      # - sz/4, where sz is a power of 2
+      while [ ${sz} -lt ${p} ] ; do
+        if [ ${sz} -ge $((${q} * 4)) ] ; then
+          i=$((${sz} / 4))
+        else
+          i=${q}
+        fi
+        next_2pow=$((${sz} * 2))
+        while [ ${sz} -lt $next_2pow ] ; do
+          echo "    SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
+          bin=$((${bin} + 1))
+          psz=${sz}
+          sz=$((${sz} + ${i}))
+          delta=$((${sz} - ${psz}))
+        done
+      done
+      echo
+      echo "#define NBINS ${bin}"
+      echo "#define SMALL_MAXCLASS ${psz}"
+      echo "#endif"
+      echo
+      lg_p=$((${lg_p} + 1))
+    done
+    lg_t=$((${lg_t} + 1))
+  done
+  lg_q=$((${lg_q} + 1))
+done
+
+cat <<EOF
+#ifndef SIZE_CLASSES_DEFINED
+# error "No size class definitions match configuration"
+#endif
+#undef SIZE_CLASSES_DEFINED
+/*
+ * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
+ * cannot support more than 256 small size classes.  Further constrain NBINS to
+ * 255 to support prof_promote, since all small size classes, plus a "not
+ * small" size class must be stored in 8 bits of arena_chunk_map_t's bits
+ * field.
+ */
+#if (NBINS > 255)
+# error "Too many small size classes"
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
|
#ifdef JEMALLOC_H_INLINES
|
||||||
|
|
||||||
|
|
||||||
|
#endif /* JEMALLOC_H_INLINES */
|
||||||
|
/******************************************************************************/
|
||||||
|
EOF
|
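To make the generator's spacing rule concrete, here is roughly what the loop above would emit for one common configuration (LG_TINY_MIN=3, LG_QUANTUM=4, LG_PAGE=12). The values are computed by hand from the script's arithmetic and abbreviated in the middle; they are illustrative, not copied from a generated header.

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0,   8,    8) \
    SIZE_CLASS(1,   8,   16) \
    SIZE_CLASS(2,  16,   32) \
    SIZE_CLASS(3,  16,   48) \
    SIZE_CLASS(4,  16,   64) \
    /* ...quantum spacing, then sz/4 spacing above 4*quantum... */ \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584)

#define NBINS 28
#define SMALL_MAXCLASS 3584
#endif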
include/jemalloc/internal/stats.h
@@ -1,25 +1,16 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
-#define UMAX2S_BUFSIZE 65
-
-#ifdef JEMALLOC_STATS
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct arena_stats_s arena_stats_t;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 typedef struct chunk_stats_s chunk_stats_t;
-#endif
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-#ifdef JEMALLOC_STATS
-
-#ifdef JEMALLOC_TCACHE
 struct tcache_bin_stats_s {
     /*
      * Number of allocation requests that corresponded to the size of this
@@ -27,7 +18,6 @@ struct tcache_bin_stats_s {
      */
     uint64_t nrequests;
 };
-#endif
 
 struct malloc_bin_stats_s {
     /*
@@ -52,13 +42,11 @@ struct malloc_bin_stats_s {
      */
     uint64_t nrequests;
 
-#ifdef JEMALLOC_TCACHE
     /* Number of tcache fills from this bin. */
     uint64_t nfills;
 
     /* Number of tcache flushes to this bin. */
     uint64_t nflushes;
-#endif
 
     /* Total number of runs created for this bin's size class. */
     uint64_t nruns;
@@ -69,9 +57,6 @@ struct malloc_bin_stats_s {
      */
     uint64_t reruns;
 
-    /* High-water mark for this bin. */
-    size_t highruns;
-
     /* Current number of runs in this bin. */
     size_t curruns;
 };
@@ -93,9 +78,6 @@ struct malloc_large_stats_s {
      */
     uint64_t nrequests;
 
-    /* High-water mark for this size class. */
-    size_t highruns;
-
     /* Current number of runs of this size class. */
     size_t curruns;
 };
@@ -127,14 +109,10 @@ struct arena_stats_s {
      */
     malloc_large_stats_t *lstats;
 };
-#endif /* JEMALLOC_STATS */
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 struct chunk_stats_s {
-#  ifdef JEMALLOC_STATS
     /* Number of chunks that were allocated. */
     uint64_t nchunks;
-#  endif
 
     /* High-water mark for number of chunks allocated. */
     size_t highchunks;
@@ -146,7 +124,6 @@ struct chunk_stats_s {
      */
     size_t curchunks;
 };
-#endif /* JEMALLOC_STATS */
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
@@ -154,24 +131,14 @@ struct chunk_stats_s {
 
 extern bool opt_stats_print;
 
-#ifdef JEMALLOC_STATS
 extern size_t stats_cactive;
-#endif
 
-char *u2s(uint64_t x, unsigned base, char *s);
-#ifdef JEMALLOC_STATS
-void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
-    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
-void malloc_printf(const char *format, ...)
-    JEMALLOC_ATTR(format(printf, 1, 2));
-#endif
 void stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
-#ifdef JEMALLOC_STATS
 
 #ifndef JEMALLOC_ENABLE_INLINE
 size_t stats_cactive_get(void);
@@ -202,6 +169,5 @@ stats_cactive_sub(size_t size)
 }
 #endif
 
-#endif /* JEMALLOC_STATS */
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
include/jemalloc/internal/tcache.h
@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_TCACHE
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -6,6 +5,16 @@ typedef struct tcache_bin_info_s tcache_bin_info_t;
 typedef struct tcache_bin_s tcache_bin_t;
 typedef struct tcache_s tcache_t;
 
+/*
+ * tcache pointers close to NULL are used to encode state information that is
+ * used for two purposes: preventing thread caching on a per thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define TCACHE_STATE_DISABLED      ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED  ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY     ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX           TCACHE_STATE_PURGATORY
+
 /*
  * Absolute maximum number of cache slots for each small bin in the thread
  * cache.  This is an additional constraint beyond that imposed as: twice the
@@ -22,17 +31,26 @@ typedef struct tcache_s tcache_t;
 #define LG_TCACHE_MAXCLASS_DEFAULT 15
 
 /*
- * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
- * events between full GC sweeps (-1: disabled).  Integer rounding may cause
- * the actual number to be slightly higher, since GC is performed
- * incrementally.
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps.  Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
  */
-#define LG_TCACHE_GC_SWEEP_DEFAULT 13
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
+typedef enum {
+    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
+    tcache_enabled_true    = 1,
+    tcache_enabled_default = 2
+} tcache_enabled_t;
+
 /*
  * Read-only information associated with each element of tcache_t's tbins array
  * is stored separately, mainly to reduce memory usage.
@@ -42,9 +60,7 @@ struct tcache_bin_info_s {
 };
 
 struct tcache_bin_s {
-#  ifdef JEMALLOC_STATS
     tcache_bin_stats_t tstats;
-#  endif
     int      low_water;   /* Min # cached since last GC. */
     unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
     unsigned ncached;     /* # of cached objects. */
@@ -52,12 +68,8 @@ struct tcache_bin_s {
 };
 
 struct tcache_s {
-#  ifdef JEMALLOC_STATS
     ql_elm(tcache_t) link;        /* Used for aggregating stats. */
-#  endif
-#  ifdef JEMALLOC_PROF
     uint64_t prof_accumbytes;     /* Cleared after arena_prof_accum() */
-#  endif
     arena_t  *arena;              /* This thread's arena. */
     unsigned ev_cnt;              /* Event count since incremental GC. */
     unsigned next_gc_bin;         /* Next bin to GC. */
@@ -76,29 +88,11 @@ struct tcache_s {
 
 extern bool opt_tcache;
 extern ssize_t opt_lg_tcache_max;
-extern ssize_t opt_lg_tcache_gc_sweep;
 
 extern tcache_bin_info_t *tcache_bin_info;
 
-/* Map of thread-specific caches. */
-#ifndef NO_TLS
-extern __thread tcache_t *tcache_tls
-    JEMALLOC_ATTR(tls_model("initial-exec"));
-#  define TCACHE_GET() tcache_tls
-#  define TCACHE_SET(v) do { \
-    tcache_tls = (tcache_t *)(v); \
-    pthread_setspecific(tcache_tsd, (void *)(v)); \
-} while (0)
-#else
-#  define TCACHE_GET() ((tcache_t *)pthread_getspecific(tcache_tsd))
-#  define TCACHE_SET(v) do { \
-    pthread_setspecific(tcache_tsd, (void *)(v)); \
-} while (0)
-#endif
-extern pthread_key_t tcache_tsd;
-
 /*
- * Number of tcache bins.  There are nbins small-object bins, plus 0 or more
+ * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
  * large-object bins.
  */
 extern size_t nhbins;
@@ -106,68 +100,159 @@ extern size_t nhbins;
 /* Maximum cached size class. */
 extern size_t tcache_maxclass;
 
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-extern unsigned tcache_gc_incr;
-
-void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
-void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
-tcache_t *tcache_create(arena_t *arena);
+size_t tcache_salloc(const void *ptr);
+void tcache_event_hard(tcache_t *tcache);
 void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
     size_t binind);
+void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_dissociate(tcache_t *tcache);
+tcache_t *tcache_create(arena_t *arena);
 void tcache_destroy(tcache_t *tcache);
-#ifdef JEMALLOC_STATS
+void tcache_thread_cleanup(void *arg);
 void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-#endif
-bool tcache_boot(void);
+bool tcache_boot0(void);
+bool tcache_boot1(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
+malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
+
 void tcache_event(tcache_t *tcache);
-tcache_t *tcache_get(void);
+void tcache_flush(void);
+bool tcache_enabled_get(void);
+tcache_t *tcache_get(bool create);
+void tcache_enabled_set(bool enabled);
 void *tcache_alloc_easy(tcache_bin_t *tbin);
 void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
 void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
-void tcache_dalloc_small(tcache_t *tcache, void *ptr);
+void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
 void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-JEMALLOC_INLINE tcache_t *
-tcache_get(void)
+/* Map of thread-specific caches. */
+malloc_tsd_externs(tcache, tcache_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
+    tcache_thread_cleanup)
+/* Per thread flag that allows thread caches to be disabled. */
+malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
+malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
+    tcache_enabled_default, malloc_tsd_no_cleanup)
+
+JEMALLOC_INLINE void
+tcache_flush(void)
 {
     tcache_t *tcache;
 
-    if ((isthreaded & opt_tcache) == false)
+    cassert(config_tcache);
+
+    tcache = *tcache_tsd_get();
+    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
+        return;
+    tcache_destroy(tcache);
+    tcache = NULL;
+    tcache_tsd_set(&tcache);
+}
+
+JEMALLOC_INLINE bool
+tcache_enabled_get(void)
+{
+    tcache_enabled_t tcache_enabled;
+
+    cassert(config_tcache);
+
+    tcache_enabled = *tcache_enabled_tsd_get();
+    if (tcache_enabled == tcache_enabled_default) {
+        tcache_enabled = (tcache_enabled_t)opt_tcache;
+        tcache_enabled_tsd_set(&tcache_enabled);
+    }
+
+    return ((bool)tcache_enabled);
+}
+
+JEMALLOC_INLINE void
+tcache_enabled_set(bool enabled)
+{
+    tcache_enabled_t tcache_enabled;
+    tcache_t *tcache;
+
+    cassert(config_tcache);
+
+    tcache_enabled = (tcache_enabled_t)enabled;
+    tcache_enabled_tsd_set(&tcache_enabled);
+    tcache = *tcache_tsd_get();
+    if (enabled) {
+        if (tcache == TCACHE_STATE_DISABLED) {
+            tcache = NULL;
+            tcache_tsd_set(&tcache);
+        }
+    } else /* disabled */ {
+        if (tcache > TCACHE_STATE_MAX) {
+            tcache_destroy(tcache);
+            tcache = NULL;
+        }
+        if (tcache == NULL) {
+            tcache = TCACHE_STATE_DISABLED;
+            tcache_tsd_set(&tcache);
+        }
+    }
+}
+
+JEMALLOC_INLINE tcache_t *
+tcache_get(bool create)
+{
+    tcache_t *tcache;
+
+    if (config_tcache == false)
+        return (NULL);
+    if (config_lazy_lock && isthreaded == false)
         return (NULL);
 
-    tcache = TCACHE_GET();
-    if ((uintptr_t)tcache <= (uintptr_t)2) {
+    tcache = *tcache_tsd_get();
+    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
+        if (tcache == TCACHE_STATE_DISABLED)
+            return (NULL);
         if (tcache == NULL) {
-            tcache = tcache_create(choose_arena());
-            if (tcache == NULL)
-                return (NULL);
-        } else {
-            if (tcache == (void *)(uintptr_t)1) {
+            if (create == false) {
                 /*
-                 * Make a note that an allocator function was
-                 * called after the tcache_thread_cleanup() was
-                 * called.
+                 * Creating a tcache here would cause
+                 * allocation as a side effect of free().
+                 * Ordinarily that would be okay since
+                 * tcache_create() failure is a soft failure
+                 * that doesn't propagate.  However, if TLS
+                 * data are freed via free() as in glibc,
+                 * subtle corruption could result from setting
+                 * a TLS variable after its backing memory is
+                 * freed.
                  */
-                TCACHE_SET((uintptr_t)2);
+                return (NULL);
             }
+            if (tcache_enabled_get() == false) {
+                tcache_enabled_set(false); /* Memoize. */
+                return (NULL);
+            }
+            return (tcache_create(choose_arena(NULL)));
+        }
+        if (tcache == TCACHE_STATE_PURGATORY) {
+            /*
+             * Make a note that an allocator function was called
+             * after tcache_thread_cleanup() was called.
+             */
+            tcache = TCACHE_STATE_REINCARNATED;
+            tcache_tsd_set(&tcache);
             return (NULL);
         }
+        if (tcache == TCACHE_STATE_REINCARNATED)
+            return (NULL);
+        not_reached();
     }
 
     return (tcache);
@@ -177,60 +262,13 @@ JEMALLOC_INLINE void
 tcache_event(tcache_t *tcache)
 {
 
-    if (tcache_gc_incr == 0)
+    if (TCACHE_GC_INCR == 0)
         return;
 
     tcache->ev_cnt++;
-    assert(tcache->ev_cnt <= tcache_gc_incr);
-    if (tcache->ev_cnt == tcache_gc_incr) {
-        size_t binind = tcache->next_gc_bin;
-        tcache_bin_t *tbin = &tcache->tbins[binind];
-        tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
-
-        if (tbin->low_water > 0) {
-            /*
-             * Flush (ceiling) 3/4 of the objects below the low
-             * water mark.
-             */
-            if (binind < nbins) {
-                tcache_bin_flush_small(tbin, binind,
-                    tbin->ncached - tbin->low_water +
-                    (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-                    , tcache
-#endif
-                    );
-            } else {
-                tcache_bin_flush_large(tbin, binind,
-                    tbin->ncached - tbin->low_water +
-                    (tbin->low_water >> 2)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-                    , tcache
-#endif
-                    );
-            }
-            /*
-             * Reduce fill count by 2X.  Limit lg_fill_div such that
-             * the fill count is always at least 1.
-             */
-            if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
-                >= 1)
-                tbin->lg_fill_div++;
-        } else if (tbin->low_water < 0) {
-            /*
-             * Increase fill count by 2X.  Make sure lg_fill_div
-             * stays greater than 0.
-             */
-            if (tbin->lg_fill_div > 1)
-                tbin->lg_fill_div--;
-        }
-        tbin->low_water = tbin->ncached;
-
-        tcache->next_gc_bin++;
-        if (tcache->next_gc_bin == nhbins)
-            tcache->next_gc_bin = 0;
-        tcache->ev_cnt = 0;
-    }
+    assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+    if (tcache->ev_cnt == TCACHE_GC_INCR)
+        tcache_event_hard(tcache);
 }
 
 JEMALLOC_INLINE void *
@@ -257,7 +295,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
     tcache_bin_t *tbin;
 
     binind = SMALL_SIZE2BIN(size);
-    assert(binind < nbins);
+    assert(binind < NBINS);
     tbin = &tcache->tbins[binind];
     ret = tcache_alloc_easy(tbin);
     if (ret == NULL) {
@@ -265,24 +303,29 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
         if (ret == NULL)
             return (NULL);
     }
-    assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
+    assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
 
     if (zero == false) {
-#ifdef JEMALLOC_FILL
-        if (opt_junk)
-            memset(ret, 0xa5, size);
-        else if (opt_zero)
-            memset(ret, 0, size);
-#endif
-    } else
+        if (config_fill) {
+            if (opt_junk) {
+                arena_alloc_junk_small(ret,
+                    &arena_bin_info[binind], false);
+            } else if (opt_zero)
+                memset(ret, 0, size);
+        }
+    } else {
+        if (config_fill && opt_junk) {
+            arena_alloc_junk_small(ret, &arena_bin_info[binind],
+                true);
+        }
+        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
         memset(ret, 0, size);
+    }
 
-#ifdef JEMALLOC_STATS
+    if (config_stats)
         tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-    tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
-#endif
+    if (config_prof)
+        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
     tcache_event(tcache);
     return (ret);
 }
@@ -296,7 +339,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 
     size = PAGE_CEILING(size);
     assert(size <= tcache_maxclass);
-    binind = nbins + (size >> PAGE_SHIFT) - 1;
+    binind = NBINS + (size >> LG_PAGE) - 1;
     assert(binind < nhbins);
     tbin = &tcache->tbins[binind];
     ret = tcache_alloc_easy(tbin);
@@ -309,28 +352,30 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
         if (ret == NULL)
             return (NULL);
     } else {
-#ifdef JEMALLOC_PROF
-        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-        size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-            PAGE_SHIFT);
-        chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
-#endif
+        if (config_prof && prof_promote && size == PAGE) {
+            arena_chunk_t *chunk =
+                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
+                LG_PAGE);
+            arena_mapbits_large_binind_set(chunk, pageind,
+                BININD_INVALID);
+        }
         if (zero == false) {
-#ifdef JEMALLOC_FILL
-            if (opt_junk)
-                memset(ret, 0xa5, size);
-            else if (opt_zero)
-                memset(ret, 0, size);
-#endif
-        } else
+            if (config_fill) {
+                if (opt_junk)
+                    memset(ret, 0xa5, size);
+                else if (opt_zero)
+                    memset(ret, 0, size);
+            }
+        } else {
+            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
             memset(ret, 0, size);
+        }
 
-#ifdef JEMALLOC_STATS
+        if (config_stats)
             tbin->tstats.nrequests++;
-#endif
-#ifdef JEMALLOC_PROF
-        tcache->prof_accumbytes += size;
-#endif
+        if (config_prof)
+            tcache->prof_accumbytes += size;
     }
 
     tcache_event(tcache);
@@ -338,45 +383,21 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 }
 
 JEMALLOC_INLINE void
-tcache_dalloc_small(tcache_t *tcache, void *ptr)
+tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
 {
-    arena_t *arena;
-    arena_chunk_t *chunk;
-    arena_run_t *run;
-    arena_bin_t *bin;
     tcache_bin_t *tbin;
     tcache_bin_info_t *tbin_info;
-    size_t pageind, binind;
-    arena_chunk_map_t *mapelm;
 
-    assert(arena_salloc(ptr) <= small_maxclass);
+    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
 
-    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    arena = chunk->arena;
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
-    mapelm = &chunk->map[pageind-map_bias];
-    run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-        (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
-    dassert(run->magic == ARENA_RUN_MAGIC);
-    bin = run->bin;
-    binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
-        sizeof(arena_bin_t);
-    assert(binind < nbins);
-
-#ifdef JEMALLOC_FILL
-    if (opt_junk)
-        memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
-#endif
+    if (config_fill && opt_junk)
+        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
 
     tbin = &tcache->tbins[binind];
     tbin_info = &tcache_bin_info[binind];
     if (tbin->ncached == tbin_info->ncached_max) {
         tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-            1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-            , tcache
-#endif
-            );
+            1), tcache);
     }
     assert(tbin->ncached < tbin_info->ncached_max);
     tbin->avail[tbin->ncached] = ptr;
@@ -388,35 +409,24 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
 JEMALLOC_INLINE void
 tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 {
-    arena_t *arena;
-    arena_chunk_t *chunk;
-    size_t pageind, binind;
+    size_t binind;
     tcache_bin_t *tbin;
     tcache_bin_info_t *tbin_info;
 
     assert((size & PAGE_MASK) == 0);
-    assert(arena_salloc(ptr) > small_maxclass);
-    assert(arena_salloc(ptr) <= tcache_maxclass);
+    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
+    assert(tcache_salloc(ptr) <= tcache_maxclass);
 
-    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    arena = chunk->arena;
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
-    binind = nbins + (size >> PAGE_SHIFT) - 1;
+    binind = NBINS + (size >> LG_PAGE) - 1;
 
-#ifdef JEMALLOC_FILL
-    if (opt_junk)
+    if (config_fill && opt_junk)
         memset(ptr, 0x5a, size);
-#endif
 
     tbin = &tcache->tbins[binind];
     tbin_info = &tcache_bin_info[binind];
     if (tbin->ncached == tbin_info->ncached_max) {
         tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-            1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-            , tcache
-#endif
-            );
+            1), tcache);
     }
     assert(tbin->ncached < tbin_info->ncached_max);
     tbin->avail[tbin->ncached] = ptr;
@@ -428,4 +438,3 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_TCACHE */
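To make the incremental-GC pacing concrete: TCACHE_GC_SWEEP is fixed at 8192 above, and if NBINS is, say, 28 (the value the size-class script would emit for a 16-byte quantum and 4 KiB pages), TCACHE_GC_INCR works out as sketched below. The numbers are illustrative arithmetic, not values copied from a generated header.

/*
 * Illustrative expansion of TCACHE_GC_INCR, assuming NBINS == 28:
 *
 *   TCACHE_GC_SWEEP / NBINS              == 8192 / 28  == 292 (integer division)
 *   (TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1             == 1
 *   TCACHE_GC_INCR                       == 292 + 1    == 293
 *
 * So tcache_event() calls tcache_event_hard() roughly every 293
 * allocation/deallocation events, garbage-collecting one bin per call and
 * cycling through all nhbins bins over approximately TCACHE_GC_SWEEP events.
 */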
include/jemalloc/internal/tsd.h  (new file, 397 lines)
@@ -0,0 +1,397 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Maximum number of malloc_tsd users with cleanup functions. */
#define MALLOC_TSD_CLEANUPS_MAX 8

typedef bool (*malloc_tsd_cleanup_t)(void);

/*
 * TLS/TSD-agnostic macro-based implementation of thread-specific data.  There
 * are four macros that support (at least) three use cases: file-private,
 * library-private, and library-private inlined.  Following is an example
 * library-private tsd variable:
 *
 * In example.h:
 *   typedef struct {
 *       int x;
 *       int y;
 *   } example_t;
 *   #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
 *   malloc_tsd_protos(, example, example_t *)
 *   malloc_tsd_externs(example, example_t *)
 * In example.c:
 *   malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
 *   malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
 *       example_tsd_cleanup)
 *
 * The result is a set of generated functions, e.g.:
 *
 *   bool example_tsd_boot(void) {...}
 *   example_t **example_tsd_get() {...}
 *   void example_tsd_set(example_t **val) {...}
 *
 * Note that all of the functions deal in terms of (a_type *) rather than
 * (a_type) so that it is possible to support non-pointer types (unlike
 * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
 * cast to (void *).  This means that the cleanup function needs to cast *and*
 * dereference the function argument, e.g.:
 *
 *   void
 *   example_tsd_cleanup(void *arg)
 *   {
 *       example_t *example = *(example_t **)arg;
 *
 *       [...]
 *       if ([want the cleanup function to be called again]) {
 *           example_tsd_set(&example);
 *       }
 *   }
 *
 * If example_tsd_set() is called within example_tsd_cleanup(), it will be
 * called again.  This is similar to how pthreads TSD destruction works, except
 * that pthreads only calls the cleanup function again if the value was set to
 * non-NULL.
 */

/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
a_name##_tsd_boot(void); \
a_attr a_type * \
a_name##_tsd_get(void); \
a_attr void \
a_name##_tsd_set(a_type *val);

/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern __thread bool a_name##_initialized; \
extern bool a_name##_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#endif

/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
    a_name##_initialized = false; \
a_attr bool a_name##_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
    a_name##_tls = a_initializer; \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr DWORD a_name##_tsd; \
a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
a_attr bool a_name##_booted = false;
#endif

/* malloc_tsd_funcs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
\
    if (a_name##_initialized) { \
        a_name##_initialized = false; \
        a_cleanup(&a_name##_tls); \
    } \
    return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        a_name##_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
            return (true); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)(&a_name##_tls))) { \
            malloc_write("<jemalloc>: Error" \
                " setting TSD for "#a_name"\n"); \
            if (opt_abort) \
                abort(); \
        } \
    } \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
    if (wrapper == NULL) \
        return (false); \
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        a_type val = wrapper->val; \
        a_type tsd_static_data = a_initializer; \
        wrapper->initialized = false; \
        wrapper->val = tsd_static_data; \
        a_cleanup(&val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            return (true); \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
    return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    a_name##_tsd = TlsAlloc(); \
    if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
        return (true); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        TlsGetValue(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
\
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        wrapper->initialized = false; \
        a_cleanup(&wrapper->val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            if (pthread_setspecific(a_name##_tsd, \
                (void *)wrapper)) { \
                malloc_write("<jemalloc>: Error" \
                    " setting TSD for "#a_name"\n"); \
                if (opt_abort) \
                    abort(); \
            } \
            return; \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (pthread_key_create(&a_name##_tsd, \
        a_name##_tsd_cleanup_wrapper) != 0) \
        return (true); \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        pthread_getspecific(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#endif

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
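As a usage sketch of these macros, the following declares a hypothetical file-private, non-pointer tsd value and drives the generated functions; the "counter" name, the choice of static linkage, and the assumption that counter_tsd_boot() was already called during initialization are all illustrative, not part of jemalloc's actual code.

/* File-private thread-local counter built with the tsd macros above. */
malloc_tsd_data(static, counter, unsigned, 0)
malloc_tsd_funcs(static, counter, unsigned, 0, malloc_tsd_no_cleanup)

static void
counter_bump(void)
{
    unsigned *c;

    /* Assumes counter_tsd_boot() succeeded earlier. */
    c = counter_tsd_get();  /* Generated getter returns (a_type *). */
    (*c)++;
    counter_tsd_set(c);     /* Write back; needed where TSD is not real TLS. */
}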
include/jemalloc/internal/util.h  (new file, 160 lines)
@@ -0,0 +1,160 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
 * large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096

/*
 * Wrap a cpp argument that contains commas such that it isn't broken up into
 * multiple arguments.
 */
#define JEMALLOC_CONCAT(...) __VA_ARGS__

/*
 * Silence compiler warnings due to uninitialized values.  This is used
 * wherever the compiler fails to recognize that the variable is never used
 * uninitialized.
 */
#ifdef JEMALLOC_CC_SILENCE
#  define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
#  define JEMALLOC_CC_SILENCE_INIT(v)
#endif

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#define assert(e) do { \
    if (config_debug && !(e)) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
            __FILE__, __LINE__, #e); \
        abort(); \
    } \
} while (0)
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
    if ((c) == false) \
        assert(false); \
} while (0)

#ifndef not_reached
#define not_reached() do { \
    if (config_debug) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Unreachable code reached\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif

#ifndef not_implemented
#define not_implemented() do { \
    if (config_debug) { \
        malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif

#define assert_not_implemented(e) do { \
    if (config_debug && !(e)) \
        not_implemented(); \
} while (0)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

int buferror(char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
int malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
    JEMALLOC_ATTR(format(printf, 1, 2));

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
    x |= x >> 32;
#endif
    x++;
    return (x);
}

/* Sets error code */
JEMALLOC_INLINE void
set_errno(int errnum)
{

#ifdef _WIN32
    SetLastError(errnum);
#else
    errno = errnum;
#endif
}

/* Get last error code */
JEMALLOC_INLINE int
get_errno(void)
{

#ifdef _WIN32
    return (GetLastError());
#else
    return (errno);
#endif
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
@ -15,10 +15,8 @@ extern "C" {
 #define	JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
 
 #include "jemalloc_defs@install_suffix@.h"
-#ifndef JEMALLOC_P
-#  define JEMALLOC_P(s) s
-#endif
 
+#ifdef JEMALLOC_EXPERIMENTAL
 #define	ALLOCM_LG_ALIGN(la)	(la)
 #if LG_SIZEOF_PTR == 2
 #define	ALLOCM_ALIGN(a)	(ffs(a)-1)
@ -31,34 +29,124 @@ extern "C" {
 #define	ALLOCM_SUCCESS		0
 #define	ALLOCM_ERR_OOM		1
 #define	ALLOCM_ERR_NOT_MOVED	2
+#endif
 
-extern const char	*JEMALLOC_P(malloc_conf);
-extern void		(*JEMALLOC_P(malloc_message))(void *, const char *);
+/*
+ * The je_ prefix on the following public symbol declarations is an artifact of
+ * namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see below).
+ */
+extern JEMALLOC_EXPORT const char	*je_malloc_conf;
+extern JEMALLOC_EXPORT void		(*je_malloc_message)(void *cbopaque,
+    const char *s);
 
-void	*JEMALLOC_P(malloc)(size_t size) JEMALLOC_ATTR(malloc);
-void	*JEMALLOC_P(calloc)(size_t num, size_t size) JEMALLOC_ATTR(malloc);
-int	JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
-    JEMALLOC_ATTR(nonnull(1));
-void	*JEMALLOC_P(realloc)(void *ptr, size_t size);
-void	JEMALLOC_P(free)(void *ptr);
+JEMALLOC_EXPORT void	*je_malloc(size_t size) JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT void	*je_calloc(size_t num, size_t size)
+    JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT int	je_posix_memalign(void **memptr, size_t alignment,
+    size_t size) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT void	*je_aligned_alloc(size_t alignment, size_t size)
+    JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT void	*je_realloc(void *ptr, size_t size);
+JEMALLOC_EXPORT void	je_free(void *ptr);
 
-size_t	JEMALLOC_P(malloc_usable_size)(const void *ptr);
-void	JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
-    void *cbopaque, const char *opts);
-int	JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
-int	JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp,
-    size_t *miblenp);
-int	JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT void *	je_memalign(size_t alignment, size_t size)
+    JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT void *	je_valloc(size_t size) JEMALLOC_ATTR(malloc);
+#endif
+
+JEMALLOC_EXPORT size_t	je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT void	je_malloc_stats_print(void (*write_cb)(void *,
+    const char *), void *je_cbopaque, const char *opts);
+JEMALLOC_EXPORT int	je_mallctl(const char *name, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int	je_mallctlnametomib(const char *name, size_t *mibp,
+    size_t *miblenp);
+JEMALLOC_EXPORT int	je_mallctlbymib(const size_t *mib, size_t miblen,
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 
-int	JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
-    JEMALLOC_ATTR(nonnull(1));
-int	JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size,
-    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
-int	JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
-    JEMALLOC_ATTR(nonnull(1));
-int	JEMALLOC_P(dallocm)(void *ptr, int flags) JEMALLOC_ATTR(nonnull(1));
+#ifdef JEMALLOC_EXPERIMENTAL
+JEMALLOC_EXPORT int	je_allocm(void **ptr, size_t *rsize, size_t size,
+    int flags) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int	je_rallocm(void **ptr, size_t *rsize, size_t size,
+    size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int	je_sallocm(const void *ptr, size_t *rsize, int flags)
+    JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int	je_dallocm(void *ptr, int flags)
+    JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int	je_nallocm(size_t *rsize, size_t size, int flags);
+#endif
 
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+#ifndef JEMALLOC_NO_DEMANGLE
+#define	JEMALLOC_NO_DEMANGLE
+#endif
+#define	malloc_conf je_malloc_conf
+#define	malloc_message je_malloc_message
+#define	malloc je_malloc
+#define	calloc je_calloc
+#define	posix_memalign je_posix_memalign
+#define	aligned_alloc je_aligned_alloc
+#define	realloc je_realloc
+#define	free je_free
+#define	malloc_usable_size je_malloc_usable_size
+#define	malloc_stats_print je_malloc_stats_print
+#define	mallctl je_mallctl
+#define	mallctlnametomib je_mallctlnametomib
+#define	mallctlbymib je_mallctlbymib
+#define	memalign je_memalign
+#define	valloc je_valloc
+#ifdef JEMALLOC_EXPERIMENTAL
+#define	allocm je_allocm
+#define	rallocm je_rallocm
+#define	sallocm je_sallocm
+#define	dallocm je_dallocm
+#define	nallocm je_nallocm
+#endif
+#endif
+
+/*
+ * The je_* macros can be used as stable alternative names for the public
+ * jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily meant
+ * for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+#undef je_malloc_conf
+#undef je_malloc_message
+#undef je_malloc
+#undef je_calloc
+#undef je_posix_memalign
+#undef je_aligned_alloc
+#undef je_realloc
+#undef je_free
+#undef je_malloc_usable_size
+#undef je_malloc_stats_print
+#undef je_mallctl
+#undef je_mallctlnametomib
+#undef je_mallctlbymib
+#undef je_memalign
+#undef je_valloc
+#ifdef JEMALLOC_EXPERIMENTAL
+#undef je_allocm
+#undef je_rallocm
+#undef je_sallocm
+#undef je_dallocm
+#undef je_nallocm
+#endif
+#endif
 
 #ifdef __cplusplus
 };
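The mangling comment in the header above means an application normally calls the prefixed names (je_malloc(), je_free(), ...) unless it defines JEMALLOC_MANGLE before including the header, in which case the plain libc names are remapped. A hedged usage sketch of the explicit-prefix style, assuming a default build and the usual installed header path (the program itself is illustrative, not part of the diff):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = je_malloc(64);	/* explicitly prefixed public symbol */

	printf("usable size: %zu\n", je_malloc_usable_size(p));
	je_free(p);
	return (0);
}

Alternatively, placing "#define JEMALLOC_MANGLE" before the #include maps malloc/free/mallctl and friends onto whatever prefix was configured via --with-mangling or --with-jemalloc-prefix.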
@ -1,22 +1,36 @@
-#ifndef JEMALLOC_DEFS_H_
-#define JEMALLOC_DEFS_H_
-
 /*
- * If JEMALLOC_PREFIX is defined, it will cause all public APIs to be prefixed.
- * This makes it possible, with some care, to use multiple allocators
- * simultaneously.
- *
- * In many cases it is more convenient to manually prefix allocator function
- * calls than to let macros do it automatically, particularly when using
- * multiple allocators simultaneously.  Define JEMALLOC_MANGLE before
- * #include'ing jemalloc.h in order to cause name mangling that corresponds to
- * the API prefixing.
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed.  This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
  */
 #undef JEMALLOC_PREFIX
 #undef JEMALLOC_CPREFIX
-#if (defined(JEMALLOC_PREFIX) && defined(JEMALLOC_MANGLE))
-#undef JEMALLOC_P
-#endif
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#undef je_malloc_conf
+#undef je_malloc_message
+#undef je_malloc
+#undef je_calloc
+#undef je_posix_memalign
+#undef je_aligned_alloc
+#undef je_realloc
+#undef je_free
+#undef je_malloc_usable_size
+#undef je_malloc_stats_print
+#undef je_mallctl
+#undef je_mallctlnametomib
+#undef je_mallctlbymib
+#undef je_memalign
+#undef je_valloc
+#undef je_allocm
+#undef je_rallocm
+#undef je_sallocm
+#undef je_dallocm
+#undef je_nallocm
 
 /*
  * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
@ -33,26 +47,92 @@
  */
 #undef CPU_SPINWAIT
 
+/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
+#undef JEMALLOC_ATOMIC9
+
 /*
  * Defined if OSAtomic*() functions are available, as provided by Darwin, and
  * documented in the atomic(3) manual page.
  */
 #undef JEMALLOC_OSATOMIC
 
+/*
+ * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
+ * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines)
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
+
+/*
+ * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
+ * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines)
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
+
 /*
  * Defined if OSSpin*() functions are available, as provided by Darwin, and
  * documented in the spinlock(3) manual page.
  */
 #undef JEMALLOC_OSSPIN
 
+/*
+ * Defined if _malloc_thread_cleanup() exists.  At least in the case of
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc
+ * bootstrapping will cause recursion into the pthreads library.  Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+#undef JEMALLOC_MALLOC_THREAD_CLEANUP
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#undef JEMALLOC_MUTEX_INIT_CB
+
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 #ifdef JEMALLOC_HAVE_ATTR
 #  define JEMALLOC_ATTR(s) __attribute__((s))
+#  define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+#elif _MSC_VER
+#  define JEMALLOC_ATTR(s)
+#  ifdef DLLEXPORT
+#    define JEMALLOC_EXPORT __declspec(dllexport)
+#  else
+#    define JEMALLOC_EXPORT __declspec(dllimport)
+#  endif
+#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
+#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
+#  define JEMALLOC_NOINLINE __declspec(noinline)
 #else
 #  define JEMALLOC_ATTR(s)
+#  define JEMALLOC_EXPORT
+#  define JEMALLOC_ALIGNED(s)
+#  define JEMALLOC_SECTION(s)
+#  define JEMALLOC_NOINLINE
 #endif
 
+/* Defined if sbrk() is supported. */
+#undef JEMALLOC_HAVE_SBRK
+
+/* Non-empty if the tls_model attribute is supported. */
+#undef JEMALLOC_TLS_MODEL
+
 /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
 #undef JEMALLOC_CC_SILENCE
 
@ -77,12 +157,6 @@
 /* Use gcc intrinsics for profile backtracing if defined. */
 #undef JEMALLOC_PROF_GCC
 
-/*
- * JEMALLOC_TINY enables support for tiny objects, which are smaller than one
- * quantum.
- */
-#undef JEMALLOC_TINY
-
 /*
  * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
  * This makes it possible to allocate/deallocate objects without any locking
@ -96,29 +170,43 @@
  */
 #undef JEMALLOC_DSS
 
-/* JEMALLOC_SWAP enables mmap()ed swap file support. */
-#undef JEMALLOC_SWAP
-
-/* Support memory filling (junk/zero). */
+/* Support memory filling (junk/zero/quarantine/redzone). */
 #undef JEMALLOC_FILL
 
+/* Support the experimental API. */
+#undef JEMALLOC_EXPERIMENTAL
+
+/* Support utrace(2)-based tracing. */
+#undef JEMALLOC_UTRACE
+
+/* Support Valgrind. */
+#undef JEMALLOC_VALGRIND
+
 /* Support optional abort() on OOM. */
 #undef JEMALLOC_XMALLOC
 
-/* Support SYSV semantics. */
-#undef JEMALLOC_SYSV
-
 /* Support lazy locking (avoid locking unless a second thread is launched). */
 #undef JEMALLOC_LAZY_LOCK
 
-/* Determine page size at run time if defined. */
-#undef DYNAMIC_PAGE_SHIFT
-
 /* One page is 2^STATIC_PAGE_SHIFT bytes. */
 #undef STATIC_PAGE_SHIFT
 
+/*
+ * If defined, use munmap() to unmap freed chunks, rather than storing them for
+ * later reuse.  This is disabled by default on Linux because common sequences
+ * of mmap()/munmap() calls will cause virtual memory map holes.
+ */
+#undef JEMALLOC_MUNMAP
+
+/*
+ * If defined, use mremap(...MREMAP_FIXED...) for huge realloc().  This is
+ * disabled by default because it is Linux-specific and it will cause virtual
+ * memory map holes, much like munmap(2) does.
+ */
+#undef JEMALLOC_MREMAP
+
 /* TLS is used to map arenas and magazine caches to threads. */
-#undef NO_TLS
+#undef JEMALLOC_TLS
 
 /*
  * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
@ -139,9 +227,6 @@
 #undef JEMALLOC_ZONE
 #undef JEMALLOC_ZONE_VERSION
 
-/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
-#undef JEMALLOC_MREMAP_FIXED
-
 /*
  * Methods for purging unused pages differ between operating systems.
  *
@ -164,4 +249,5 @@
 /* sizeof(long) == 2^LG_SIZEOF_LONG. */
 #undef LG_SIZEOF_LONG
 
-#endif /* JEMALLOC_DEFS_H_ */
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#undef LG_SIZEOF_INTMAX_T
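The JEMALLOC_ATTR/JEMALLOC_EXPORT block added above picks attribute syntax per toolchain: GCC-style __attribute__, MSVC __declspec, or nothing. A minimal sketch of the same pattern outside jemalloc; the MYLIB_* names and the MYLIB_BUILDING_DLL switch are illustrative inventions, not part of these headers:

#include <stddef.h>

/* Portable export/attribute macros, following the pattern used above. */
#if defined(__GNUC__)
#  define MYLIB_EXPORT   __attribute__((visibility("default")))
#  define MYLIB_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#  ifdef MYLIB_BUILDING_DLL
#    define MYLIB_EXPORT __declspec(dllexport)
#  else
#    define MYLIB_EXPORT __declspec(dllimport)
#  endif
#  define MYLIB_NOINLINE __declspec(noinline)
#else
#  define MYLIB_EXPORT
#  define MYLIB_NOINLINE
#endif

MYLIB_EXPORT void	*mylib_alloc(size_t size);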
313	include/msvc_compat/inttypes.h	(new file)
@ -0,0 +1,313 @@
// ISO C9x  compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
//  Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_

#if _MSC_VER > 1000
#pragma once
#endif

#include "stdint.h"

// 7.8 Format conversion of integer types

typedef struct {
   intmax_t quot;
   intmax_t rem;
} imaxdiv_t;

// 7.8.1 Macros for format specifiers

#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [   See footnote 185 at page 198

#ifdef _WIN64
#  define __PRI64_PREFIX   "l"
#  define __PRIPTR_PREFIX  "l"
#else
#  define __PRI64_PREFIX   "ll"
#  define __PRIPTR_PREFIX
#endif

// The fprintf macros for signed integers are:
#define PRId8        "d"
#define PRIi8        "i"
#define PRIdLEAST8   "d"
#define PRIiLEAST8   "i"
#define PRIdFAST8    "d"
#define PRIiFAST8    "i"

#define PRId16       "hd"
#define PRIi16       "hi"
#define PRIdLEAST16  "hd"
#define PRIiLEAST16  "hi"
#define PRIdFAST16   "hd"
#define PRIiFAST16   "hi"

#define PRId32       "d"
#define PRIi32       "i"
#define PRIdLEAST32  "d"
#define PRIiLEAST32  "i"
#define PRIdFAST32   "d"
#define PRIiFAST32   "i"

#define PRId64       __PRI64_PREFIX "d"
#define PRIi64       __PRI64_PREFIX "i"
#define PRIdLEAST64  __PRI64_PREFIX "d"
#define PRIiLEAST64  __PRI64_PREFIX "i"
#define PRIdFAST64   __PRI64_PREFIX "d"
#define PRIiFAST64   __PRI64_PREFIX "i"

#define PRIdMAX      __PRI64_PREFIX "d"
#define PRIiMAX      __PRI64_PREFIX "i"

#define PRIdPTR      __PRIPTR_PREFIX "d"
#define PRIiPTR      __PRIPTR_PREFIX "i"

// The fprintf macros for unsigned integers are:
#define PRIo8        "o"
#define PRIu8        "u"
#define PRIx8        "x"
#define PRIX8        "X"
#define PRIoLEAST8   "o"
#define PRIuLEAST8   "u"
#define PRIxLEAST8   "x"
#define PRIXLEAST8   "X"
#define PRIoFAST8    "o"
#define PRIuFAST8    "u"
#define PRIxFAST8    "x"
#define PRIXFAST8    "X"

#define PRIo16       "ho"
#define PRIu16       "hu"
#define PRIx16       "hx"
#define PRIX16       "hX"
#define PRIoLEAST16  "ho"
#define PRIuLEAST16  "hu"
#define PRIxLEAST16  "hx"
#define PRIXLEAST16  "hX"
#define PRIoFAST16   "ho"
#define PRIuFAST16   "hu"
#define PRIxFAST16   "hx"
#define PRIXFAST16   "hX"

#define PRIo32       "o"
#define PRIu32       "u"
#define PRIx32       "x"
#define PRIX32       "X"
#define PRIoLEAST32  "o"
#define PRIuLEAST32  "u"
#define PRIxLEAST32  "x"
#define PRIXLEAST32  "X"
#define PRIoFAST32   "o"
#define PRIuFAST32   "u"
#define PRIxFAST32   "x"
#define PRIXFAST32   "X"

#define PRIo64       __PRI64_PREFIX "o"
#define PRIu64       __PRI64_PREFIX "u"
#define PRIx64       __PRI64_PREFIX "x"
#define PRIX64       __PRI64_PREFIX "X"
#define PRIoLEAST64  __PRI64_PREFIX "o"
#define PRIuLEAST64  __PRI64_PREFIX "u"
#define PRIxLEAST64  __PRI64_PREFIX "x"
#define PRIXLEAST64  __PRI64_PREFIX "X"
#define PRIoFAST64   __PRI64_PREFIX "o"
#define PRIuFAST64   __PRI64_PREFIX "u"
#define PRIxFAST64   __PRI64_PREFIX "x"
#define PRIXFAST64   __PRI64_PREFIX "X"

#define PRIoMAX      __PRI64_PREFIX "o"
#define PRIuMAX      __PRI64_PREFIX "u"
#define PRIxMAX      __PRI64_PREFIX "x"
#define PRIXMAX      __PRI64_PREFIX "X"

#define PRIoPTR      __PRIPTR_PREFIX "o"
#define PRIuPTR      __PRIPTR_PREFIX "u"
#define PRIxPTR      __PRIPTR_PREFIX "x"
#define PRIXPTR      __PRIPTR_PREFIX "X"

// The fscanf macros for signed integers are:
#define SCNd8        "d"
#define SCNi8        "i"
#define SCNdLEAST8   "d"
#define SCNiLEAST8   "i"
#define SCNdFAST8    "d"
#define SCNiFAST8    "i"

#define SCNd16       "hd"
#define SCNi16       "hi"
#define SCNdLEAST16  "hd"
#define SCNiLEAST16  "hi"
#define SCNdFAST16   "hd"
#define SCNiFAST16   "hi"

#define SCNd32       "ld"
#define SCNi32       "li"
#define SCNdLEAST32  "ld"
#define SCNiLEAST32  "li"
#define SCNdFAST32   "ld"
#define SCNiFAST32   "li"

#define SCNd64       "I64d"
#define SCNi64       "I64i"
#define SCNdLEAST64  "I64d"
#define SCNiLEAST64  "I64i"
#define SCNdFAST64   "I64d"
#define SCNiFAST64   "I64i"

#define SCNdMAX      "I64d"
#define SCNiMAX      "I64i"

#ifdef _WIN64 // [
#  define SCNdPTR    "I64d"
#  define SCNiPTR    "I64i"
#else  // _WIN64 ][
#  define SCNdPTR    "ld"
#  define SCNiPTR    "li"
#endif  // _WIN64 ]

// The fscanf macros for unsigned integers are:
#define SCNo8        "o"
#define SCNu8        "u"
#define SCNx8        "x"
#define SCNX8        "X"
#define SCNoLEAST8   "o"
#define SCNuLEAST8   "u"
#define SCNxLEAST8   "x"
#define SCNXLEAST8   "X"
#define SCNoFAST8    "o"
#define SCNuFAST8    "u"
#define SCNxFAST8    "x"
#define SCNXFAST8    "X"

#define SCNo16       "ho"
#define SCNu16       "hu"
#define SCNx16       "hx"
#define SCNX16       "hX"
#define SCNoLEAST16  "ho"
#define SCNuLEAST16  "hu"
#define SCNxLEAST16  "hx"
#define SCNXLEAST16  "hX"
#define SCNoFAST16   "ho"
#define SCNuFAST16   "hu"
#define SCNxFAST16   "hx"
#define SCNXFAST16   "hX"

#define SCNo32       "lo"
#define SCNu32       "lu"
#define SCNx32       "lx"
#define SCNX32       "lX"
#define SCNoLEAST32  "lo"
#define SCNuLEAST32  "lu"
#define SCNxLEAST32  "lx"
#define SCNXLEAST32  "lX"
#define SCNoFAST32   "lo"
#define SCNuFAST32   "lu"
#define SCNxFAST32   "lx"
#define SCNXFAST32   "lX"

#define SCNo64       "I64o"
#define SCNu64       "I64u"
#define SCNx64       "I64x"
#define SCNX64       "I64X"
#define SCNoLEAST64  "I64o"
#define SCNuLEAST64  "I64u"
#define SCNxLEAST64  "I64x"
#define SCNXLEAST64  "I64X"
#define SCNoFAST64   "I64o"
#define SCNuFAST64   "I64u"
#define SCNxFAST64   "I64x"
#define SCNXFAST64   "I64X"

#define SCNoMAX      "I64o"
#define SCNuMAX      "I64u"
#define SCNxMAX      "I64x"
#define SCNXMAX      "I64X"

#ifdef _WIN64 // [
#  define SCNoPTR    "I64o"
#  define SCNuPTR    "I64u"
#  define SCNxPTR    "I64x"
#  define SCNXPTR    "I64X"
#else  // _WIN64 ][
#  define SCNoPTR    "lo"
#  define SCNuPTR    "lu"
#  define SCNxPTR    "lx"
#  define SCNXPTR    "lX"
#endif  // _WIN64 ]

#endif // __STDC_FORMAT_MACROS ]

// 7.8.2 Functions for greatest-width integer types

// 7.8.2.1 The imaxabs function
#define imaxabs _abs64

// 7.8.2.2 The imaxdiv function

// This is modified version of div() function from Microsoft's div.c found
// in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
   imaxdiv_t result;

   result.quot = numer / denom;
   result.rem = numer % denom;

   if (numer < 0 && result.rem > 0) {
      // did division wrong; must fix up
      ++result.quot;
      result.rem -= denom;
   }

   return result;
}

// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64

// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64


#endif // _MSC_INTTYPES_H_ ]
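The PRI* format macros above let a single printf call compile with the right length modifier on MSVC and elsewhere, and imaxdiv() fixes up the quotient/remainder pair for compilers whose division of negative operands rounds the wrong way. A short usage sketch in portable C99 (standard <inttypes.h> functions only, not specific to this shim):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	int64_t big = INT64_C(-7);
	imaxdiv_t qr = imaxdiv(big, 2);

	/* PRId64 expands to the platform's length modifier for int64_t. */
	printf("big  = %" PRId64 "\n", big);
	printf("quot = %jd, rem = %jd\n", (intmax_t)qr.quot, (intmax_t)qr.rem);
	return (0);
}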
16	include/msvc_compat/stdbool.h	(new file)
@ -0,0 +1,16 @@
#ifndef stdbool_h
#define stdbool_h

#include <wtypes.h>

/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
typedef BOOL _Bool;

#define bool _Bool
#define true 1
#define false 0

#define __bool_true_false_are_defined 1

#endif /* stdbool_h */
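The caveat in the comment above is that a typedef'd integer BOOL keeps ordinary integer conversion rules, so converting 0.5 to this fake bool truncates to 0, whereas a genuine C99 _Bool converts any nonzero value to 1. A tiny illustrative check of the difference the comment warns about (plain C99, not part of the shim):

#include <stdbool.h>	/* real C99 _Bool semantics */
#include <stdio.h>

int
main(void)
{
	bool real_bool = 0.5;		/* nonzero value converts to 1 (true) */
	int fake_bool = (int)0.5;	/* a typedef'd int simply truncates to 0 */

	printf("real: %d, fake: %d\n", (int)real_bool, fake_bool);	/* 1, 0 */
	return (0);
}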
247	include/msvc_compat/stdint.h	(new file)
@ -0,0 +1,247 @@
// ISO C9x  compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
//  Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]

#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_

#if _MSC_VER > 1000
#pragma once
#endif

#include <limits.h>

// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
#  include <wchar.h>
#ifdef __cplusplus
}
#endif

// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
#     define _W64 __w64
#  else
#     define _W64
#  endif
#endif


// 7.18.1 Integer types

// 7.18.1.1 Exact-width integer types

// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
   typedef signed char       int8_t;
   typedef signed short      int16_t;
   typedef signed int        int32_t;
   typedef unsigned char     uint8_t;
   typedef unsigned short    uint16_t;
   typedef unsigned int      uint32_t;
#else
   typedef signed __int8     int8_t;
   typedef signed __int16    int16_t;
   typedef signed __int32    int32_t;
   typedef unsigned __int8   uint8_t;
   typedef unsigned __int16  uint16_t;
   typedef unsigned __int32  uint32_t;
#endif
typedef signed __int64       int64_t;
typedef unsigned __int64     uint64_t;


// 7.18.1.2 Minimum-width integer types
typedef int8_t    int_least8_t;
typedef int16_t   int_least16_t;
typedef int32_t   int_least32_t;
typedef int64_t   int_least64_t;
typedef uint8_t   uint_least8_t;
typedef uint16_t  uint_least16_t;
typedef uint32_t  uint_least32_t;
typedef uint64_t  uint_least64_t;

// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t    int_fast8_t;
typedef int16_t   int_fast16_t;
typedef int32_t   int_fast32_t;
typedef int64_t   int_fast64_t;
typedef uint8_t   uint_fast8_t;
typedef uint16_t  uint_fast16_t;
typedef uint32_t  uint_fast32_t;
typedef uint64_t  uint_fast64_t;

// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
   typedef signed __int64    intptr_t;
   typedef unsigned __int64  uintptr_t;
#else // _WIN64 ][
   typedef _W64 signed int   intptr_t;
   typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]

// 7.18.1.5 Greatest-width integer types
typedef int64_t   intmax_t;
typedef uint64_t  uintmax_t;


// 7.18.2 Limits of specified-width integer types

#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259

// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN     ((int8_t)_I8_MIN)
#define INT8_MAX     _I8_MAX
#define INT16_MIN    ((int16_t)_I16_MIN)
#define INT16_MAX    _I16_MAX
#define INT32_MIN    ((int32_t)_I32_MIN)
#define INT32_MAX    _I32_MAX
#define INT64_MIN    ((int64_t)_I64_MIN)
#define INT64_MAX    _I64_MAX
#define UINT8_MAX    _UI8_MAX
#define UINT16_MAX   _UI16_MAX
#define UINT32_MAX   _UI32_MAX
#define UINT64_MAX   _UI64_MAX

// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN    INT8_MIN
#define INT_LEAST8_MAX    INT8_MAX
#define INT_LEAST16_MIN   INT16_MIN
#define INT_LEAST16_MAX   INT16_MAX
#define INT_LEAST32_MIN   INT32_MIN
#define INT_LEAST32_MAX   INT32_MAX
#define INT_LEAST64_MIN   INT64_MIN
#define INT_LEAST64_MAX   INT64_MAX
#define UINT_LEAST8_MAX   UINT8_MAX
#define UINT_LEAST16_MAX  UINT16_MAX
#define UINT_LEAST32_MAX  UINT32_MAX
#define UINT_LEAST64_MAX  UINT64_MAX

// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN    INT8_MIN
#define INT_FAST8_MAX    INT8_MAX
#define INT_FAST16_MIN   INT16_MIN
#define INT_FAST16_MAX   INT16_MAX
#define INT_FAST32_MIN   INT32_MIN
#define INT_FAST32_MAX   INT32_MAX
#define INT_FAST64_MIN   INT64_MIN
#define INT_FAST64_MAX   INT64_MAX
#define UINT_FAST8_MAX   UINT8_MAX
#define UINT_FAST16_MAX  UINT16_MAX
#define UINT_FAST32_MAX  UINT32_MAX
#define UINT_FAST64_MAX  UINT64_MAX

// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
#  define INTPTR_MIN   INT64_MIN
#  define INTPTR_MAX   INT64_MAX
#  define UINTPTR_MAX  UINT64_MAX
#else // _WIN64 ][
#  define INTPTR_MIN   INT32_MIN
#  define INTPTR_MAX   INT32_MAX
#  define UINTPTR_MAX  UINT32_MAX
#endif // _WIN64 ]

// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN   INT64_MIN
#define INTMAX_MAX   INT64_MAX
#define UINTMAX_MAX  UINT64_MAX

// 7.18.3 Limits of other integer types

#ifdef _WIN64 // [
#  define PTRDIFF_MIN  _I64_MIN
#  define PTRDIFF_MAX  _I64_MAX
#else  // _WIN64 ][
#  define PTRDIFF_MIN  _I32_MIN
#  define PTRDIFF_MAX  _I32_MAX
#endif  // _WIN64 ]

#define SIG_ATOMIC_MIN  INT_MIN
#define SIG_ATOMIC_MAX  INT_MAX

#ifndef SIZE_MAX // [
#  ifdef _WIN64 // [
#     define SIZE_MAX  _UI64_MAX
#  else // _WIN64 ][
#     define SIZE_MAX  _UI32_MAX
#  endif // _WIN64 ]
#endif // SIZE_MAX ]

// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
#  define WCHAR_MIN  0
#endif  // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
#  define WCHAR_MAX  _UI16_MAX
#endif  // WCHAR_MAX ]

#define WINT_MIN  0
#define WINT_MAX  _UI16_MAX

#endif // __STDC_LIMIT_MACROS ]


// 7.18.4 Limits of other integer types

#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260

// 7.18.4.1 Macros for minimum-width integer constants

#define INT8_C(val)  val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64

#define UINT8_C(val)  val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64

// 7.8.4.2 Macros for greatest-width integer constants
#define INTMAX_C   INT64_C
#define UINTMAX_C  UINT64_C

#endif // __STDC_CONSTANT_MACROS ]


#endif // _MSC_STDINT_H_ ]
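The limit and constant macros above are wrapped in __STDC_LIMIT_MACROS / __STDC_CONSTANT_MACROS guards, mirroring the C99 footnotes that pre-C++11 compilers followed: C++ translation units had to opt in explicitly, while C code always got them. A hedged sketch of a consumer that opts in (the defines are harmless in C, and the program is illustrative only):

#define __STDC_LIMIT_MACROS
#define __STDC_CONSTANT_MACROS
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Both macros come from the guarded sections shown above. */
	printf("INTPTR_MAX = %lld\n", (long long)INTPTR_MAX);
	printf("UINT64_C(1) << 63 = %llu\n",
	    (unsigned long long)(UINT64_C(1) << 63));
	return (0);
}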
23	include/msvc_compat/strings.h	(new file)
@ -0,0 +1,23 @@
#ifndef strings_h
#define strings_h

/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
 * for both */
#include <intrin.h>
#pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
	unsigned long i;

	if (_BitScanForward(&i, x))
		return (i + 1);
	return (0);
}

static __forceinline int ffs(int x)
{

	return (ffsl(x));
}

#endif
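ffs()/ffsl() return the 1-based index of the lowest set bit, or 0 for a zero argument, which is exactly what _BitScanForward plus the +1 adjustment above produces. A short check of the expected results using the POSIX ffs() (illustrative; on MSVC the shim above provides the same behavior):

#include <stdio.h>
#include <strings.h>	/* POSIX ffs() */

int
main(void)
{
	printf("ffs(0)  = %d\n", ffs(0));	/* 0: no bit set */
	printf("ffs(1)  = %d\n", ffs(1));	/* 1: bit 0 -> index 1 */
	printf("ffs(10) = %d\n", ffs(10));	/* 2: 0b1010, lowest set bit is bit 1 */
	printf("ffs(0x80000000) = %d\n", ffs((int)0x80000000));	/* 32 */
	return (0);
}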
2115	src/arena.c	(diff suppressed because it is too large)
36	src/base.c
@ -4,7 +4,7 @@
 /******************************************************************************/
 /* Data. */
 
-malloc_mutex_t	base_mtx;
+static malloc_mutex_t	base_mtx;
 
 /*
  * Current pages that are being used for internal memory allocations.  These
@ -32,7 +32,7 @@ base_pages_alloc(size_t minsize)
 	assert(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
 	zero = false;
-	base_pages = chunk_alloc(csize, true, &zero);
+	base_pages = chunk_alloc(csize, chunksize, true, &zero);
 	if (base_pages == NULL)
 		return (true);
 	base_next_addr = base_pages;
@ -66,6 +66,17 @@ base_alloc(size_t size)
 	return (ret);
 }
 
+void *
+base_calloc(size_t number, size_t size)
+{
+	void *ret = base_alloc(number * size);
+
+	if (ret != NULL)
+		memset(ret, 0, number * size);
+
+	return (ret);
+}
+
 extent_node_t *
 base_node_alloc(void)
 {
@ -104,3 +115,24 @@ base_boot(void)
 
 	return (false);
 }
+
+void
+base_prefork(void)
+{
+
+	malloc_mutex_prefork(&base_mtx);
+}
+
+void
+base_postfork_parent(void)
+{
+
+	malloc_mutex_postfork_parent(&base_mtx);
+}
+
+void
+base_postfork_child(void)
+{
+
+	malloc_mutex_postfork_child(&base_mtx);
+}
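The new base_prefork()/base_postfork_parent()/base_postfork_child() hooks let the allocator hold its mutex across fork() and then release it in both parent and child. A minimal sketch of that general pattern using pthread_atfork(); this is illustrative only (jemalloc wires its own hooks internally, and the lock and function names below are made up):

#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire the lock before fork() so it is not held by a thread that will
 * not exist in the child. */
static void demo_prefork(void)		{ pthread_mutex_lock(&demo_mtx); }
/* Parent and child each release the lock once fork() returns. */
static void demo_postfork_parent(void)	{ pthread_mutex_unlock(&demo_mtx); }
static void demo_postfork_child(void)	{ pthread_mutex_unlock(&demo_mtx); }

int
main(void)
{
	(void)pthread_atfork(demo_prefork, demo_postfork_parent,
	    demo_postfork_child);
	return (0);
}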
315
src/chunk.c
315
src/chunk.c
@ -5,18 +5,20 @@
|
|||||||
/* Data. */
|
/* Data. */
|
||||||
|
|
||||||
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
|
||||||
#ifdef JEMALLOC_SWAP
|
|
||||||
bool opt_overcommit = true;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
|
|
||||||
malloc_mutex_t chunks_mtx;
|
malloc_mutex_t chunks_mtx;
|
||||||
chunk_stats_t stats_chunks;
|
chunk_stats_t stats_chunks;
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef JEMALLOC_IVSALLOC
|
/*
|
||||||
|
* Trees of chunks that were previously allocated (trees differ only in node
|
||||||
|
* ordering). These are used when allocating chunks, in an attempt to re-use
|
||||||
|
* address space. Depending on function, different tree orderings are needed,
|
||||||
|
* which is why there are two trees with the same contents.
|
||||||
|
*/
|
||||||
|
static extent_tree_t chunks_szad;
|
||||||
|
static extent_tree_t chunks_ad;
|
||||||
|
|
||||||
rtree_t *chunks_rtree;
|
rtree_t *chunks_rtree;
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Various chunk-related settings. */
|
/* Various chunk-related settings. */
|
||||||
size_t chunksize;
|
size_t chunksize;
|
||||||
@ -26,6 +28,98 @@ size_t map_bias;
|
|||||||
size_t arena_maxclass; /* Max size class for arenas. */
|
size_t arena_maxclass; /* Max size class for arenas. */
|
||||||
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
|
/* Function prototypes for non-inline static functions. */
|
||||||
|
|
||||||
|
static void *chunk_recycle(size_t size, size_t alignment, bool base,
|
||||||
|
bool *zero);
|
||||||
|
static void chunk_record(void *chunk, size_t size);
|
||||||
|
|
||||||
|
/******************************************************************************/
|
||||||
|
|
||||||
|
static void *
|
||||||
|
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
|
||||||
|
{
|
||||||
|
void *ret;
|
||||||
|
extent_node_t *node;
|
||||||
|
extent_node_t key;
|
||||||
|
size_t alloc_size, leadsize, trailsize;
|
||||||
|
|
||||||
|
if (base) {
|
||||||
|
/*
|
||||||
|
* This function may need to call base_node_{,de}alloc(), but
|
||||||
|
* the current chunk allocation request is on behalf of the
|
||||||
|
* base allocator. Avoid deadlock (and if that weren't an
|
||||||
|
* issue, potential for infinite recursion) by returning NULL.
|
||||||
|
*/
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
alloc_size = size + alignment - chunksize;
|
||||||
|
/* Beware size_t wrap-around. */
|
||||||
|
if (alloc_size < size)
|
||||||
|
return (NULL);
|
||||||
|
key.addr = NULL;
|
||||||
|
key.size = alloc_size;
|
||||||
|
malloc_mutex_lock(&chunks_mtx);
|
||||||
|
node = extent_tree_szad_nsearch(&chunks_szad, &key);
|
||||||
|
if (node == NULL) {
|
||||||
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
|
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
|
||||||
|
(uintptr_t)node->addr;
|
||||||
|
assert(node->size >= leadsize + size);
|
||||||
|
trailsize = node->size - leadsize - size;
|
||||||
|
ret = (void *)((uintptr_t)node->addr + leadsize);
|
||||||
|
/* Remove node from the tree. */
|
||||||
|
extent_tree_szad_remove(&chunks_szad, node);
|
||||||
|
extent_tree_ad_remove(&chunks_ad, node);
|
||||||
|
if (leadsize != 0) {
|
||||||
|
/* Insert the leading space as a smaller chunk. */
|
||||||
|
node->size = leadsize;
|
||||||
|
extent_tree_szad_insert(&chunks_szad, node);
|
||||||
|
extent_tree_ad_insert(&chunks_ad, node);
|
||||||
|
node = NULL;
|
||||||
|
}
|
||||||
|
if (trailsize != 0) {
|
||||||
|
/* Insert the trailing space as a smaller chunk. */
|
||||||
|
if (node == NULL) {
|
||||||
|
/*
|
||||||
|
* An additional node is required, but
|
||||||
|
* base_node_alloc() can cause a new base chunk to be
|
||||||
|
* allocated. Drop chunks_mtx in order to avoid
|
||||||
|
* deadlock, and if node allocation fails, deallocate
|
||||||
|
* the result before returning an error.
|
||||||
|
*/
|
||||||
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
|
node = base_node_alloc();
|
||||||
|
if (node == NULL) {
|
||||||
|
chunk_dealloc(ret, size, true);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
|
malloc_mutex_lock(&chunks_mtx);
|
||||||
|
}
|
||||||
|
node->addr = (void *)((uintptr_t)(ret) + size);
|
||||||
|
node->size = trailsize;
|
||||||
|
extent_tree_szad_insert(&chunks_szad, node);
|
||||||
|
extent_tree_ad_insert(&chunks_ad, node);
|
||||||
|
node = NULL;
|
||||||
|
}
|
||||||
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
|
|
||||||
|
if (node != NULL)
|
||||||
|
base_node_dealloc(node);
|
||||||
|
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||||
|
/* Pages are zeroed as a side effect of pages_purge(). */
|
||||||
|
*zero = true;
|
||||||
|
#else
|
||||||
|
if (*zero) {
|
||||||
|
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
|
||||||
|
memset(ret, 0, size);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
return (ret);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the caller specifies (*zero == false), it is still possible to receive
|
* If the caller specifies (*zero == false), it is still possible to receive
|
||||||
@ -34,79 +128,138 @@ size_t arena_maxclass; /* Max size class for arenas. */
|
|||||||
* advantage of them if they are returned.
|
* advantage of them if they are returned.
|
||||||
*/
|
*/
|
||||||
void *
|
void *
|
||||||
chunk_alloc(size_t size, bool base, bool *zero)
|
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
assert((size & chunksize_mask) == 0);
|
assert((size & chunksize_mask) == 0);
|
||||||
|
assert(alignment != 0);
|
||||||
|
assert((alignment & chunksize_mask) == 0);
|
||||||
|
|
||||||
#ifdef JEMALLOC_SWAP
|
ret = chunk_recycle(size, alignment, base, zero);
|
||||||
if (swap_enabled) {
|
if (ret != NULL)
|
||||||
ret = chunk_alloc_swap(size, zero);
|
goto label_return;
|
||||||
if (ret != NULL)
|
|
||||||
goto RETURN;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (swap_enabled == false || opt_overcommit) {
|
ret = chunk_alloc_mmap(size, alignment, zero);
|
||||||
#endif
|
if (ret != NULL)
|
||||||
#ifdef JEMALLOC_DSS
|
goto label_return;
|
||||||
ret = chunk_alloc_dss(size, zero);
|
|
||||||
|
if (config_dss) {
|
||||||
|
ret = chunk_alloc_dss(size, alignment, zero);
|
||||||
if (ret != NULL)
|
if (ret != NULL)
|
||||||
goto RETURN;
|
goto label_return;
|
||||||
#endif
|
|
||||||
ret = chunk_alloc_mmap(size);
|
|
||||||
if (ret != NULL) {
|
|
||||||
*zero = true;
|
|
||||||
goto RETURN;
|
|
||||||
}
|
|
||||||
#ifdef JEMALLOC_SWAP
|
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
/* All strategies for allocation failed. */
|
/* All strategies for allocation failed. */
|
||||||
ret = NULL;
|
ret = NULL;
|
||||||
RETURN:
|
label_return:
|
||||||
#ifdef JEMALLOC_IVSALLOC
|
if (config_ivsalloc && base == false && ret != NULL) {
|
||||||
if (base == false && ret != NULL) {
|
|
||||||
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
|
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
|
||||||
chunk_dealloc(ret, size, true);
|
chunk_dealloc(ret, size, true);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
if ((config_stats || config_prof) && ret != NULL) {
|
||||||
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
|
|
||||||
if (ret != NULL) {
|
|
||||||
# ifdef JEMALLOC_PROF
|
|
||||||
bool gdump;
|
bool gdump;
|
||||||
# endif
|
|
||||||
malloc_mutex_lock(&chunks_mtx);
|
malloc_mutex_lock(&chunks_mtx);
|
||||||
# ifdef JEMALLOC_STATS
|
if (config_stats)
|
||||||
stats_chunks.nchunks += (size / chunksize);
|
stats_chunks.nchunks += (size / chunksize);
|
||||||
# endif
|
|
||||||
stats_chunks.curchunks += (size / chunksize);
|
stats_chunks.curchunks += (size / chunksize);
|
||||||
if (stats_chunks.curchunks > stats_chunks.highchunks) {
|
if (stats_chunks.curchunks > stats_chunks.highchunks) {
|
||||||
stats_chunks.highchunks = stats_chunks.curchunks;
|
stats_chunks.highchunks = stats_chunks.curchunks;
|
||||||
# ifdef JEMALLOC_PROF
|
if (config_prof)
|
||||||
gdump = true;
|
gdump = true;
|
||||||
# endif
|
} else if (config_prof)
|
||||||
}
|
|
||||||
# ifdef JEMALLOC_PROF
|
|
||||||
else
|
|
||||||
gdump = false;
|
gdump = false;
|
||||||
# endif
|
|
||||||
malloc_mutex_unlock(&chunks_mtx);
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
# ifdef JEMALLOC_PROF
|
if (config_prof && opt_prof && opt_prof_gdump && gdump)
|
||||||
if (opt_prof && opt_prof_gdump && gdump)
|
|
||||||
prof_gdump();
|
prof_gdump();
|
||||||
# endif
|
|
||||||
}
|
}
|
||||||
#endif
|
if (config_debug && *zero && ret != NULL) {
|
||||||
|
size_t i;
|
||||||
|
size_t *p = (size_t *)(uintptr_t)ret;
|
||||||
|
|
||||||
|
VALGRIND_MAKE_MEM_DEFINED(ret, size);
|
||||||
|
for (i = 0; i < size / sizeof(size_t); i++)
|
||||||
|
assert(p[i] == 0);
|
||||||
|
}
|
||||||
assert(CHUNK_ADDR2BASE(ret) == ret);
|
assert(CHUNK_ADDR2BASE(ret) == ret);
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
chunk_record(void *chunk, size_t size)
|
||||||
|
{
|
||||||
|
extent_node_t *xnode, *node, *prev, key;
|
||||||
|
|
||||||
|
pages_purge(chunk, size);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate a node before acquiring chunks_mtx even though it might not
|
||||||
|
* be needed, because base_node_alloc() may cause a new base chunk to
|
||||||
|
* be allocated, which could cause deadlock if chunks_mtx were already
|
||||||
|
* held.
|
||||||
|
*/
|
||||||
|
xnode = base_node_alloc();
|
||||||
|
|
||||||
|
malloc_mutex_lock(&chunks_mtx);
|
||||||
|
key.addr = (void *)((uintptr_t)chunk + size);
|
||||||
|
node = extent_tree_ad_nsearch(&chunks_ad, &key);
|
||||||
|
/* Try to coalesce forward. */
|
||||||
|
if (node != NULL && node->addr == key.addr) {
|
||||||
|
/*
|
||||||
|
* Coalesce chunk with the following address range. This does
|
||||||
|
* not change the position within chunks_ad, so only
|
||||||
|
* remove/insert from/into chunks_szad.
|
||||||
|
*/
|
||||||
|
extent_tree_szad_remove(&chunks_szad, node);
|
||||||
|
node->addr = chunk;
|
||||||
|
node->size += size;
|
||||||
|
extent_tree_szad_insert(&chunks_szad, node);
|
||||||
|
if (xnode != NULL)
|
||||||
|
base_node_dealloc(xnode);
|
||||||
|
} else {
|
||||||
|
/* Coalescing forward failed, so insert a new node. */
|
||||||
|
if (xnode == NULL) {
|
||||||
|
/*
|
||||||
|
* base_node_alloc() failed, which is an exceedingly
|
||||||
|
* unlikely failure. Leak chunk; its pages have
|
||||||
|
* already been purged, so this is only a virtual
|
||||||
|
* memory leak.
|
||||||
|
*/
|
||||||
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
node = xnode;
|
||||||
|
node->addr = chunk;
|
||||||
|
node->size = size;
|
||||||
|
extent_tree_ad_insert(&chunks_ad, node);
|
||||||
|
extent_tree_szad_insert(&chunks_szad, node);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Try to coalesce backward. */
|
||||||
|
+    prev = extent_tree_ad_prev(&chunks_ad, node);
+    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+        chunk) {
+        /*
+         * Coalesce chunk with the previous address range.  This does
+         * not change the position within chunks_ad, so only
+         * remove/insert node from/into chunks_szad.
+         */
+        extent_tree_szad_remove(&chunks_szad, prev);
+        extent_tree_ad_remove(&chunks_ad, prev);
+
+        extent_tree_szad_remove(&chunks_szad, node);
+        node->addr = prev->addr;
+        node->size += prev->size;
+        extent_tree_szad_insert(&chunks_szad, node);
+
+        base_node_dealloc(prev);
+    }
+    malloc_mutex_unlock(&chunks_mtx);
+}

void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
@@ -116,25 +269,18 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

-#ifdef JEMALLOC_IVSALLOC
+    if (config_ivsalloc)
        rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    malloc_mutex_lock(&chunks_mtx);
-    stats_chunks.curchunks -= (size / chunksize);
-    malloc_mutex_unlock(&chunks_mtx);
-#endif
+    if (config_stats || config_prof) {
+        malloc_mutex_lock(&chunks_mtx);
+        stats_chunks.curchunks -= (size / chunksize);
+        malloc_mutex_unlock(&chunks_mtx);
+    }

    if (unmap) {
-#ifdef JEMALLOC_SWAP
-        if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
-            return;
-#endif
-#ifdef JEMALLOC_DSS
-        if (chunk_dealloc_dss(chunk, size) == false)
-            return;
-#endif
-        chunk_dealloc_mmap(chunk, size);
+        if ((config_dss && chunk_in_dss(chunk)) ||
+            chunk_dealloc_mmap(chunk, size))
+            chunk_record(chunk, size);
    }
}

@@ -144,30 +290,25 @@ chunk_boot(void)

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
-    assert(chunksize >= PAGE_SIZE);
+    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
-    chunk_npages = (chunksize >> PAGE_SHIFT);
+    chunk_npages = (chunksize >> LG_PAGE);

-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
+    if (config_stats || config_prof) {
    if (malloc_mutex_init(&chunks_mtx))
+            return (true);
+        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+    }
+    if (config_dss && chunk_dss_boot())
        return (true);
-    memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
-    if (chunk_swap_boot())
-        return (true);
-#endif
-    if (chunk_mmap_boot())
-        return (true);
-#ifdef JEMALLOC_DSS
-    if (chunk_dss_boot())
-        return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
-    chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
-    if (chunks_rtree == NULL)
-        return (true);
-#endif
+    extent_tree_szad_new(&chunks_szad);
+    extent_tree_ad_new(&chunks_ad);
+    if (config_ivsalloc) {
+        chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+            opt_lg_chunk);
+        if (chunks_rtree == NULL)
+            return (true);
+    }

    return (false);
}
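The chunk_boot() arithmetic in the hunk above is easy to sanity-check in isolation. The following standalone sketch is illustrative only: the 4 MiB chunk size and 4 KiB page size are assumed example defaults, and the variable names merely echo the ones in the diff rather than jemalloc's real configuration machinery.

#include <assert.h>
#include <stddef.h>

int
main(void)
{
    size_t opt_lg_chunk = 22;                  /* assumed: 4 MiB chunks */
    size_t lg_page = 12;                       /* assumed: 4 KiB pages */
    size_t chunksize = (size_t)1 << opt_lg_chunk;
    size_t chunksize_mask = chunksize - 1;     /* mask used for alignment checks */
    size_t chunk_npages = chunksize >> lg_page;

    assert(chunksize == 4u * 1024 * 1024);
    assert((chunksize & chunksize_mask) == 0); /* chunksize is a power of two */
    assert(chunk_npages == 1024);
    return (0);
}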
264	src/chunk_dss.c
@@ -1,82 +1,42 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */

-malloc_mutex_t dss_mtx;
+/*
+ * Protects sbrk() calls.  This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t dss_chunks_szad;
-static extent_tree_t dss_chunks_ad;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *chunk_recycle_dss(size_t size, bool *zero);
-static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);

/******************************************************************************/

+#ifndef JEMALLOC_HAVE_SBRK
static void *
-chunk_recycle_dss(size_t size, bool *zero)
+sbrk(intptr_t increment)
{
-    extent_node_t *node, key;
-
-    key.addr = NULL;
-    key.size = size;
-    malloc_mutex_lock(&dss_mtx);
-    node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
-    if (node != NULL) {
-        void *ret = node->addr;
-
-        /* Remove node from the tree. */
-        extent_tree_szad_remove(&dss_chunks_szad, node);
-        if (node->size == size) {
-            extent_tree_ad_remove(&dss_chunks_ad, node);
-            base_node_dealloc(node);
-        } else {
-            /*
-             * Insert the remainder of node's address range as a
-             * smaller chunk.  Its position within dss_chunks_ad
-             * does not change.
-             */
-            assert(node->size > size);
-            node->addr = (void *)((uintptr_t)node->addr + size);
-            node->size -= size;
-            extent_tree_szad_insert(&dss_chunks_szad, node);
-        }
-        malloc_mutex_unlock(&dss_mtx);
-
-        if (*zero)
-            memset(ret, 0, size);
-        return (ret);
-    }
-    malloc_mutex_unlock(&dss_mtx);
+    not_implemented();

    return (NULL);
}
+#endif

void *
-chunk_alloc_dss(size_t size, bool *zero)
+chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
    void *ret;

-    ret = chunk_recycle_dss(size, zero);
-    if (ret != NULL)
-        return (ret);
+    cassert(config_dss);
+    assert(size > 0 && (size & chunksize_mask) == 0);
+    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
@@ -87,6 +47,8 @@ chunk_alloc_dss(size_t size, bool *zero)

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
+        size_t gap_size, cpad_size;
+        void *cpad, *dss_next;
        intptr_t incr;

        /*
@@ -97,26 +59,40 @@ chunk_alloc_dss(size_t size, bool *zero)
        do {
            /* Get the current end of the DSS. */
            dss_max = sbrk(0);

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
-            incr = (intptr_t)size
-                - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
-            if (incr == (intptr_t)size)
-                ret = dss_max;
-            else {
-                ret = (void *)((intptr_t)dss_max + incr);
-                incr += size;
+            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+                chunksize_mask;
+            /*
+             * Compute how much chunk-aligned pad space (if any) is
+             * necessary to satisfy alignment.  This space can be
+             * recycled for later use.
+             */
+            cpad = (void *)((uintptr_t)dss_max + gap_size);
+            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+                alignment);
+            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+            dss_next = (void *)((uintptr_t)ret + size);
+            if ((uintptr_t)ret < (uintptr_t)dss_max ||
+                (uintptr_t)dss_next < (uintptr_t)dss_max) {
+                /* Wrap-around. */
+                malloc_mutex_unlock(&dss_mtx);
+                return (NULL);
            }
+            incr = gap_size + cpad_size + size;
            dss_prev = sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
-                dss_max = (void *)((intptr_t)dss_prev + incr);
+                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
-                *zero = true;
+                if (cpad_size != 0)
+                    chunk_dealloc(cpad, cpad_size, true);
+                if (*zero) {
+                    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+                    memset(ret, 0, size);
+                }
                return (ret);
            }
        } while (dss_prev != (void *)-1);
@@ -126,84 +102,13 @@ chunk_alloc_dss(size_t size, bool *zero)
    return (NULL);
}

-static extent_node_t *
-chunk_dealloc_dss_record(void *chunk, size_t size)
-{
-    extent_node_t *xnode, *node, *prev, key;
-
-    xnode = NULL;
-    while (true) {
-        key.addr = (void *)((uintptr_t)chunk + size);
-        node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
-        /* Try to coalesce forward. */
-        if (node != NULL && node->addr == key.addr) {
-            /*
-             * Coalesce chunk with the following address range.
-             * This does not change the position within
-             * dss_chunks_ad, so only remove/insert from/into
-             * dss_chunks_szad.
-             */
-            extent_tree_szad_remove(&dss_chunks_szad, node);
-            node->addr = chunk;
-            node->size += size;
-            extent_tree_szad_insert(&dss_chunks_szad, node);
-            break;
-        } else if (xnode == NULL) {
-            /*
-             * It is possible that base_node_alloc() will cause a
-             * new base chunk to be allocated, so take care not to
-             * deadlock on dss_mtx, and recover if another thread
-             * deallocates an adjacent chunk while this one is busy
-             * allocating xnode.
-             */
-            malloc_mutex_unlock(&dss_mtx);
-            xnode = base_node_alloc();
-            malloc_mutex_lock(&dss_mtx);
-            if (xnode == NULL)
-                return (NULL);
-        } else {
-            /* Coalescing forward failed, so insert a new node. */
-            node = xnode;
-            xnode = NULL;
-            node->addr = chunk;
-            node->size = size;
-            extent_tree_ad_insert(&dss_chunks_ad, node);
-            extent_tree_szad_insert(&dss_chunks_szad, node);
-            break;
-        }
-    }
-    /* Discard xnode if it ended up unused do to a race. */
-    if (xnode != NULL)
-        base_node_dealloc(xnode);
-
-    /* Try to coalesce backward. */
-    prev = extent_tree_ad_prev(&dss_chunks_ad, node);
-    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-        chunk) {
-        /*
-         * Coalesce chunk with the previous address range.  This does
-         * not change the position within dss_chunks_ad, so only
-         * remove/insert node from/into dss_chunks_szad.
-         */
-        extent_tree_szad_remove(&dss_chunks_szad, prev);
-        extent_tree_ad_remove(&dss_chunks_ad, prev);
-
-        extent_tree_szad_remove(&dss_chunks_szad, node);
-        node->addr = prev->addr;
-        node->size += prev->size;
-        extent_tree_szad_insert(&dss_chunks_szad, node);
-
-        base_node_dealloc(prev);
-    }
-
-    return (node);
-}

bool
chunk_in_dss(void *chunk)
{
    bool ret;

+    cassert(config_dss);

    malloc_mutex_lock(&dss_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)dss_base
        && (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -215,70 +120,43 @@ chunk_in_dss(void *chunk)
    return (ret);
}

-bool
-chunk_dealloc_dss(void *chunk, size_t size)
-{
-    bool ret;
-
-    malloc_mutex_lock(&dss_mtx);
-    if ((uintptr_t)chunk >= (uintptr_t)dss_base
-        && (uintptr_t)chunk < (uintptr_t)dss_max) {
-        extent_node_t *node;
-
-        /* Try to coalesce with other unused chunks. */
-        node = chunk_dealloc_dss_record(chunk, size);
-        if (node != NULL) {
-            chunk = node->addr;
-            size = node->size;
-        }
-
-        /* Get the current end of the DSS. */
-        dss_max = sbrk(0);
-
-        /*
-         * Try to shrink the DSS if this chunk is at the end of the
-         * DSS.  The sbrk() call here is subject to a race condition
-         * with threads that use brk(2) or sbrk(2) directly, but the
-         * alternative would be to leak memory for the sake of poorly
-         * designed multi-threaded programs.
-         */
-        if ((void *)((uintptr_t)chunk + size) == dss_max
-            && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
-            /* Success. */
-            dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
-
-            if (node != NULL) {
-                extent_tree_szad_remove(&dss_chunks_szad, node);
-                extent_tree_ad_remove(&dss_chunks_ad, node);
-                base_node_dealloc(node);
-            }
-        } else
-            madvise(chunk, size, MADV_DONTNEED);
-
-        ret = false;
-        goto RETURN;
-    }
-
-    ret = true;
-RETURN:
-    malloc_mutex_unlock(&dss_mtx);
-    return (ret);
-}

bool
chunk_dss_boot(void)
{

+    cassert(config_dss);

    if (malloc_mutex_init(&dss_mtx))
        return (true);
    dss_base = sbrk(0);
    dss_prev = dss_base;
    dss_max = dss_base;
-    extent_tree_szad_new(&dss_chunks_szad);
-    extent_tree_ad_new(&dss_chunks_ad);

    return (false);
}

+void
+chunk_dss_prefork(void)
+{
+
+    if (config_dss)
+        malloc_mutex_prefork(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_parent(void)
+{
+
+    if (config_dss)
+        malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+    if (config_dss)
+        malloc_mutex_postfork_child(&dss_mtx);
+}

/******************************************************************************/
-#endif /* JEMALLOC_DSS */
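For reference, the gap/pad arithmetic that the new chunk_alloc_dss() performs before calling sbrk() can be exercised on its own. This is a hedged sketch, not jemalloc code: align_ceiling stands in for the ALIGNMENT_CEILING macro, and the chunk size, alignment, and break address are made-up example values.

#include <assert.h>
#include <stdint.h>

/* Round x up to a multiple of align (align must be a power of two). */
static uintptr_t
align_ceiling(uintptr_t x, uintptr_t align)
{
    return ((x + align - 1) & ~(align - 1));
}

int
main(void)
{
    uintptr_t chunksize = 4 << 20;            /* example chunk size */
    uintptr_t alignment = 8 << 20;            /* requested alignment */
    uintptr_t size = chunksize;               /* request one chunk */
    uintptr_t dss_max = 0x804a1000;           /* arbitrary, unaligned break */

    /* Padding needed to chunk-align the current end of the DSS. */
    uintptr_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
        (chunksize - 1);
    uintptr_t cpad = dss_max + gap_size;      /* chunk-aligned pad start */
    uintptr_t ret = align_ceiling(dss_max, alignment);
    uintptr_t cpad_size = ret - cpad;         /* recyclable pad, if any */
    uintptr_t incr = gap_size + cpad_size + size;

    assert((ret & (alignment - 1)) == 0);     /* result satisfies alignment */
    assert(dss_max + incr == ret + size);     /* sbrk ends right after ret */
    return (0);
}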
279	src/chunk_mmap.c
@@ -1,54 +1,37 @@
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

-/******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-#ifndef NO_TLS
-static __thread bool mmap_unaligned_tls
-    JEMALLOC_ATTR(tls_model("initial-exec"));
-#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
-#define MMAP_UNALIGNED_SET(v) do { \
-    mmap_unaligned_tls = (v); \
-} while (0)
-#else
-static pthread_key_t mmap_unaligned_tsd;
-#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
-#define MMAP_UNALIGNED_SET(v) do { \
-    pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
-} while (0)
-#endif
-
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

-static void *pages_map(void *addr, size_t size, bool noreserve);
+static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
-    bool noreserve);
-static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);
+static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
+    bool *zero);

/******************************************************************************/

static void *
-pages_map(void *addr, size_t size, bool noreserve)
+pages_map(void *addr, size_t size)
{
    void *ret;

+    assert(size != 0);
+
+#ifdef _WIN32
+    /*
+     * If VirtualAlloc can't allocate at the given address when one is
+     * given, it fails and returns NULL.
+     */
+    ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+        PAGE_READWRITE);
+#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
-    int flags = MAP_PRIVATE | MAP_ANON;
-#ifdef MAP_NORESERVE
-    if (noreserve)
-        flags |= MAP_NORESERVE;
-#endif
-    ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+        -1, 0);
    assert(ret != NULL);

    if (ret == MAP_FAILED)
@@ -60,16 +43,15 @@ pages_map(void *addr, size_t size, bool noreserve)
        if (munmap(ret, size) == -1) {
            char buf[BUFERROR_BUF];

-            buferror(errno, buf, sizeof(buf));
-            malloc_write("<jemalloc>: Error in munmap(): ");
-            malloc_write(buf);
-            malloc_write("\n");
+            buferror(buf, sizeof(buf));
+            malloc_printf("<jemalloc: Error in munmap(): %s\n",
+                buf);
            if (opt_abort)
                abort();
        }
        ret = NULL;
    }
+#endif
    assert(ret == NULL || (addr == NULL && ret != addr)
        || (addr != NULL && ret == addr));
    return (ret);
@@ -79,161 +61,142 @@ static void
pages_unmap(void *addr, size_t size)
{

-    if (munmap(addr, size) == -1) {
+#ifdef _WIN32
+    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+    if (munmap(addr, size) == -1)
+#endif
+    {
        char buf[BUFERROR_BUF];

-        buferror(errno, buf, sizeof(buf));
-        malloc_write("<jemalloc>: Error in munmap(): ");
-        malloc_write(buf);
-        malloc_write("\n");
+        buferror(buf, sizeof(buf));
+        malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+            "VirtualFree"
+#else
+            "munmap"
+#endif
+            "(): %s\n", buf);
        if (opt_abort)
            abort();
    }
}

static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
-    void *ret;
-    size_t offset;
+    void *ret = (void *)((uintptr_t)addr + leadsize);

-    /* Beware size_t wrap-around. */
-    if (size + chunksize <= size)
-        return (NULL);
-
-    ret = pages_map(NULL, size + chunksize, noreserve);
-    if (ret == NULL)
-        return (NULL);
-
-    /* Clean up unneeded leading/trailing space. */
-    offset = CHUNK_ADDR2OFFSET(ret);
-    if (offset != 0) {
-        /* Note that mmap() returned an unaligned mapping. */
-        unaligned = true;
-
-        /* Leading space. */
-        pages_unmap(ret, chunksize - offset);
-
-        ret = (void *)((uintptr_t)ret +
-            (chunksize - offset));
-
-        /* Trailing space. */
-        pages_unmap((void *)((uintptr_t)ret + size),
-            offset);
-    } else {
-        /* Trailing space only. */
-        pages_unmap((void *)((uintptr_t)ret + size),
-            chunksize);
-    }
-
-    /*
-     * If mmap() returned an aligned mapping, reset mmap_unaligned so that
-     * the next chunk_alloc_mmap() execution tries the fast allocation
-     * method.
-     */
-    if (unaligned == false)
-        MMAP_UNALIGNED_SET(false);
-
-    return (ret);
+    assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+    {
+        void *new_addr;
+
+        pages_unmap(addr, alloc_size);
+        new_addr = pages_map(ret, size);
+        if (new_addr == ret)
+            return (ret);
+        if (new_addr)
+            pages_unmap(new_addr, size);
+        return (NULL);
+    }
+#else
+    {
+        size_t trailsize = alloc_size - leadsize - size;
+
+        if (leadsize != 0)
+            pages_unmap(addr, leadsize);
+        if (trailsize != 0)
+            pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+        return (ret);
+    }
+#endif
}

+void
+pages_purge(void *addr, size_t length)
+{
+
+#ifdef _WIN32
+    VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+#else
+#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
+#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+#    define JEMALLOC_MADV_PURGE MADV_FREE
+#  else
+#    error "No method defined for purging unused dirty pages."
+#  endif
+    madvise(addr, length, JEMALLOC_MADV_PURGE);
+#endif
+}
+
static void *
-chunk_alloc_mmap_internal(size_t size, bool noreserve)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
+{
+    void *ret, *pages;
+    size_t alloc_size, leadsize;
+
+    alloc_size = size + alignment - PAGE;
+    /* Beware size_t wrap-around. */
+    if (alloc_size < size)
+        return (NULL);
+    do {
+        pages = pages_map(NULL, alloc_size);
+        if (pages == NULL)
+            return (NULL);
+        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+            (uintptr_t)pages;
+        ret = pages_trim(pages, alloc_size, leadsize, size);
+    } while (ret == NULL);
+
+    assert(ret != NULL);
+    *zero = true;
+    return (ret);
+}
+
+void *
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
    void *ret;
+    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
-     * excess.  However, that always results in at least one call to
+     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
-     * A more optimistic approach is to try mapping precisely the right
-     * amount, then try to append another mapping if alignment is off.  In
-     * practice, this works out well as long as the application is not
-     * interleaving mappings via direct mmap() calls.  If we do run into a
-     * situation where there is an interleaved mapping and we are unable to
-     * extend an unaligned mapping, our best option is to switch to the
-     * slow method until mmap() returns another aligned mapping.  This will
-     * tend to leave a gap in the memory map that is too small to cause
-     * later problems for the optimistic method.
-     *
-     * Another possible confounding factor is address space layout
-     * randomization (ASLR), which causes mmap(2) to disregard the
-     * requested address.  mmap_unaligned tracks whether the previous
-     * chunk_alloc_mmap() execution received any unaligned or relocated
-     * mappings, and if so, the current execution will immediately fall
-     * back to the slow method.  However, we keep track of whether the fast
-     * method would have succeeded, and if so, we make a note to try the
-     * fast method next time.
+     * Optimistically try mapping precisely the right amount before falling
+     * back to the slow method, with the expectation that the optimistic
+     * approach works most of the time.
     */

-    if (MMAP_UNALIGNED_GET() == false) {
-        size_t offset;
+    assert(alignment != 0);
+    assert((alignment & chunksize_mask) == 0);

-        ret = pages_map(NULL, size, noreserve);
-        if (ret == NULL)
-            return (NULL);
-
-        offset = CHUNK_ADDR2OFFSET(ret);
-        if (offset != 0) {
-            MMAP_UNALIGNED_SET(true);
-            /* Try to extend chunk boundary. */
-            if (pages_map((void *)((uintptr_t)ret + size),
-                chunksize - offset, noreserve) == NULL) {
-                /*
-                 * Extension failed.  Clean up, then revert to
-                 * the reliable-but-expensive method.
-                 */
-                pages_unmap(ret, size);
-                ret = chunk_alloc_mmap_slow(size, true,
-                    noreserve);
-            } else {
-                /* Clean up unneeded leading space. */
-                pages_unmap(ret, chunksize - offset);
-                ret = (void *)((uintptr_t)ret + (chunksize -
-                    offset));
-            }
-        }
-    } else
-        ret = chunk_alloc_mmap_slow(size, false, noreserve);
+    ret = pages_map(NULL, size);
+    if (ret == NULL)
+        return (NULL);
+    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+    if (offset != 0) {
+        pages_unmap(ret, size);
+        return (chunk_alloc_mmap_slow(size, alignment, zero));
+    }

+    assert(ret != NULL);
+    *zero = true;
    return (ret);
}

-void *
-chunk_alloc_mmap(size_t size)
-{
-
-    return (chunk_alloc_mmap_internal(size, false));
-}
-
-void *
-chunk_alloc_mmap_noreserve(size_t size)
-{
-
-    return (chunk_alloc_mmap_internal(size, true));
-}
-
-void
+bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

-    pages_unmap(chunk, size);
-}
-
-bool
-chunk_mmap_boot(void)
-{
-
-#ifdef NO_TLS
-    if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
-        malloc_write("<jemalloc>: Error in pthread_key_create()\n");
-        return (true);
-    }
-#endif
-
-    return (false);
+    if (config_munmap)
+        pages_unmap(chunk, size);
+
+    return (config_munmap == false);
}
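The pages_trim()/chunk_alloc_mmap_slow() pair above is the classic over-allocate-then-trim technique for producing aligned mappings. A self-contained sketch of the same idea follows; it assumes only POSIX mmap()/munmap() and power-of-two, page-multiple arguments, and it is not jemalloc's implementation.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Map size bytes aligned to alignment by over-allocating and trimming. */
static void *
aligned_map_slow(size_t size, size_t alignment)
{
    size_t alloc_size = size + alignment;     /* generous over-allocation */
    if (alloc_size < size)
        return (NULL);                        /* size_t wrap-around */
    char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pages == MAP_FAILED)
        return (NULL);
    uintptr_t addr = (uintptr_t)pages;
    size_t leadsize = ((addr + alignment - 1) & ~((uintptr_t)alignment - 1)) -
        addr;
    size_t trailsize = alloc_size - leadsize - size;
    if (leadsize != 0)
        munmap(pages, leadsize);              /* trim leading excess */
    if (trailsize != 0)
        munmap(pages + leadsize + size, trailsize);  /* trim trailing excess */
    return (pages + leadsize);
}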
402	src/chunk_swap.c
@@ -1,402 +0,0 @@
-#define JEMALLOC_CHUNK_SWAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
-/******************************************************************************/
-/* Data. */
-
-malloc_mutex_t swap_mtx;
-bool swap_enabled;
-bool swap_prezeroed;
-size_t swap_nfds;
-int *swap_fds;
-#ifdef JEMALLOC_STATS
-size_t swap_avail;
-#endif
-
-/* Base address of the mmap()ed file(s). */
-static void *swap_base;
-/* Current end of the space in use (<= swap_max). */
-static void *swap_end;
-/* Absolute upper limit on file-backed addresses. */
-static void *swap_max;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t swap_chunks_szad;
-static extent_tree_t swap_chunks_ad;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *chunk_recycle_swap(size_t size, bool *zero);
-static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
-
-/******************************************************************************/
-
-static void *
-chunk_recycle_swap(size_t size, bool *zero)
-{
-    extent_node_t *node, key;
-
-    key.addr = NULL;
-    key.size = size;
-    malloc_mutex_lock(&swap_mtx);
-    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
-    if (node != NULL) {
-        void *ret = node->addr;
-
-        /* Remove node from the tree. */
-        extent_tree_szad_remove(&swap_chunks_szad, node);
-        if (node->size == size) {
-            extent_tree_ad_remove(&swap_chunks_ad, node);
-            base_node_dealloc(node);
-        } else {
-            /*
-             * Insert the remainder of node's address range as a
-             * smaller chunk.  Its position within swap_chunks_ad
-             * does not change.
-             */
-            assert(node->size > size);
-            node->addr = (void *)((uintptr_t)node->addr + size);
-            node->size -= size;
-            extent_tree_szad_insert(&swap_chunks_szad, node);
-        }
-#ifdef JEMALLOC_STATS
-        swap_avail -= size;
-#endif
-        malloc_mutex_unlock(&swap_mtx);
-
-        if (*zero)
-            memset(ret, 0, size);
-        return (ret);
-    }
-    malloc_mutex_unlock(&swap_mtx);
-
-    return (NULL);
-}
-
-void *
-chunk_alloc_swap(size_t size, bool *zero)
-{
-    void *ret;
-
-    assert(swap_enabled);
-
-    ret = chunk_recycle_swap(size, zero);
-    if (ret != NULL)
-        return (ret);
-
-    malloc_mutex_lock(&swap_mtx);
-    if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
-        ret = swap_end;
-        swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
-        swap_avail -= size;
-#endif
-        malloc_mutex_unlock(&swap_mtx);
-
-        if (swap_prezeroed)
-            *zero = true;
-        else if (*zero)
-            memset(ret, 0, size);
-    } else {
-        malloc_mutex_unlock(&swap_mtx);
-        return (NULL);
-    }
-
-    return (ret);
-}
-
-static extent_node_t *
-chunk_dealloc_swap_record(void *chunk, size_t size)
-{
-    extent_node_t *xnode, *node, *prev, key;
-
-    xnode = NULL;
-    while (true) {
-        key.addr = (void *)((uintptr_t)chunk + size);
-        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
-        /* Try to coalesce forward. */
-        if (node != NULL && node->addr == key.addr) {
-            /*
-             * Coalesce chunk with the following address range.
-             * This does not change the position within
-             * swap_chunks_ad, so only remove/insert from/into
-             * swap_chunks_szad.
-             */
-            extent_tree_szad_remove(&swap_chunks_szad, node);
-            node->addr = chunk;
-            node->size += size;
-            extent_tree_szad_insert(&swap_chunks_szad, node);
-            break;
-        } else if (xnode == NULL) {
-            /*
-             * It is possible that base_node_alloc() will cause a
-             * new base chunk to be allocated, so take care not to
-             * deadlock on swap_mtx, and recover if another thread
-             * deallocates an adjacent chunk while this one is busy
-             * allocating xnode.
-             */
-            malloc_mutex_unlock(&swap_mtx);
-            xnode = base_node_alloc();
-            malloc_mutex_lock(&swap_mtx);
-            if (xnode == NULL)
-                return (NULL);
-        } else {
-            /* Coalescing forward failed, so insert a new node. */
-            node = xnode;
-            xnode = NULL;
-            node->addr = chunk;
-            node->size = size;
-            extent_tree_ad_insert(&swap_chunks_ad, node);
-            extent_tree_szad_insert(&swap_chunks_szad, node);
-            break;
-        }
-    }
-    /* Discard xnode if it ended up unused do to a race. */
-    if (xnode != NULL)
-        base_node_dealloc(xnode);
-
-    /* Try to coalesce backward. */
-    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
-    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-        chunk) {
-        /*
-         * Coalesce chunk with the previous address range.  This does
-         * not change the position within swap_chunks_ad, so only
-         * remove/insert node from/into swap_chunks_szad.
-         */
-        extent_tree_szad_remove(&swap_chunks_szad, prev);
-        extent_tree_ad_remove(&swap_chunks_ad, prev);
-
-        extent_tree_szad_remove(&swap_chunks_szad, node);
-        node->addr = prev->addr;
-        node->size += prev->size;
-        extent_tree_szad_insert(&swap_chunks_szad, node);
-
-        base_node_dealloc(prev);
-    }
-
-    return (node);
-}
-
-bool
-chunk_in_swap(void *chunk)
-{
-    bool ret;
-
-    assert(swap_enabled);
-
-    malloc_mutex_lock(&swap_mtx);
-    if ((uintptr_t)chunk >= (uintptr_t)swap_base
-        && (uintptr_t)chunk < (uintptr_t)swap_max)
-        ret = true;
-    else
-        ret = false;
-    malloc_mutex_unlock(&swap_mtx);
-
-    return (ret);
-}
-
-bool
-chunk_dealloc_swap(void *chunk, size_t size)
-{
-    bool ret;
-
-    assert(swap_enabled);
-
-    malloc_mutex_lock(&swap_mtx);
-    if ((uintptr_t)chunk >= (uintptr_t)swap_base
-        && (uintptr_t)chunk < (uintptr_t)swap_max) {
-        extent_node_t *node;
-
-        /* Try to coalesce with other unused chunks. */
-        node = chunk_dealloc_swap_record(chunk, size);
-        if (node != NULL) {
-            chunk = node->addr;
-            size = node->size;
-        }
-
-        /*
-         * Try to shrink the in-use memory if this chunk is at the end
-         * of the in-use memory.
-         */
-        if ((void *)((uintptr_t)chunk + size) == swap_end) {
-            swap_end = (void *)((uintptr_t)swap_end - size);
-
-            if (node != NULL) {
-                extent_tree_szad_remove(&swap_chunks_szad,
-                    node);
-                extent_tree_ad_remove(&swap_chunks_ad, node);
-                base_node_dealloc(node);
-            }
-        } else
-            madvise(chunk, size, MADV_DONTNEED);
-
-#ifdef JEMALLOC_STATS
-        swap_avail += size;
-#endif
-        ret = false;
-        goto RETURN;
-    }
-
-    ret = true;
-RETURN:
-    malloc_mutex_unlock(&swap_mtx);
-    return (ret);
-}
-
-bool
-chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
-{
-    bool ret;
-    unsigned i;
-    off_t off;
-    void *vaddr;
-    size_t cumsize, voff;
-    size_t sizes[nfds];
-
-    malloc_mutex_lock(&swap_mtx);
-
-    /* Get file sizes. */
-    for (i = 0, cumsize = 0; i < nfds; i++) {
-        off = lseek(fds[i], 0, SEEK_END);
-        if (off == ((off_t)-1)) {
-            ret = true;
-            goto RETURN;
-        }
-        if (PAGE_CEILING(off) != off) {
-            /* Truncate to a multiple of the page size. */
-            off &= ~PAGE_MASK;
-            if (ftruncate(fds[i], off) != 0) {
-                ret = true;
-                goto RETURN;
-            }
-        }
-        sizes[i] = off;
-        if (cumsize + off < cumsize) {
-            /*
-             * Cumulative file size is greater than the total
-             * address space.  Bail out while it's still obvious
-             * what the problem is.
-             */
-            ret = true;
-            goto RETURN;
-        }
-        cumsize += off;
-    }
-
-    /* Round down to a multiple of the chunk size. */
-    cumsize &= ~chunksize_mask;
-    if (cumsize == 0) {
-        ret = true;
-        goto RETURN;
-    }
-
-    /*
-     * Allocate a chunk-aligned region of anonymous memory, which will
-     * be the final location for the memory-mapped files.
-     */
-    vaddr = chunk_alloc_mmap_noreserve(cumsize);
-    if (vaddr == NULL) {
-        ret = true;
-        goto RETURN;
-    }
-
-    /* Overlay the files onto the anonymous mapping. */
-    for (i = 0, voff = 0; i < nfds; i++) {
-        void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
-            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
-        if (addr == MAP_FAILED) {
-            char buf[BUFERROR_BUF];
-
-            buferror(errno, buf, sizeof(buf));
-            malloc_write(
-                "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
-            malloc_write(buf);
-            malloc_write("\n");
-            if (opt_abort)
-                abort();
-            if (munmap(vaddr, voff) == -1) {
-                buferror(errno, buf, sizeof(buf));
-                malloc_write("<jemalloc>: Error in munmap(): ");
-                malloc_write(buf);
-                malloc_write("\n");
-            }
-            ret = true;
-            goto RETURN;
-        }
-        assert(addr == (void *)((uintptr_t)vaddr + voff));
-
-        /*
-         * Tell the kernel that the mapping will be accessed randomly,
-         * and that it should not gratuitously sync pages to the
-         * filesystem.
-         */
-#ifdef MADV_RANDOM
-        madvise(addr, sizes[i], MADV_RANDOM);
-#endif
-#ifdef MADV_NOSYNC
-        madvise(addr, sizes[i], MADV_NOSYNC);
-#endif
-
-        voff += sizes[i];
-    }
-
-    swap_prezeroed = prezeroed;
-    swap_base = vaddr;
-    swap_end = swap_base;
-    swap_max = (void *)((uintptr_t)vaddr + cumsize);
-
-    /* Copy the fds array for mallctl purposes. */
-    swap_fds = (int *)base_alloc(nfds * sizeof(int));
-    if (swap_fds == NULL) {
-        ret = true;
-        goto RETURN;
-    }
-    memcpy(swap_fds, fds, nfds * sizeof(int));
-    swap_nfds = nfds;
-
-#ifdef JEMALLOC_STATS
-    swap_avail = cumsize;
-#endif
-
-    swap_enabled = true;
-
-    ret = false;
-RETURN:
-    malloc_mutex_unlock(&swap_mtx);
-    return (ret);
-}
-
-bool
-chunk_swap_boot(void)
-{
-
-    if (malloc_mutex_init(&swap_mtx))
-        return (true);
-
-    swap_enabled = false;
-    swap_prezeroed = false; /* swap.* mallctl's depend on this. */
-    swap_nfds = 0;
-    swap_fds = NULL;
-#ifdef JEMALLOC_STATS
-    swap_avail = 0;
-#endif
-    swap_base = NULL;
-    swap_end = NULL;
-    swap_max = NULL;
-
-    extent_tree_szad_new(&swap_chunks_szad);
-    extent_tree_ad_new(&swap_chunks_ad);
-
-    return (false);
-}
-
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
46	src/ckh.c
@@ -73,7 +73,6 @@ ckh_isearch(ckh_t *ckh, const void *key)
    size_t hash1, hash2, bucket, cell;

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);

    ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);

@@ -100,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
     * Cycle through the cells in the bucket, starting at a random position.
     * The randomness avoids worst-case search overhead as buckets fill up.
     */
-    prn32(offset, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
+    prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
    for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
            ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -142,7 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
         * were an item for which both hashes indicated the same
         * bucket.
         */
-        prn32(i, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
+        prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
        assert(cell->key != NULL);

@@ -265,15 +264,15 @@ ckh_grow(ckh_t *ckh)
        size_t usize;

        lg_curcells++;
-        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
+        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
        if (usize == 0) {
            ret = true;
-            goto RETURN;
+            goto label_return;
        }
        tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
        if (tab == NULL) {
            ret = true;
-            goto RETURN;
+            goto label_return;
        }
        /* Swap in new table. */
        ttab = ckh->tab;
@@ -293,7 +292,7 @@ ckh_grow(ckh_t *ckh)
    }

    ret = false;
-RETURN:
+label_return:
    return (ret);
}

@@ -310,7 +309,7 @@ ckh_shrink(ckh_t *ckh)
     */
    lg_prevbuckets = ckh->lg_curbuckets;
    lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
+    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
    if (usize == 0)
        return;
    tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
@@ -362,7 +361,7 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
    ckh->ninserts = 0;
    ckh->nrelocs = 0;
#endif
-    ckh->prn_state = 42; /* Value doesn't really matter. */
+    ckh->prng_state = 42; /* Value doesn't really matter. */
    ckh->count = 0;

    /*
@@ -383,23 +382,19 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
    ckh->hash = hash;
    ckh->keycomp = keycomp;

-    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE, NULL);
+    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
    if (usize == 0) {
        ret = true;
-        goto RETURN;
+        goto label_return;
    }
    ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
    if (ckh->tab == NULL) {
        ret = true;
-        goto RETURN;
+        goto label_return;
    }

-#ifdef JEMALLOC_DEBUG
-    ckh->magic = CKH_MAGIC;
-#endif
-
    ret = false;
-RETURN:
+label_return:
    return (ret);
}

@@ -408,7 +403,6 @@ ckh_delete(ckh_t *ckh)
{

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);

#ifdef CKH_VERBOSE
    malloc_printf(
@@ -433,7 +427,6 @@ ckh_count(ckh_t *ckh)
{

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);

    return (ckh->count);
}
@@ -464,7 +457,6 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
    bool ret;

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);
    assert(ckh_search(ckh, key, NULL, NULL));

#ifdef CKH_COUNT
@@ -474,12 +466,12 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
    while (ckh_try_insert(ckh, &key, &data)) {
        if (ckh_grow(ckh)) {
            ret = true;
-            goto RETURN;
+            goto label_return;
        }
    }

    ret = false;
-RETURN:
+label_return:
    return (ret);
}

@@ -489,7 +481,6 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
    size_t cell;

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
@@ -521,7 +512,6 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
    size_t cell;

    assert(ckh != NULL);
-    dassert(ckh->magic == CKH_MAGIC);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
@@ -545,7 +535,7 @@ ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
    assert(hash1 != NULL);
    assert(hash2 != NULL);

-    h = hash(key, strlen((const char *)key), 0x94122f335b332aeaLLU);
+    h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
@@ -556,7 +546,7 @@ ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
    } else {
        ret1 = h;
        ret2 = hash(key, strlen((const char *)key),
-            0x8432a476666bbc13LLU);
+            UINT64_C(0x8432a476666bbc13));
    }

    *hash1 = ret1;
@@ -593,7 +583,7 @@ ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    u.i = 0;
#endif
    u.v = key;
-    h = hash(&u.i, sizeof(u.i), 0xd983396e68886082LLU);
+    h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
@@ -604,7 +594,7 @@ ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    } else {
        assert(SIZEOF_PTR == 8);
        ret1 = h;
-        ret2 = hash(&u.i, sizeof(u.i), 0x5e2be9aff8709a5dLLU);
+        ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
    }

    *hash1 = ret1;
src/extent.c
@@ -3,7 +3,6 @@

/******************************************************************************/

-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
@@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
    extent_szad_comp)
-#endif

static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
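The szad ordering that the extent trees rely on (both here and in the recycling code in src/chunk.c above) sorts primarily by size and secondarily by address, so a next-search for {size, NULL} finds the smallest extent that can satisfy a request, preferring the lowest address among equal sizes. A minimal comparator sketch, with field names assumed to mirror extent_node_t's addr/size members:

#include <stddef.h>
#include <stdint.h>

struct node { void *addr; size_t size; };

/* Size-then-address ordering ("szad"). */
static int
szad_cmp(const struct node *a, const struct node *b)
{
    if (a->size != b->size)
        return ((a->size < b->size) ? -1 : 1);
    uintptr_t aa = (uintptr_t)a->addr;
    uintptr_t ba = (uintptr_t)b->addr;
    return ((aa > ba) - (aa < ba));
}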
200	src/huge.c
@@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */

-#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
-#endif

malloc_mutex_t huge_mtx;

@@ -19,10 +17,18 @@ static extent_tree_t huge;

void *
huge_malloc(size_t size, bool zero)
+{
+
+    return (huge_palloc(size, chunksize, zero));
+}
+
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
+    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

@@ -37,7 +43,12 @@ huge_malloc(size_t size, bool zero)
    if (node == NULL)
        return (NULL);

-    ret = chunk_alloc(csize, false, &zero);
+    /*
+     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+     * it is possible to make correct junk/zero fill decisions below.
+     */
+    is_zeroed = zero;
+    ret = chunk_alloc(csize, alignment, false, &is_zeroed);
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
@@ -49,106 +60,19 @@ huge_malloc(size_t size, bool zero)

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
+    if (config_stats) {
        stats_cactive_add(csize);
        huge_nmalloc++;
        huge_allocated += csize;
-#endif
+    }
    malloc_mutex_unlock(&huge_mtx);

-#ifdef JEMALLOC_FILL
-    if (zero == false) {
+    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
-        else if (opt_zero)
+        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }
-#endif
-
-    return (ret);
-}
-
-/* Only handles large allocations that require more than chunk alignment. */
-void *
-huge_palloc(size_t size, size_t alignment, bool zero)
-{
-    void *ret;
-    size_t alloc_size, chunk_size, offset;
-    extent_node_t *node;
-
-    /*
-     * This allocation requires alignment that is even larger than chunk
-     * alignment.  This means that huge_malloc() isn't good enough.
-     *
-     * Allocate almost twice as many chunks as are demanded by the size or
-     * alignment, in order to assure the alignment can be achieved, then
-     * unmap leading and trailing chunks.
-     */
-    assert(alignment > chunksize);
-
-    chunk_size = CHUNK_CEILING(size);
-
-    if (size >= alignment)
-        alloc_size = chunk_size + alignment - chunksize;
-    else
-        alloc_size = (alignment << 1) - chunksize;
-
-    /* Allocate an extent node with which to track the chunk. */
-    node = base_node_alloc();
-    if (node == NULL)
-        return (NULL);
-
-    ret = chunk_alloc(alloc_size, false, &zero);
-    if (ret == NULL) {
-        base_node_dealloc(node);
-        return (NULL);
-    }
-
-    offset = (uintptr_t)ret & (alignment - 1);
-    assert((offset & chunksize_mask) == 0);
-    assert(offset < alloc_size);
-    if (offset == 0) {
-        /* Trim trailing space. */
-        chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-            - chunk_size, true);
-    } else {
-        size_t trailsize;
-
-        /* Trim leading space. */
-        chunk_dealloc(ret, alignment - offset, true);
-
-        ret = (void *)((uintptr_t)ret + (alignment - offset));
-
-        trailsize = alloc_size - (alignment - offset) - chunk_size;
-        if (trailsize != 0) {
-            /* Trim trailing space. */
-            assert(trailsize < alloc_size);
-            chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-                trailsize, true);
-        }
-    }
-
-    /* Insert node into huge. */
-    node->addr = ret;
-    node->size = chunk_size;
-
-    malloc_mutex_lock(&huge_mtx);
-    extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-    stats_cactive_add(chunk_size);
-    huge_nmalloc++;
-    huge_allocated += chunk_size;
-#endif
-    malloc_mutex_unlock(&huge_mtx);
-
-#ifdef JEMALLOC_FILL
-    if (zero == false) {
-        if (opt_junk)
-            memset(ret, 0xa5, chunk_size);
-        else if (opt_zero)
-            memset(ret, 0, chunk_size);
-    }
-#endif

    return (ret);
}
@@ -164,12 +88,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
        assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
-        if (opt_junk && size < oldsize) {
+        if (config_fill && opt_junk && size < oldsize) {
            memset((void *)((uintptr_t)ptr + size), 0x5a,
                oldsize - size);
        }
-#endif
        return (ptr);
    }

@@ -218,20 +140,13 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     */
    copysize = (size < oldsize) ? size : oldsize;

+#ifdef JEMALLOC_MREMAP
    /*
     * Use mremap(2) if this is a huge-->huge reallocation, and neither the
-     * source nor the destination are in swap or dss.
+     * source nor the destination are in dss.
     */
-#ifdef JEMALLOC_MREMAP_FIXED
-    if (oldsize >= chunksize
-# ifdef JEMALLOC_SWAP
-        && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
-        chunk_in_swap(ret) == false))
-# endif
-# ifdef JEMALLOC_DSS
-        && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-# endif
-        ) {
+    if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+        == false && chunk_in_dss(ret) == false))) {
        size_t newsize = huge_salloc(ret);

        /*
@@ -253,10 +168,9 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
         */
        char buf[BUFERROR_BUF];

-        buferror(errno, buf, sizeof(buf));
-        malloc_write("<jemalloc>: Error in mremap(): ");
-        malloc_write(buf);
-        malloc_write("\n");
+        buferror(buf, sizeof(buf));
+        malloc_printf("<jemalloc>: Error in mremap(): %s\n",
+            buf);
        if (opt_abort)
            abort();
        memcpy(ret, ptr, copysize);
@@ -266,7 +180,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
    {
        memcpy(ret, ptr, copysize);
-        idalloc(ptr);
+        iqalloc(ptr);
    }
    return (ret);
}
@@ -285,23 +199,16 @@ huge_dalloc(void *ptr, bool unmap)
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

-#ifdef JEMALLOC_STATS
+    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
-#endif
+    }

    malloc_mutex_unlock(&huge_mtx);

-    if (unmap) {
-        /* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-        if (opt_junk)
-            memset(node->addr, 0x5a, node->size);
-#endif
-#endif
-    }
+    if (unmap && config_fill && config_dss && opt_junk)
+        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);
|
||||||
|
|
||||||
@ -328,7 +235,6 @@ huge_salloc(const void *ptr)
|
|||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef JEMALLOC_PROF
|
|
||||||
prof_ctx_t *
|
prof_ctx_t *
|
||||||
huge_prof_ctx_get(const void *ptr)
|
huge_prof_ctx_get(const void *ptr)
|
||||||
{
|
{
|
||||||
@ -365,7 +271,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
|
|||||||
|
|
||||||
malloc_mutex_unlock(&huge_mtx);
|
malloc_mutex_unlock(&huge_mtx);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
bool
|
bool
|
||||||
huge_boot(void)
|
huge_boot(void)
|
||||||
@ -376,11 +281,32 @@ huge_boot(void)
|
|||||||
return (true);
|
return (true);
|
||||||
extent_tree_ad_new(&huge);
|
extent_tree_ad_new(&huge);
|
||||||
|
|
||||||
#ifdef JEMALLOC_STATS
|
if (config_stats) {
|
||||||
huge_nmalloc = 0;
|
huge_nmalloc = 0;
|
||||||
huge_ndalloc = 0;
|
huge_ndalloc = 0;
|
||||||
huge_allocated = 0;
|
huge_allocated = 0;
|
||||||
#endif
|
}
|
||||||
|
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
huge_prefork(void)
|
||||||
|
{
|
||||||
|
|
||||||
|
malloc_mutex_prefork(&huge_mtx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
huge_postfork_parent(void)
|
||||||
|
{
|
||||||
|
|
||||||
|
malloc_mutex_postfork_parent(&huge_mtx);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
huge_postfork_child(void)
|
||||||
|
{
|
||||||
|
|
||||||
|
malloc_mutex_postfork_child(&huge_mtx);
|
||||||
|
}
|
||||||
|
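The new huge_prefork()/huge_postfork_parent()/huge_postfork_child() hooks above do nothing but take and release huge_mtx around fork(2), so a child process never starts life holding a lock owned by a thread that no longer exists in it. Below is a minimal, self-contained sketch of that same pattern applied to an arbitrary mutex; the demo_* names and the direct pthread_atfork() registration are illustrative assumptions, not jemalloc's own code.

#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire the lock before fork() so no other thread holds it at the moment
 * the address space is duplicated... */
static void demo_prefork(void)         { pthread_mutex_lock(&demo_mtx); }
/* ...and release it again in both the parent and the child. */
static void demo_postfork_parent(void) { pthread_mutex_unlock(&demo_mtx); }
static void demo_postfork_child(void)  { pthread_mutex_unlock(&demo_mtx); }

static void
demo_install_fork_handlers(void)
{
	/* Run the handlers around every fork() in the process. */
	pthread_atfork(demo_prefork, demo_postfork_parent, demo_postfork_child);
}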
src/jemalloc.c (1715 lines changed; diff suppressed because it is too large)

src/mutex.c (95 lines changed)
@@ -1,14 +1,26 @@
 #define JEMALLOC_MUTEX_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+#include <dlfcn.h>
+#endif
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
 /******************************************************************************/
 /* Data. */
 
 #ifdef JEMALLOC_LAZY_LOCK
 bool isthreaded = false;
 #endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
+static bool postpone_init = true;
+static malloc_mutex_t *postponed_mutexes = NULL;
+#endif
 
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
 static void	pthread_create_once(void);
 #endif
 
@@ -18,7 +30,7 @@ static void	pthread_create_once(void);
  * process goes multi-threaded.
  */
 
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
 static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
     void *(*)(void *), void *__restrict);
 
@@ -36,8 +48,7 @@ pthread_create_once(void)
 	isthreaded = true;
 }
 
-JEMALLOC_ATTR(visibility("default"))
-int
+JEMALLOC_EXPORT int
 pthread_create(pthread_t *__restrict thread,
     const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
     void *__restrict arg)
@@ -52,39 +63,87 @@ pthread_create(pthread_t *__restrict thread,
 
 /******************************************************************************/
 
+#ifdef JEMALLOC_MUTEX_INIT_CB
+int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
+#endif
+
 bool
 malloc_mutex_init(malloc_mutex_t *mutex)
 {
-#ifdef JEMALLOC_OSSPIN
-	*mutex = 0;
+#ifdef _WIN32
+	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+	    _CRT_SPINCOUNT))
+		return (true);
+#elif (defined(JEMALLOC_OSSPIN))
+	mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+	if (postpone_init) {
+		mutex->postponed_next = postponed_mutexes;
+		postponed_mutexes = mutex;
+	} else {
+		if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
+		    0)
+			return (true);
+	}
 #else
 	pthread_mutexattr_t attr;
 
 	if (pthread_mutexattr_init(&attr) != 0)
 		return (true);
-#ifdef PTHREAD_MUTEX_ADAPTIVE_NP
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-#else
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
-#endif
-	if (pthread_mutex_init(mutex, &attr) != 0) {
+	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
 		pthread_mutexattr_destroy(&attr);
 		return (true);
 	}
 	pthread_mutexattr_destroy(&attr);
-
 #endif
 	return (false);
 }
 
 void
-malloc_mutex_destroy(malloc_mutex_t *mutex)
+malloc_mutex_prefork(malloc_mutex_t *mutex)
 {
 
-#ifndef JEMALLOC_OSSPIN
-	if (pthread_mutex_destroy(mutex) != 0) {
-		malloc_write("<jemalloc>: Error in pthread_mutex_destroy()\n");
-		abort();
+	malloc_mutex_lock(mutex);
+}
+
+void
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+{
+
+	malloc_mutex_unlock(mutex);
+}
+
+void
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	malloc_mutex_unlock(mutex);
+#else
+	if (malloc_mutex_init(mutex)) {
+		malloc_printf("<jemalloc>: Error re-initializing mutex in "
+		    "child\n");
+		if (opt_abort)
+			abort();
 	}
 #endif
 }
+
+bool
+mutex_boot(void)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+	postpone_init = false;
+	while (postponed_mutexes != NULL) {
+		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+		    base_calloc) != 0)
+			return (true);
+		postponed_mutexes = postponed_mutexes->postponed_next;
+	}
+#endif
+	return (false);
+}
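malloc_mutex_init() above has to work before the allocator is fully bootstrapped, so on JEMALLOC_MUTEX_INIT_CB systems it merely chains mutexes onto postponed_mutexes until mutex_boot() can initialize them in one pass. Below is a stripped-down sketch of that postponed-initialization idiom; the demo_* names and the int standing in for the real lock type are illustrative assumptions, not the jemalloc implementation itself.

#include <stdbool.h>
#include <stddef.h>

typedef struct demo_mutex_s demo_mutex_t;
struct demo_mutex_s {
	int lock;                     /* stand-in for the real lock object */
	demo_mutex_t *postponed_next; /* link used only before boot */
};

static bool postpone_init = true;
static demo_mutex_t *postponed = NULL;

static bool
demo_mutex_init(demo_mutex_t *m)
{
	if (postpone_init) {
		/* Too early to initialize for real; remember it for later. */
		m->postponed_next = postponed;
		postponed = m;
		return (false);
	}
	m->lock = 0;                  /* real initialization would go here */
	return (false);
}

static bool
demo_mutex_boot(void)
{
	postpone_init = false;
	while (postponed != NULL) {
		postponed->lock = 0;  /* real initialization would go here */
		postponed = postponed->postponed_next;
	}
	return (false);
}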
src/prof.c (657 lines changed; diff suppressed because it is too large)

src/quarantine.c (new file, 210 lines)
@@ -0,0 +1,210 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/*
+ * quarantine pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define QUARANTINE_STATE_REINCARNATED	((quarantine_t *)(uintptr_t)1)
+#define QUARANTINE_STATE_PURGATORY	((quarantine_t *)(uintptr_t)2)
+#define QUARANTINE_STATE_MAX		QUARANTINE_STATE_PURGATORY
+
+/******************************************************************************/
+/* Data. */
+
+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
+struct quarantine_obj_s {
+	void	*ptr;
+	size_t	usize;
+};
+
+struct quarantine_s {
+	size_t	curbytes;
+	size_t	curobjs;
+	size_t	first;
+#define LG_MAXOBJS_INIT 10
+	size_t	lg_maxobjs;
+	quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+};
+
+static void	quarantine_cleanup(void *arg);
+
+malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
+malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
+    quarantine_cleanup)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t	*quarantine_init(size_t lg_maxobjs);
+static quarantine_t	*quarantine_grow(quarantine_t *quarantine);
+static void	quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(size_t lg_maxobjs)
+{
+	quarantine_t *quarantine;
+
+	quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
+	    ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+	if (quarantine == NULL)
+		return (NULL);
+	quarantine->curbytes = 0;
+	quarantine->curobjs = 0;
+	quarantine->first = 0;
+	quarantine->lg_maxobjs = lg_maxobjs;
+
+	quarantine_tsd_set(&quarantine);
+
+	return (quarantine);
+}
+
+static quarantine_t *
+quarantine_grow(quarantine_t *quarantine)
+{
+	quarantine_t *ret;
+
+	ret = quarantine_init(quarantine->lg_maxobjs + 1);
+	if (ret == NULL)
+		return (quarantine);
+
+	ret->curbytes = quarantine->curbytes;
+	ret->curobjs = quarantine->curobjs;
+	if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
+	    quarantine->lg_maxobjs)) {
+		/* objs ring buffer data are contiguous. */
+		memcpy(ret->objs, &quarantine->objs[quarantine->first],
+		    quarantine->curobjs * sizeof(quarantine_obj_t));
+	} else {
+		/* objs ring buffer data wrap around. */
+		size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
+		    quarantine->first;
+		size_t ncopy_b = quarantine->curobjs - ncopy_a;
+
+		memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
+		    * sizeof(quarantine_obj_t));
+		memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
+		    sizeof(quarantine_obj_t));
+	}
+
+	return (ret);
+}
+
+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+
+	while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
+		quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+		assert(obj->usize == isalloc(obj->ptr, config_prof));
+		idalloc(obj->ptr);
+		quarantine->curbytes -= obj->usize;
+		quarantine->curobjs--;
+		quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+		    quarantine->lg_maxobjs) - 1);
+	}
+}
+
+void
+quarantine(void *ptr)
+{
+	quarantine_t *quarantine;
+	size_t usize = isalloc(ptr, config_prof);
+
+	cassert(config_fill);
+	assert(opt_quarantine);
+
+	quarantine = *quarantine_tsd_get();
+	if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
+		if (quarantine == NULL) {
+			if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
+			    NULL) {
+				idalloc(ptr);
+				return;
+			}
+		} else {
+			if (quarantine == QUARANTINE_STATE_PURGATORY) {
+				/*
+				 * Make a note that quarantine() was called
+				 * after quarantine_cleanup() was called.
+				 */
+				quarantine = QUARANTINE_STATE_REINCARNATED;
+				quarantine_tsd_set(&quarantine);
+			}
+			idalloc(ptr);
+			return;
+		}
+	}
+	/*
+	 * Drain one or more objects if the quarantine size limit would be
+	 * exceeded by appending ptr.
+	 */
+	if (quarantine->curbytes + usize > opt_quarantine) {
+		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+		    - usize : 0;
+		quarantine_drain(quarantine, upper_bound);
+	}
+	/* Grow the quarantine ring buffer if it's full. */
+	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+		quarantine = quarantine_grow(quarantine);
+	/* quarantine_grow() must free a slot if it fails to grow. */
+	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+	/* Append ptr if its size doesn't exceed the quarantine size. */
+	if (quarantine->curbytes + usize <= opt_quarantine) {
+		size_t offset = (quarantine->first + quarantine->curobjs) &
+		    ((ZU(1) << quarantine->lg_maxobjs) - 1);
+		quarantine_obj_t *obj = &quarantine->objs[offset];
+		obj->ptr = ptr;
+		obj->usize = usize;
+		quarantine->curbytes += usize;
+		quarantine->curobjs++;
+		if (opt_junk)
+			memset(ptr, 0x5a, usize);
+	} else {
+		assert(quarantine->curbytes == 0);
+		idalloc(ptr);
+	}
+}
+
+static void
+quarantine_cleanup(void *arg)
+{
+	quarantine_t *quarantine = *(quarantine_t **)arg;
+
+	if (quarantine == QUARANTINE_STATE_REINCARNATED) {
+		/*
+		 * Another destructor deallocated memory after this destructor
+		 * was called.  Reset quarantine to QUARANTINE_STATE_PURGATORY
+		 * in order to receive another callback.
+		 */
+		quarantine = QUARANTINE_STATE_PURGATORY;
+		quarantine_tsd_set(&quarantine);
+	} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
+		/*
+		 * The previous time this destructor was called, we set the key
+		 * to QUARANTINE_STATE_PURGATORY so that other destructors
+		 * wouldn't cause re-creation of the quarantine.  This time, do
+		 * nothing, so that the destructor will not be called again.
+		 */
+	} else if (quarantine != NULL) {
+		quarantine_drain(quarantine, 0);
+		idalloc(quarantine);
+		quarantine = QUARANTINE_STATE_PURGATORY;
+		quarantine_tsd_set(&quarantine);
+	}
+}
+
+bool
+quarantine_boot(void)
+{
+
+	cassert(config_fill);
+
+	if (quarantine_tsd_boot())
+		return (true);
+
+	return (false);
+}
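quarantine() above keeps freed {ptr, usize} records in a ring buffer whose capacity is always a power of two (LG_MAXOBJS_INIT starts it at 1024 slots), so wrap-around indexing is a single mask rather than a modulo. A tiny sketch of that indexing, using illustrative demo_* names rather than the jemalloc types:

#include <stddef.h>

#define DEMO_LG_SLOTS 10	/* 2^10 = 1024 slots, like LG_MAXOBJS_INIT */

/* Index of the nth live record, counting from 'first', with wrap-around. */
static size_t
demo_ring_slot(size_t first, size_t nth)
{
	return ((first + nth) & (((size_t)1 << DEMO_LG_SLOTS) - 1));
}

At run time the quarantine is driven by the opt.quarantine byte limit together with opt.junk; in builds configured with fill support these are typically set through MALLOC_CONF (for example MALLOC_CONF=quarantine:8192,junk:true), though the exact option spelling should be checked against the manual for the jemalloc version in use.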
src/stats.c (485 lines changed)
@@ -39,140 +39,40 @@
 
 bool	opt_stats_print = false;
 
-#ifdef JEMALLOC_STATS
 size_t	stats_cactive = 0;
-#endif
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-#ifdef JEMALLOC_STATS
-static void	malloc_vcprintf(void (*write_cb)(void *, const char *),
-    void *cbopaque, const char *format, va_list ap);
 static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void	stats_arena_print(void (*write_cb)(void *, const char *),
-    void *cbopaque, unsigned i);
-#endif
+    void *cbopaque, unsigned i, bool bins, bool large);
 
 /******************************************************************************/
 
-/*
- * We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
- * printing functionality, so that malloc_printf() use can be limited to
- * JEMALLOC_STATS code.
- */
-char *
-u2s(uint64_t x, unsigned base, char *s)
-{
-	unsigned i;
-
-	i = UMAX2S_BUFSIZE - 1;
-	s[i] = '\0';
-	switch (base) {
-	case 10:
-		do {
-			i--;
-			s[i] = "0123456789"[x % (uint64_t)10];
-			x /= (uint64_t)10;
-		} while (x > 0);
-		break;
-	case 16:
-		do {
-			i--;
-			s[i] = "0123456789abcdef"[x & 0xf];
-			x >>= 4;
-		} while (x > 0);
-		break;
-	default:
-		do {
-			i--;
-			s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
-			    (uint64_t)base];
-			x /= (uint64_t)base;
-		} while (x > 0);
-	}
-
-	return (&s[i]);
-}
-
-#ifdef JEMALLOC_STATS
-static void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, va_list ap)
-{
-	char buf[4096];
-
-	if (write_cb == NULL) {
-		/*
-		 * The caller did not provide an alternate write_cb callback
-		 * function, so use the default one.  malloc_write() is an
-		 * inline function, so use malloc_message() directly here.
-		 */
-		write_cb = JEMALLOC_P(malloc_message);
-		cbopaque = NULL;
-	}
-
-	vsnprintf(buf, sizeof(buf), format, ap);
-	write_cb(cbopaque, buf);
-}
-
-/*
- * Print to a callback function in such a way as to (hopefully) avoid memory
- * allocation.
- */
-JEMALLOC_ATTR(format(printf, 3, 4))
-void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
-    const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(write_cb, cbopaque, format, ap);
-	va_end(ap);
-}
-
-/*
- * Print to stderr in such a way as to (hopefully) avoid memory allocation.
- */
-JEMALLOC_ATTR(format(printf, 1, 2))
-void
-malloc_printf(const char *format, ...)
-{
-	va_list ap;
-
-	va_start(ap, format);
-	malloc_vcprintf(NULL, NULL, format, ap);
-	va_end(ap);
-}
-#endif
-
-#ifdef JEMALLOC_STATS
 static void
 stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
 {
-	size_t pagesize;
+	size_t page;
 	bool config_tcache;
 	unsigned nbins, j, gap_start;
 
-	CTL_GET("arenas.pagesize", &pagesize, size_t);
+	CTL_GET("arenas.page", &page, size_t);
 
 	CTL_GET("config.tcache", &config_tcache, bool);
 	if (config_tcache) {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: bin size regs pgs allocated nmalloc"
 		    " ndalloc nrequests nfills nflushes"
-		    " newruns reruns maxruns curruns\n");
+		    " newruns reruns curruns\n");
 	} else {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: bin size regs pgs allocated nmalloc"
-		    " ndalloc newruns reruns maxruns"
-		    " curruns\n");
+		    " ndalloc newruns reruns curruns\n");
 	}
 	CTL_GET("arenas.nbins", &nbins, unsigned);
 	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
@@ -183,12 +83,11 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			if (gap_start == UINT_MAX)
 				gap_start = j;
 		} else {
-			unsigned ntbins_, nqbins, ncbins, nsbins;
 			size_t reg_size, run_size, allocated;
 			uint32_t nregs;
 			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
 			uint64_t reruns;
-			size_t highruns, curruns;
+			size_t curruns;
 
 			if (gap_start != UINT_MAX) {
 				if (j > gap_start + 1) {
@@ -203,10 +102,6 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				}
 				gap_start = UINT_MAX;
 			}
-			CTL_GET("arenas.ntbins", &ntbins_, unsigned);
-			CTL_GET("arenas.nqbins", &nqbins, unsigned);
-			CTL_GET("arenas.ncbins", &ncbins, unsigned);
-			CTL_GET("arenas.nsbins", &nsbins, unsigned);
 			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
 			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
 			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
@@ -226,36 +121,25 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
 			    uint64_t);
-			CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
-			    size_t);
 			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
 			    size_t);
 			if (config_tcache) {
 				malloc_cprintf(write_cb, cbopaque,
-				    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
-				    " %12zu %12zu\n",
-				    j,
-				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
-				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
-				    "S",
-				    reg_size, nregs, run_size / pagesize,
+				    " %12zu\n",
+				    j, reg_size, nregs, run_size / page,
 				    allocated, nmalloc, ndalloc, nrequests,
-				    nfills, nflushes, nruns, reruns, highruns,
-				    curruns);
+				    nfills, nflushes, nruns, reruns, curruns);
 			} else {
 				malloc_cprintf(write_cb, cbopaque,
-				    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
-				    " %12zu %12zu\n",
-				    j,
-				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
-				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
-				    "S",
-				    reg_size, nregs, run_size / pagesize,
+				    " %12zu\n",
+				    j, reg_size, nregs, run_size / page,
 				    allocated, nmalloc, ndalloc, nruns, reruns,
-				    highruns, curruns);
+				    curruns);
 			}
 		}
 	}
@@ -275,18 +159,18 @@ static void
 stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
 {
-	size_t pagesize, nlruns, j;
+	size_t page, nlruns, j;
 	ssize_t gap_start;
 
-	CTL_GET("arenas.pagesize", &pagesize, size_t);
+	CTL_GET("arenas.page", &page, size_t);
 
 	malloc_cprintf(write_cb, cbopaque,
 	    "large: size pages nmalloc ndalloc nrequests"
-	    " maxruns curruns\n");
+	    " curruns\n");
 	CTL_GET("arenas.nlruns", &nlruns, size_t);
 	for (j = 0, gap_start = -1; j < nlruns; j++) {
 		uint64_t nmalloc, ndalloc, nrequests;
-		size_t run_size, highruns, curruns;
+		size_t run_size, curruns;
 
 		CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
 		    uint64_t);
@@ -299,8 +183,6 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			gap_start = j;
 		} else {
 			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
-			CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
-			    size_t);
 			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
 			    size_t);
 			if (gap_start != -1) {
@@ -310,9 +192,9 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 			malloc_cprintf(write_cb, cbopaque,
 			    "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
-			    " %12zu %12zu\n",
-			    run_size, run_size / pagesize, nmalloc, ndalloc,
-			    nrequests, highruns, curruns);
+			    " %12zu\n",
+			    run_size, run_size / page, nmalloc, ndalloc,
+			    nrequests, curruns);
 		}
 	}
 	if (gap_start != -1)
@@ -321,17 +203,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 static void
 stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
-    unsigned i)
+    unsigned i, bool bins, bool large)
 {
 	unsigned nthreads;
-	size_t pagesize, pactive, pdirty, mapped;
+	size_t page, pactive, pdirty, mapped;
 	uint64_t npurge, nmadvise, purged;
 	size_t small_allocated;
 	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
 	size_t large_allocated;
 	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
 
-	CTL_GET("arenas.pagesize", &pagesize, size_t);
+	CTL_GET("arenas.page", &page, size_t);
 
 	CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
 	malloc_cprintf(write_cb, cbopaque,
@@ -369,15 +251,15 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	    small_nmalloc + large_nmalloc,
 	    small_ndalloc + large_ndalloc,
 	    small_nrequests + large_nrequests);
-	malloc_cprintf(write_cb, cbopaque, "active: %12zu\n",
-	    pactive * pagesize );
+	malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
 	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
 	malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
 
-	stats_arena_bins_print(write_cb, cbopaque, i);
-	stats_arena_lruns_print(write_cb, cbopaque, i);
+	if (bins)
+		stats_arena_bins_print(write_cb, cbopaque, i);
+	if (large)
+		stats_arena_lruns_print(write_cb, cbopaque, i);
 }
-#endif
 
 void
 stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -386,7 +268,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	int err;
 	uint64_t epoch;
 	size_t u64sz;
-	char s[UMAX2S_BUFSIZE];
 	bool general = true;
 	bool merged = true;
 	bool unmerged = true;
@@ -402,8 +283,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	 */
 	epoch = 1;
 	u64sz = sizeof(uint64_t);
-	err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
-	    sizeof(uint64_t));
+	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
 	if (err != 0) {
 		if (err == EAGAIN) {
 			malloc_write("<jemalloc>: Memory allocation failure in "
@@ -415,42 +295,33 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			abort();
 	}
 
-	if (write_cb == NULL) {
-		/*
-		 * The caller did not provide an alternate write_cb callback
-		 * function, so use the default one.  malloc_write() is an
-		 * inline function, so use malloc_message() directly here.
-		 */
-		write_cb = JEMALLOC_P(malloc_message);
-		cbopaque = NULL;
-	}
-
 	if (opts != NULL) {
 		unsigned i;
 
 		for (i = 0; opts[i] != '\0'; i++) {
 			switch (opts[i]) {
 			case 'g':
 				general = false;
 				break;
 			case 'm':
 				merged = false;
 				break;
 			case 'a':
 				unmerged = false;
 				break;
 			case 'b':
 				bins = false;
 				break;
 			case 'l':
 				large = false;
 				break;
 			default:;
 			}
 		}
 	}
 
-	write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
+	malloc_cprintf(write_cb, cbopaque,
+	    "___ Begin jemalloc statistics ___\n");
 	if (general) {
 		int err;
 		const char *cpv;
@@ -465,229 +336,126 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		cpsz = sizeof(const char *);
 
 		CTL_GET("version", &cpv, const char *);
-		write_cb(cbopaque, "Version: ");
-		write_cb(cbopaque, cpv);
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
 		CTL_GET("config.debug", &bv, bool);
-		write_cb(cbopaque, "Assertions ");
-		write_cb(cbopaque, bv ? "enabled" : "disabled");
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+		    bv ? "enabled" : "disabled");
 
 #define OPT_WRITE_BOOL(n) \
-		if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
-		    NULL, 0)) == 0) { \
-			write_cb(cbopaque, "  opt."#n": "); \
-			write_cb(cbopaque, bv ? "true" : "false"); \
-			write_cb(cbopaque, "\n"); \
+		if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
+		    == 0) { \
+			malloc_cprintf(write_cb, cbopaque, \
+			    "  opt."#n": %s\n", bv ? "true" : "false"); \
 		}
 #define OPT_WRITE_SIZE_T(n) \
-		if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
-		    NULL, 0)) == 0) { \
-			write_cb(cbopaque, "  opt."#n": "); \
-			write_cb(cbopaque, u2s(sv, 10, s)); \
-			write_cb(cbopaque, "\n"); \
+		if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
+		    == 0) { \
+			malloc_cprintf(write_cb, cbopaque, \
+			    "  opt."#n": %zu\n", sv); \
 		}
#define OPT_WRITE_SSIZE_T(n) \
-		if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
-		    NULL, 0)) == 0) { \
-			if (ssv >= 0) { \
-				write_cb(cbopaque, "  opt."#n": "); \
-				write_cb(cbopaque, u2s(ssv, 10, s)); \
-			} else { \
-				write_cb(cbopaque, "  opt."#n": -"); \
-				write_cb(cbopaque, u2s(-ssv, 10, s)); \
-			} \
-			write_cb(cbopaque, "\n"); \
+		if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
+		    == 0) { \
+			malloc_cprintf(write_cb, cbopaque, \
+			    "  opt."#n": %zd\n", ssv); \
 		}
 #define OPT_WRITE_CHAR_P(n) \
-		if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
-		    NULL, 0)) == 0) { \
-			write_cb(cbopaque, "  opt."#n": \""); \
-			write_cb(cbopaque, cpv); \
-			write_cb(cbopaque, "\"\n"); \
+		if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
+		    == 0) { \
+			malloc_cprintf(write_cb, cbopaque, \
+			    "  opt."#n": \"%s\"\n", cpv); \
 		}
 
-		write_cb(cbopaque, "Run-time option settings:\n");
+		malloc_cprintf(write_cb, cbopaque,
+		    "Run-time option settings:\n");
 		OPT_WRITE_BOOL(abort)
-		OPT_WRITE_SIZE_T(lg_qspace_max)
-		OPT_WRITE_SIZE_T(lg_cspace_max)
 		OPT_WRITE_SIZE_T(lg_chunk)
 		OPT_WRITE_SIZE_T(narenas)
 		OPT_WRITE_SSIZE_T(lg_dirty_mult)
 		OPT_WRITE_BOOL(stats_print)
 		OPT_WRITE_BOOL(junk)
+		OPT_WRITE_SIZE_T(quarantine)
+		OPT_WRITE_BOOL(redzone)
 		OPT_WRITE_BOOL(zero)
-		OPT_WRITE_BOOL(sysv)
+		OPT_WRITE_BOOL(utrace)
+		OPT_WRITE_BOOL(valgrind)
 		OPT_WRITE_BOOL(xmalloc)
 		OPT_WRITE_BOOL(tcache)
-		OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
 		OPT_WRITE_SSIZE_T(lg_tcache_max)
 		OPT_WRITE_BOOL(prof)
 		OPT_WRITE_CHAR_P(prof_prefix)
-		OPT_WRITE_SIZE_T(lg_prof_bt_max)
 		OPT_WRITE_BOOL(prof_active)
 		OPT_WRITE_SSIZE_T(lg_prof_sample)
 		OPT_WRITE_BOOL(prof_accum)
-		OPT_WRITE_SSIZE_T(lg_prof_tcmax)
 		OPT_WRITE_SSIZE_T(lg_prof_interval)
 		OPT_WRITE_BOOL(prof_gdump)
+		OPT_WRITE_BOOL(prof_final)
 		OPT_WRITE_BOOL(prof_leak)
-		OPT_WRITE_BOOL(overcommit)
 
 #undef OPT_WRITE_BOOL
 #undef OPT_WRITE_SIZE_T
 #undef OPT_WRITE_SSIZE_T
 #undef OPT_WRITE_CHAR_P
 
-		write_cb(cbopaque, "CPUs: ");
-		write_cb(cbopaque, u2s(ncpus, 10, s));
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
 
 		CTL_GET("arenas.narenas", &uv, unsigned);
-		write_cb(cbopaque, "Max arenas: ");
-		write_cb(cbopaque, u2s(uv, 10, s));
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
 
-		write_cb(cbopaque, "Pointer size: ");
-		write_cb(cbopaque, u2s(sizeof(void *), 10, s));
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+		    sizeof(void *));
 
 		CTL_GET("arenas.quantum", &sv, size_t);
-		write_cb(cbopaque, "Quantum size: ");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "\n");
+		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
 
-		CTL_GET("arenas.cacheline", &sv, size_t);
-		write_cb(cbopaque, "Cacheline size (assumed): ");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "\n");
-
-		CTL_GET("arenas.subpage", &sv, size_t);
-		write_cb(cbopaque, "Subpage spacing: ");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "\n");
-
-		if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
-		    NULL, 0)) == 0) {
-			write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, "..");
-
-			CTL_GET("arenas.tspace_max", &sv, size_t);
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, "]\n");
-		}
-
-		CTL_GET("arenas.qspace_min", &sv, size_t);
-		write_cb(cbopaque, "Quantum-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.qspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
-
-		CTL_GET("arenas.cspace_min", &sv, size_t);
-		write_cb(cbopaque, "Cacheline-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.cspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
-
-		CTL_GET("arenas.sspace_min", &sv, size_t);
-		write_cb(cbopaque, "Subpage-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.sspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
+		CTL_GET("arenas.page", &sv, size_t);
+		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
 
 		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
 		if (ssv >= 0) {
-			write_cb(cbopaque,
-			    "Min active:dirty page ratio per arena: ");
-			write_cb(cbopaque, u2s((1U << ssv), 10, s));
-			write_cb(cbopaque, ":1\n");
+			malloc_cprintf(write_cb, cbopaque,
+			    "Min active:dirty page ratio per arena: %u:1\n",
+			    (1U << ssv));
 		} else {
-			write_cb(cbopaque,
+			malloc_cprintf(write_cb, cbopaque,
 			    "Min active:dirty page ratio per arena: N/A\n");
 		}
-		if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
-		    &ssz, NULL, 0)) == 0) {
-			write_cb(cbopaque,
-			    "Maximum thread-cached size class: ");
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, "\n");
+		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
+		    == 0) {
+			malloc_cprintf(write_cb, cbopaque,
+			    "Maximum thread-cached size class: %zu\n", sv);
 		}
-		if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
-		    &ssz, NULL, 0)) == 0) {
-			size_t tcache_gc_sweep = (1U << ssv);
-			bool tcache_enabled;
-			CTL_GET("opt.tcache", &tcache_enabled, bool);
-			write_cb(cbopaque, "Thread cache GC sweep interval: ");
-			write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
-			    u2s(tcache_gc_sweep, 10, s) : "N/A");
-			write_cb(cbopaque, "\n");
-		}
-		if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
-		    == 0 && bv) {
-			CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
-			write_cb(cbopaque, "Maximum profile backtrace depth: ");
-			write_cb(cbopaque, u2s((1U << sv), 10, s));
-			write_cb(cbopaque, "\n");
-
-			CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
-			write_cb(cbopaque,
-			    "Maximum per thread backtrace cache: ");
-			if (ssv >= 0) {
-				write_cb(cbopaque, u2s((1U << ssv), 10, s));
-				write_cb(cbopaque, " (2^");
-				write_cb(cbopaque, u2s(ssv, 10, s));
-				write_cb(cbopaque, ")\n");
-			} else
-				write_cb(cbopaque, "N/A\n");
-
+		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
+		    bv) {
 			CTL_GET("opt.lg_prof_sample", &sv, size_t);
-			write_cb(cbopaque, "Average profile sample interval: ");
-			write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
-			write_cb(cbopaque, " (2^");
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, ")\n");
+			malloc_cprintf(write_cb, cbopaque,
+			    "Average profile sample interval: %"PRIu64
+			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
 
 			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
-			write_cb(cbopaque, "Average profile dump interval: ");
 			if (ssv >= 0) {
-				write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
-				    10, s));
-				write_cb(cbopaque, " (2^");
-				write_cb(cbopaque, u2s(ssv, 10, s));
-				write_cb(cbopaque, ")\n");
-			} else
-				write_cb(cbopaque, "N/A\n");
+				malloc_cprintf(write_cb, cbopaque,
+				    "Average profile dump interval: %"PRIu64
+				    " (2^%zd)\n",
+				    (((uint64_t)1U) << ssv), ssv);
+			} else {
+				malloc_cprintf(write_cb, cbopaque,
+				    "Average profile dump interval: N/A\n");
+			}
 		}
-		CTL_GET("arenas.chunksize", &sv, size_t);
-		write_cb(cbopaque, "Chunk size: ");
-		write_cb(cbopaque, u2s(sv, 10, s));
 		CTL_GET("opt.lg_chunk", &sv, size_t);
-		write_cb(cbopaque, " (2^");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, ")\n");
+		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
+		    (ZU(1) << sv), sv);
 	}
 
-#ifdef JEMALLOC_STATS
-	{
-		int err;
-		size_t sszp, ssz;
+	if (config_stats) {
 		size_t *cactive;
 		size_t allocated, active, mapped;
-		size_t chunks_current, chunks_high, swap_avail;
+		size_t chunks_current, chunks_high;
 		uint64_t chunks_total;
 		size_t huge_allocated;
 		uint64_t huge_nmalloc, huge_ndalloc;
 
-		sszp = sizeof(size_t *);
-		ssz = sizeof(size_t);
-
 		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
 		CTL_GET("stats.active", &active, size_t);
@@ -702,24 +470,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
 		CTL_GET("stats.chunks.high", &chunks_high, size_t);
 		CTL_GET("stats.chunks.current", &chunks_current, size_t);
-		if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
-		    NULL, 0)) == 0) {
-			size_t lg_chunk;
-
-			malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
-			    "highchunks curchunks swap_avail\n");
-			CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
-			malloc_cprintf(write_cb, cbopaque,
-			    "  %13"PRIu64"%13zu%13zu%13zu\n",
-			    chunks_total, chunks_high, chunks_current,
-			    swap_avail << lg_chunk);
-		} else {
-			malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
-			    "highchunks curchunks\n");
-			malloc_cprintf(write_cb, cbopaque,
-			    "  %13"PRIu64"%13zu%13zu\n",
-			    chunks_total, chunks_high, chunks_current);
-		}
+		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
+		    "highchunks curchunks\n");
+		malloc_cprintf(write_cb, cbopaque, "  %13"PRIu64"%13zu%13zu\n",
+		    chunks_total, chunks_high, chunks_current);
 
 		/* Print huge stats. */
 		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
@@ -736,11 +490,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 		CTL_GET("arenas.narenas", &narenas, unsigned);
 		{
-			bool initialized[narenas];
+			VARIABLE_ARRAY(bool, initialized, narenas);
 			size_t isz;
 			unsigned i, ninitialized;
 
-			isz = sizeof(initialized);
+			isz = sizeof(bool) * narenas;
 			xmallctl("arenas.initialized", initialized,
 			    &isz, NULL, 0);
 			for (i = ninitialized = 0; i < narenas; i++) {
@@ -753,7 +507,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				malloc_cprintf(write_cb, cbopaque,
 				    "\nMerged arenas stats:\n");
 				stats_arena_print(write_cb, cbopaque,
-				    narenas);
+				    narenas, bins, large);
 			}
 		}
 	}
@@ -765,11 +519,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 		CTL_GET("arenas.narenas", &narenas, unsigned);
 		{
-			bool initialized[narenas];
+			VARIABLE_ARRAY(bool, initialized, narenas);
 			size_t isz;
 			unsigned i;
 
-			isz = sizeof(initialized);
+			isz = sizeof(bool) * narenas;
 			xmallctl("arenas.initialized", initialized,
 			    &isz, NULL, 0);
 
@@ -779,12 +533,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 					    cbopaque,
 					    "\narenas[%u]:\n", i);
 					stats_arena_print(write_cb,
-					    cbopaque, i);
+					    cbopaque, i, bins, large);
 				}
 			}
 		}
 	}
-#endif /* #ifdef JEMALLOC_STATS */
-	write_cb(cbopaque, "--- End jemalloc statistics ---\n");
+	malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
 }
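The option characters parsed in stats_print() above ('g', 'm', 'a', 'b', 'l') are the same ones accepted by the public malloc_stats_print() entry point, which ends up calling this function. A minimal caller, assuming the installed jemalloc header is <jemalloc/jemalloc.h>:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/*
	 * A NULL write callback means output goes to the default
	 * malloc_message sink (stderr); "bl" suppresses the per-bin and
	 * per-large-size-class tables while keeping the rest.
	 */
	malloc_stats_print(NULL, NULL, "bl");
	return (0);
}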
src/tcache.c (390 lines changed)
@ -1,70 +1,92 @@
|
|||||||
#define JEMALLOC_TCACHE_C_
|
#define JEMALLOC_TCACHE_C_
|
||||||
#include "jemalloc/internal/jemalloc_internal.h"
|
#include "jemalloc/internal/jemalloc_internal.h"
|
||||||
#ifdef JEMALLOC_TCACHE
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Data. */
|
/* Data. */
|
||||||
|
|
||||||
|
malloc_tsd_data(, tcache, tcache_t *, NULL)
|
||||||
|
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
|
||||||
|
|
||||||
bool opt_tcache = true;
|
bool opt_tcache = true;
|
||||||
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
||||||
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
|
|
||||||
|
|
||||||
tcache_bin_info_t *tcache_bin_info;
|
tcache_bin_info_t *tcache_bin_info;
|
||||||
static unsigned stack_nelms; /* Total stack elms per tcache. */
|
static unsigned stack_nelms; /* Total stack elms per tcache. */
|
||||||
|
|
||||||
/* Map of thread-specific caches. */
|
size_t nhbins;
|
||||||
#ifndef NO_TLS
|
size_t tcache_maxclass;
|
||||||
__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Same contents as tcache, but initialized such that the TSD destructor is
|
|
||||||
* called when a thread exits, so that the cache can be cleaned up.
|
|
||||||
*/
|
|
||||||
pthread_key_t tcache_tsd;
|
|
||||||
|
|
||||||
size_t nhbins;
|
|
||||||
size_t tcache_maxclass;
|
|
||||||
unsigned tcache_gc_incr;
|
|
||||||
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Function prototypes for non-inline static functions. */
|
|
||||||
|
|
||||||
static void tcache_thread_cleanup(void *arg);
|
size_t tcache_salloc(const void *ptr)
|
||||||
|
{
|
||||||
|
|
||||||
/******************************************************************************/
|
return (arena_salloc(ptr, false));
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
tcache_event_hard(tcache_t *tcache)
|
||||||
|
{
|
||||||
|
size_t binind = tcache->next_gc_bin;
|
||||||
|
tcache_bin_t *tbin = &tcache->tbins[binind];
|
||||||
|
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
|
||||||
|
|
||||||
|
if (tbin->low_water > 0) {
|
||||||
|
/*
|
||||||
|
* Flush (ceiling) 3/4 of the objects below the low water mark.
|
||||||
|
*/
|
||||||
|
if (binind < NBINS) {
|
||||||
|
tcache_bin_flush_small(tbin, binind, tbin->ncached -
|
||||||
|
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||||
|
} else {
|
||||||
|
tcache_bin_flush_large(tbin, binind, tbin->ncached -
|
||||||
|
tbin->low_water + (tbin->low_water >> 2), tcache);
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* Reduce fill count by 2X. Limit lg_fill_div such that the
|
||||||
|
* fill count is always at least 1.
|
||||||
|
*/
|
||||||
|
if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
|
||||||
|
tbin->lg_fill_div++;
|
||||||
|
} else if (tbin->low_water < 0) {
|
||||||
|
/*
|
||||||
|
* Increase fill count by 2X. Make sure lg_fill_div stays
|
||||||
|
* greater than 0.
|
||||||
|
*/
|
||||||
|
if (tbin->lg_fill_div > 1)
|
||||||
|
tbin->lg_fill_div--;
|
||||||
|
}
|
||||||
|
tbin->low_water = tbin->ncached;
|
||||||
|
|
||||||
|
tcache->next_gc_bin++;
|
||||||
|
if (tcache->next_gc_bin == nhbins)
|
||||||
|
tcache->next_gc_bin = 0;
|
||||||
|
tcache->ev_cnt = 0;
|
||||||
|
}
|
||||||
|
|
||||||
 void *
 tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
 {
 	void *ret;

-	arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
-	    , tcache->prof_accumbytes
-#endif
-	    );
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes = 0;
-#endif
+	arena_tcache_fill_small(tcache->arena, tbin, binind,
+	    config_prof ? tcache->prof_accumbytes : 0);
+	if (config_prof)
+		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);

 	return (ret);
 }
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif

-	assert(binind < nbins);
+	assert(binind < NBINS);
 	assert(rem <= tbin->ncached);

 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
@@ -74,25 +96,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		arena_t *arena = chunk->arena;
 		arena_bin_t *bin = &arena->bins[binind];

-#ifdef JEMALLOC_PROF
-		if (arena == tcache->arena) {
+		if (config_prof && arena == tcache->arena) {
 			malloc_mutex_lock(&arena->lock);
 			arena_prof_accum(arena, tcache->prof_accumbytes);
 			malloc_mutex_unlock(&arena->lock);
 			tcache->prof_accumbytes = 0;
 		}
-#endif

 		malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
-		if (arena == tcache->arena) {
+		if (config_stats && arena == tcache->arena) {
 			assert(merged_stats == false);
 			merged_stats = true;
 			bin->stats.nflushes++;
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			tbin->tstats.nrequests = 0;
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -100,10 +118,15 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (chunk->arena == arena) {
 				size_t pageind = ((uintptr_t)ptr -
-				    (uintptr_t)chunk) >> PAGE_SHIFT;
+				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_t *mapelm =
-				    &chunk->map[pageind-map_bias];
-				arena_dalloc_bin(arena, chunk, ptr, mapelm);
+				    arena_mapp_get(chunk, pageind);
+				if (config_fill && opt_junk) {
+					arena_alloc_junk_small(ptr,
+					    &arena_bin_info[binind], true);
+				}
+				arena_dalloc_bin_locked(arena, chunk, ptr,
+				    mapelm);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -117,8 +140,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&bin->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -130,7 +152,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&bin->lock);
 	}
-#endif

 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -140,17 +161,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 }

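The flush loops above lock one arena (or bin) per pass, release every cached object that arena owns, and compact the foreign objects to the front of avail[] so a later pass can handle them. A self-contained sketch of that defer-and-retry pattern, with an illustrative chunk-mask owner lookup standing in for CHUNK_ADDR2BASE and the real locking:

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_SIZE ((uintptr_t)1 << 22)   /* illustrative 4 MiB "chunks" */

    /* Owner of an object: its enclosing chunk base. */
    static uintptr_t object_owner(void *obj)
    {
        return ((uintptr_t)obj & ~(CHUNK_SIZE - 1));
    }

    /*
     * Flush the first nflush objects of avail[]: each pass picks the owner of
     * the first object, "releases" everything that owner holds, and keeps the
     * rest at the front for the next pass (the ndeferred pattern above).
     */
    static void flush_deferred(void **avail, unsigned nflush)
    {
        while (nflush > 0) {
            uintptr_t owner = object_owner(avail[0]);
            unsigned i, ndeferred = 0;

            /* A real allocator would lock this owner's bin/arena here. */
            for (i = 0; i < nflush; i++) {
                void *obj = avail[i];
                if (object_owner(obj) == owner)
                    printf("release %p\n", obj);
                else
                    avail[ndeferred++] = obj;   /* defer to a later pass */
            }
            nflush = ndeferred;
        }
    }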
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif

 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);

@@ -162,30 +178,28 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		arena_t *arena = chunk->arena;

 		malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
-			arena_prof_accum(arena, tcache->prof_accumbytes);
-			tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
-			merged_stats = true;
-			arena->stats.nrequests_large += tbin->tstats.nrequests;
-			arena->stats.lstats[binind - nbins].nrequests +=
-			    tbin->tstats.nrequests;
-			tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		}
-#endif
+		if ((config_prof || config_stats) && arena == tcache->arena) {
+			if (config_prof) {
+				arena_prof_accum(arena,
+				    tcache->prof_accumbytes);
+				tcache->prof_accumbytes = 0;
+			}
+			if (config_stats) {
+				merged_stats = true;
+				arena->stats.nrequests_large +=
+				    tbin->tstats.nrequests;
+				arena->stats.lstats[binind - NBINS].nrequests +=
+				    tbin->tstats.nrequests;
+				tbin->tstats.nrequests = 0;
+			}
+		}
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (chunk->arena == arena)
-				arena_dalloc_large(arena, chunk, ptr);
+				arena_dalloc_large_locked(arena, chunk, ptr);
 			else {
 				/*
 				 * This object was allocated via a different
@@ -199,8 +213,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		}
 		malloc_mutex_unlock(&arena->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -208,12 +221,11 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 		arena_t *arena = tcache->arena;
 		malloc_mutex_lock(&arena->lock);
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
-		arena->stats.lstats[binind - nbins].nrequests +=
+		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&arena->lock);
 	}
-#endif

 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -222,6 +234,33 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 	tbin->low_water = tbin->ncached;
 }

+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+{
+
+	if (config_stats) {
+		/* Link into list of extant tcaches. */
+		malloc_mutex_lock(&arena->lock);
+		ql_elm_new(tcache, link);
+		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&arena->lock);
+	}
+	tcache->arena = arena;
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache)
+{
+
+	if (config_stats) {
+		/* Unlink from list of extant tcaches. */
+		malloc_mutex_lock(&tcache->arena->lock);
+		ql_remove(&tcache->arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&tcache->arena->lock);
+		tcache_stats_merge(tcache, tcache->arena);
+	}
+}
+
 tcache_t *
 tcache_create(arena_t *arena)
 {
@@ -244,7 +283,7 @@ tcache_create(arena_t *arena)
 	 */
 	size = (size + CACHELINE_MASK) & (-CACHELINE);

-	if (size <= small_maxclass)
+	if (size <= SMALL_MAXCLASS)
 		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
 	else if (size <= tcache_maxclass)
 		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
@@ -254,15 +293,8 @@ tcache_create(arena_t *arena)
 	if (tcache == NULL)
 		return (NULL);

-#ifdef JEMALLOC_STATS
-	/* Link into list of extant tcaches. */
-	malloc_mutex_lock(&arena->lock);
-	ql_elm_new(tcache, link);
-	ql_tail_insert(&arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&arena->lock);
-#endif
-
-	tcache->arena = arena;
+	tcache_arena_associate(tcache, arena);
+
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
 	for (i = 0; i < nhbins; i++) {
 		tcache->tbins[i].lg_fill_div = 1;
@@ -271,7 +303,7 @@ tcache_create(arena_t *arena)
 		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
 	}

-	TCACHE_SET(tcache);
+	tcache_tsd_set(&tcache);

 	return (tcache);
 }
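tcache_create() rounds the allocation size up to a cache-line multiple with (size + CACHELINE_MASK) & (-CACHELINE), which for unsigned arithmetic is the same as masking off the low bits, so adjacent thread caches never share a line. A tiny worked example, assuming a 64-byte line:

    #include <stdio.h>

    #define CACHELINE      64U            /* assumed cache-line size */
    #define CACHELINE_MASK (CACHELINE - 1)

    /* Round sz up to a cache-line multiple, as tcache_create() does. */
    static unsigned cacheline_ceiling(unsigned sz)
    {
        return ((sz + CACHELINE_MASK) & ~CACHELINE_MASK);
    }

    int main(void)
    {
        /* 200 -> 256, 256 -> 256, 257 -> 320 */
        printf("%u %u %u\n", cacheline_ceiling(200), cacheline_ceiling(256),
            cacheline_ceiling(257));
        return (0);
    }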
@@ -282,121 +314,96 @@ tcache_destroy(tcache_t *tcache)
 	unsigned i;
 	size_t tcache_size;

-#ifdef JEMALLOC_STATS
-	/* Unlink from list of extant tcaches. */
-	malloc_mutex_lock(&tcache->arena->lock);
-	ql_remove(&tcache->arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&tcache->arena->lock);
-	tcache_stats_merge(tcache, tcache->arena);
-#endif
+	tcache_arena_dissociate(tcache);

-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_small(tbin, i, 0, tcache);

-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			arena_bin_t *bin = &arena->bins[i];
 			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			malloc_mutex_unlock(&bin->lock);
 		}
-#endif
 	}

 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_large(tbin, i, 0, tcache);

-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
-			arena->stats.lstats[i - nbins].nrequests +=
+			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(&arena->lock);
 		}
-#endif
 	}

-#ifdef JEMALLOC_PROF
-	if (tcache->prof_accumbytes > 0) {
+	if (config_prof && tcache->prof_accumbytes > 0) {
 		malloc_mutex_lock(&tcache->arena->lock);
 		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
 		malloc_mutex_unlock(&tcache->arena->lock);
 	}
-#endif

-	tcache_size = arena_salloc(tcache);
-	if (tcache_size <= small_maxclass) {
+	tcache_size = arena_salloc(tcache, false);
+	if (tcache_size <= SMALL_MAXCLASS) {
 		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
 		arena_t *arena = chunk->arena;
 		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
-		    PAGE_SHIFT;
-		arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
-		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-		    (uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
-		    PAGE_SHIFT));
-		arena_bin_t *bin = run->bin;
+		    LG_PAGE;
+		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

-		malloc_mutex_lock(&bin->lock);
-		arena_dalloc_bin(arena, chunk, tcache, mapelm);
-		malloc_mutex_unlock(&bin->lock);
+		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
 	} else if (tcache_size <= tcache_maxclass) {
 		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
 		arena_t *arena = chunk->arena;

-		malloc_mutex_lock(&arena->lock);
 		arena_dalloc_large(arena, chunk, tcache);
-		malloc_mutex_unlock(&arena->lock);
 	} else
 		idalloc(tcache);
 }

-static void
+void
 tcache_thread_cleanup(void *arg)
 {
-	tcache_t *tcache = (tcache_t *)arg;
+	tcache_t *tcache = *(tcache_t **)arg;

-	if (tcache == (void *)(uintptr_t)1) {
-		/*
-		 * The previous time this destructor was called, we set the key
-		 * to 1 so that other destructors wouldn't cause re-creation of
-		 * the tcache.  This time, do nothing, so that the destructor
-		 * will not be called again.
-		 */
-	} else if (tcache == (void *)(uintptr_t)2) {
+	if (tcache == TCACHE_STATE_DISABLED) {
+		/* Do nothing. */
+	} else if (tcache == TCACHE_STATE_REINCARNATED) {
 		/*
 		 * Another destructor called an allocator function after this
-		 * destructor was called.  Reset tcache to 1 in order to
-		 * receive another callback.
+		 * destructor was called.  Reset tcache to
+		 * TCACHE_STATE_PURGATORY in order to receive another callback.
+		 */
+		tcache = TCACHE_STATE_PURGATORY;
+		tcache_tsd_set(&tcache);
+	} else if (tcache == TCACHE_STATE_PURGATORY) {
+		/*
+		 * The previous time this destructor was called, we set the key
+		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
+		 * cause re-creation of the tcache.  This time, do nothing, so
+		 * that the destructor will not be called again.
 		 */
-		TCACHE_SET((uintptr_t)1);
 	} else if (tcache != NULL) {
-		assert(tcache != (void *)(uintptr_t)1);
+		assert(tcache != TCACHE_STATE_PURGATORY);
 		tcache_destroy(tcache);
-		TCACHE_SET((uintptr_t)1);
+		tcache = TCACHE_STATE_PURGATORY;
+		tcache_tsd_set(&tcache);
 	}
 }

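The cleanup callback above treats the TSD slot as a small state machine (live pointer, PURGATORY, REINCARNATED, DISABLED) so a destructor that allocates after this one has run can still get its cache released on a later pass. A rough standalone sketch of that idea; the sentinel encodings here are placeholders, not jemalloc's actual TCACHE_STATE_* definitions:

    #include <stdint.h>
    #include <stdlib.h>

    #define STATE_DISABLED     ((void *)(uintptr_t)1)
    #define STATE_REINCARNATED ((void *)(uintptr_t)2)
    #define STATE_PURGATORY    ((void *)(uintptr_t)3)

    /*
     * One pass of a TSD destructor over slot *slotp.  Returns nonzero if it
     * should be invoked again on a later cleanup pass.
     */
    static int cleanup_pass(void **slotp)
    {
        void *v = *slotp;

        if (v == STATE_DISABLED || v == STATE_PURGATORY) {
            return (0);                 /* nothing left to do */
        } else if (v == STATE_REINCARNATED) {
            *slotp = STATE_PURGATORY;   /* another destructor allocated again */
            return (1);
        } else if (v != NULL) {
            free(v);                    /* stand-in for tcache_destroy() */
            *slotp = STATE_PURGATORY;
            return (1);
        }
        return (0);
    }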
-#ifdef JEMALLOC_STATS
 void
 tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 {
 	unsigned i;

 	/* Merge and reset tcache stats. */
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		tcache_bin_t *tbin = &tcache->tbins[i];
 		malloc_mutex_lock(&bin->lock);
@@ -406,75 +413,62 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 	}

 	for (; i < nhbins; i++) {
-		malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins];
+		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
 		tcache_bin_t *tbin = &tcache->tbins[i];
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
 		lstats->nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 	}
 }
-#endif

 bool
-tcache_boot(void)
+tcache_boot0(void)
 {
+	unsigned i;

-	if (opt_tcache) {
-		unsigned i;
-
-		/*
-		 * If necessary, clamp opt_lg_tcache_max, now that
-		 * small_maxclass and arena_maxclass are known.
-		 */
-		if (opt_lg_tcache_max < 0 || (1U <<
-		    opt_lg_tcache_max) < small_maxclass)
-			tcache_maxclass = small_maxclass;
-		else if ((1U << opt_lg_tcache_max) > arena_maxclass)
-			tcache_maxclass = arena_maxclass;
-		else
-			tcache_maxclass = (1U << opt_lg_tcache_max);
-
-		nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
-
-		/* Initialize tcache_bin_info. */
-		tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
-		    sizeof(tcache_bin_info_t));
-		if (tcache_bin_info == NULL)
-			return (true);
-		stack_nelms = 0;
-		for (i = 0; i < nbins; i++) {
-			if ((arena_bin_info[i].nregs << 1) <=
-			    TCACHE_NSLOTS_SMALL_MAX) {
-				tcache_bin_info[i].ncached_max =
-				    (arena_bin_info[i].nregs << 1);
-			} else {
-				tcache_bin_info[i].ncached_max =
-				    TCACHE_NSLOTS_SMALL_MAX;
-			}
-			stack_nelms += tcache_bin_info[i].ncached_max;
-		}
-		for (; i < nhbins; i++) {
-			tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
-			stack_nelms += tcache_bin_info[i].ncached_max;
-		}
-
-		/* Compute incremental GC event threshold. */
-		if (opt_lg_tcache_gc_sweep >= 0) {
-			tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
-			    nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
-			    0) ? 0 : 1);
-		} else
-			tcache_gc_incr = 0;
-
-		if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
-		    0) {
-			malloc_write(
-			    "<jemalloc>: Error in pthread_key_create()\n");
-			abort();
-		}
+	/*
+	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+	 * known.
+	 */
+	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+		tcache_maxclass = SMALL_MAXCLASS;
+	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
+		tcache_maxclass = arena_maxclass;
+	else
+		tcache_maxclass = (1U << opt_lg_tcache_max);
+
+	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+
+	/* Initialize tcache_bin_info. */
+	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+	    sizeof(tcache_bin_info_t));
+	if (tcache_bin_info == NULL)
+		return (true);
+	stack_nelms = 0;
+	for (i = 0; i < NBINS; i++) {
+		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+			tcache_bin_info[i].ncached_max =
+			    (arena_bin_info[i].nregs << 1);
+		} else {
+			tcache_bin_info[i].ncached_max =
+			    TCACHE_NSLOTS_SMALL_MAX;
+		}
+		stack_nelms += tcache_bin_info[i].ncached_max;
+	}
+	for (; i < nhbins; i++) {
+		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+		stack_nelms += tcache_bin_info[i].ncached_max;
 	}

 	return (false);
 }
-/******************************************************************************/
-#endif /* JEMALLOC_TCACHE */
+
+bool
+tcache_boot1(void)
+{
+
+	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
+		return (true);
+
+	return (false);
+}
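tcache_boot0() clamps 1 << opt_lg_tcache_max into [SMALL_MAXCLASS, arena_maxclass] and then derives the number of cached bins as NBINS plus one bin per page-multiple large class up to that maximum. A small sketch of that arithmetic with illustrative constants (the real values come from size_classes.h and the chunk layout):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/types.h>

    #define LG_PAGE        12
    #define SMALL_MAXCLASS ((size_t)3584)   /* illustrative */
    #define NBINS          28               /* illustrative */

    /* Clamp the requested maximum tcached size, as tcache_boot0() does. */
    static size_t clamp_tcache_maxclass(ssize_t lg_max, size_t arena_maxclass)
    {
        if (lg_max < 0 || ((size_t)1 << lg_max) < SMALL_MAXCLASS)
            return (SMALL_MAXCLASS);
        if (((size_t)1 << lg_max) > arena_maxclass)
            return (arena_maxclass);
        return ((size_t)1 << lg_max);
    }

    int main(void)
    {
        size_t arena_maxclass = (size_t)1 << 21;   /* assumed 2 MiB */
        size_t maxclass = clamp_tcache_maxclass(15, arena_maxclass);
        size_t nhbins = NBINS + (maxclass >> LG_PAGE);

        /* lg_max = 15 -> 32 KiB cached large classes -> 28 + 8 = 36 bins. */
        printf("tcache_maxclass=%zu nhbins=%zu\n", maxclass, nhbins);
        return (0);
    }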
src/tsd.c  (new file, 107 lines)
@@ -0,0 +1,107 @@
+#define	JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+	/* Avoid choose_arena() in order to dodge bootstrapping issues. */
+	return (arena_malloc(arenas[0], size, false, false));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+	idalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+	not_reached();
+}
+
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
+void
+_malloc_thread_cleanup(void)
+{
+	bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
+	unsigned i;
+
+	for (i = 0; i < ncleanups; i++)
+		pending[i] = true;
+
+	do {
+		again = false;
+		for (i = 0; i < ncleanups; i++) {
+			if (pending[i]) {
+				pending[i] = cleanups[i]();
+				if (pending[i])
+					again = true;
+			}
+		}
+	} while (again);
+}
+#endif
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void))
+{
+
+	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+	cleanups[ncleanups] = f;
+	ncleanups++;
+}
+
+void
+malloc_tsd_boot(void)
+{
+
+	ncleanups = 0;
+}
+
+#ifdef _WIN32
+static BOOL WINAPI
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+
+	switch (fdwReason) {
+#ifdef JEMALLOC_LAZY_LOCK
+	case DLL_THREAD_ATTACH:
+		isthreaded = true;
+		break;
+#endif
+	case DLL_THREAD_DETACH:
+		_malloc_thread_cleanup();
+		break;
+	default:
+		break;
+	}
+	return (true);
+}
+
+#ifdef _MSC_VER
+#  ifdef _M_IX86
+#    pragma comment(linker, "/INCLUDE:__tls_used")
+#  else
+#    pragma comment(linker, "/INCLUDE:_tls_used")
+#  endif
+#  pragma section(".CRT$XLY",long,read)
+#endif
+JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
+static const BOOL	(WINAPI *tls_callback)(HINSTANCE hinstDLL,
+    DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
+#endif
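_malloc_thread_cleanup() keeps re-running every registered cleanup until all of them report that nothing is pending, because one subsystem's cleanup may allocate and thereby re-create another subsystem's thread-specific data. A self-contained toy version of that loop, with a fake per-subsystem work budget standing in for the real cleanups[] callbacks:

    #include <stdbool.h>
    #include <stdio.h>

    #define NCLEANUPS 2

    static int budget[NCLEANUPS] = {1, 3};   /* pretend work left per subsystem */

    /* Returns true while this cleanup still has pending work. */
    static bool cleanup(unsigned i)
    {
        if (budget[i] == 0)
            return (false);
        budget[i]--;
        printf("cleanup %u ran, %d pass(es) left\n", i, budget[i]);
        return (budget[i] != 0);
    }

    int main(void)
    {
        bool pending[NCLEANUPS], again;
        unsigned i;

        for (i = 0; i < NCLEANUPS; i++)
            pending[i] = true;
        do {
            again = false;
            for (i = 0; i < NCLEANUPS; i++) {
                if (pending[i]) {
                    pending[i] = cleanup(i);
                    if (pending[i])
                        again = true;
                }
            }
        } while (again);
        return (0);
    }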
src/util.c  (new file, 646 lines)
@@ -0,0 +1,646 @@
+#define	assert(e) do {							\
+	if (config_debug && !(e)) {					\
+		malloc_write("<jemalloc>: Failed assertion\n");		\
+		abort();						\
+	}								\
+} while (0)
+
+#define	not_reached() do {						\
+	if (config_debug) {						\
+		malloc_write("<jemalloc>: Unreachable code reached\n");	\
+		abort();						\
+	}								\
+} while (0)
+
+#define	not_implemented() do {						\
+	if (config_debug) {						\
+		malloc_write("<jemalloc>: Not implemented\n");		\
+		abort();						\
+	}								\
+} while (0)
+
+#define	JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void	wrtmessage(void *cbopaque, const char *s);
+#define	U2S_BUFSIZE	((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char	*u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+    size_t *slen_p);
+#define	D2S_BUFSIZE	(1 + U2S_BUFSIZE)
+static char	*d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define	O2S_BUFSIZE	(1 + U2S_BUFSIZE)
+static char	*o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define	X2S_BUFSIZE	(2 + U2S_BUFSIZE)
+static char	*x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+    size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+static void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#ifdef SYS_write
+	/*
+	 * Use syscall(2) rather than write(2) when possible in order to avoid
+	 * the possibility of memory allocation within libc.  This is necessary
+	 * on FreeBSD; most operating systems do not have this problem though.
+	 */
+	UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+JEMALLOC_EXPORT void	(*je_malloc_message)(void *, const char *s);
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+void
+malloc_write(const char *s)
+{
+
+	if (je_malloc_message != NULL)
+		je_malloc_message(NULL, s);
+	else
+		wrtmessage(NULL, s);
+}
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(char *buf, size_t buflen)
+{
+
+#ifdef _WIN32
+	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+	    (LPSTR)buf, buflen, NULL);
+	return (0);
+#elif defined(_GNU_SOURCE)
+	char *b = strerror_r(errno, buf, buflen);
+	if (b != buf) {
+		strncpy(buf, b, buflen);
+		buf[buflen-1] = '\0';
+	}
+	return (0);
+#else
+	return (strerror_r(errno, buf, buflen));
+#endif
+}
+
+uintmax_t
+malloc_strtoumax(const char *nptr, char **endptr, int base)
+{
+	/* ... (whitespace/sign/prefix handling, then overflow-checked
+	 *  digit-by-digit accumulation; sets errno to EINVAL/ERANGE) ... */
+}
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+	/* ... (emits digits into the tail of s for bases 10, 16, and 2-36,
+	 *  returning a pointer to the first digit) ... */
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+	/* ... (u2s() plus '-', ' ', or '+' sign handling) ... */
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+	/* ... (u2s() base 8 plus optional leading '0' for the alt form) ... */
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+	/* ... (u2s() base 16 plus optional "0x"/"0X" prefix) ... */
+}
+
+int
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+	/* ... (flag/width/precision/length parsing and the d, i, o, u, x, X,
+	 *  c, s, and p conversions, built on the APPEND_C/APPEND_S/
+	 *  APPEND_PADDED_S and GET_ARG_NUMERIC macros) ... */
+}
+
+JEMALLOC_ATTR(format(printf, 3, 4))
+int
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+	int ret;
+	va_list ap;
+
+	va_start(ap, format);
+	ret = malloc_vsnprintf(str, size, format, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *format, va_list ap)
+{
+	char buf[MALLOC_PRINTF_BUFSIZE];
+
+	if (write_cb == NULL) {
+		/*
+		 * The caller did not provide an alternate write_cb callback
+		 * function, so use the default one.  malloc_write() is an
+		 * inline function, so use malloc_message() directly here.
+		 */
+		write_cb = (je_malloc_message != NULL) ? je_malloc_message :
+		    wrtmessage;
+		cbopaque = NULL;
+	}
+
+	malloc_vsnprintf(buf, sizeof(buf), format, ap);
+	write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_ATTR(format(printf, 3, 4))
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	malloc_vcprintf(write_cb, cbopaque, format, ap);
+	va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+malloc_printf(const char *format, ...)
+{
+	va_list ap;
+
+	va_start(ap, format);
+	malloc_vcprintf(NULL, NULL, format, ap);
+	va_end(ap);
+}
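malloc_strtoumax()'s conversion loop detects overflow by saving the accumulator before each multiply-add and checking whether the new value wrapped below the old one, setting ERANGE if so. A minimal standalone sketch of that check:

    #include <errno.h>
    #include <stdint.h>

    /*
     * Accumulate one digit into an unsigned running value, detecting
     * wraparound the way malloc_strtoumax() does: if the new value is smaller
     * than the previous one, the multiply-add overflowed.
     */
    static int accum_digit(uintmax_t *ret, unsigned base, unsigned digit)
    {
        uintmax_t prev = *ret;

        *ret = *ret * base + digit;
        if (*ret < prev) {
            errno = ERANGE;
            return (-1);        /* overflow */
        }
        return (0);
    }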
src/zone.c
@@ -3,11 +3,18 @@
 # error "This source file is for zones on Darwin (OS X)."
 #endif

+/*
+ * The malloc_default_purgeable_zone function is only available on >= 10.6.
+ * We need to check whether it is present at runtime, thus the weak_import.
+ */
+extern malloc_zone_t *malloc_default_purgeable_zone(void)
+JEMALLOC_ATTR(weak_import);
+
 /******************************************************************************/
 /* Data. */

-static malloc_zone_t zone, szone;
-static struct malloc_introspection_t zone_introspect, ozone_introspect;
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;

 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
@@ -18,8 +25,10 @@
 static void	*zone_valloc(malloc_zone_t *zone, size_t size);
 static void	zone_free(malloc_zone_t *zone, void *ptr);
 static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-#if (JEMALLOC_ZONE_VERSION >= 6)
+#if (JEMALLOC_ZONE_VERSION >= 5)
 static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
     size_t size);
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
 static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
     size_t size);
@@ -28,19 +37,6 @@
 static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
 static void	zone_force_lock(malloc_zone_t *zone);
 static void	zone_force_unlock(malloc_zone_t *zone);
-static size_t	ozone_size(malloc_zone_t *zone, void *ptr);
-static void	ozone_free(malloc_zone_t *zone, void *ptr);
-static void	*ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-static unsigned	ozone_batch_malloc(malloc_zone_t *zone, size_t size,
-    void **results, unsigned num_requested);
-static void	ozone_batch_free(malloc_zone_t *zone, void **to_be_freed,
-    unsigned num);
-#if (JEMALLOC_ZONE_VERSION >= 6)
-static void	ozone_free_definite_size(malloc_zone_t *zone, void *ptr,
-    size_t size);
-#endif
-static void	ozone_force_lock(malloc_zone_t *zone);
-static void	ozone_force_unlock(malloc_zone_t *zone);

 /******************************************************************************/
 /*
@@ -60,21 +56,21 @@ zone_size(malloc_zone_t *zone, void *ptr)
 	 * not work in practice, we must check all pointers to assure that they
 	 * reside within a mapped chunk before determining size.
 	 */
-	return (ivsalloc(ptr));
+	return (ivsalloc(ptr, config_prof));
 }

 static void *
 zone_malloc(malloc_zone_t *zone, size_t size)
 {

-	return (JEMALLOC_P(malloc)(size));
+	return (je_malloc(size));
 }

 static void *
 zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
 {

-	return (JEMALLOC_P(calloc)(num, size));
+	return (je_calloc(num, size));
 }

 static void *
@@ -82,7 +78,7 @@ zone_valloc(malloc_zone_t *zone, size_t size)
 {
 	void *ret = NULL; /* Assignment avoids useless compiler warning. */

-	JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
+	je_posix_memalign(&ret, PAGE, size);

 	return (ret);
 }
@@ -91,33 +87,48 @@ static void
 zone_free(malloc_zone_t *zone, void *ptr)
 {

-	JEMALLOC_P(free)(ptr);
+	if (ivsalloc(ptr, config_prof) != 0) {
+		je_free(ptr);
+		return;
+	}
+
+	free(ptr);
 }

 static void *
 zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
 {

-	return (JEMALLOC_P(realloc)(ptr, size));
+	if (ivsalloc(ptr, config_prof) != 0)
+		return (je_realloc(ptr, size));
+
+	return (realloc(ptr, size));
 }

-#if (JEMALLOC_ZONE_VERSION >= 6)
+#if (JEMALLOC_ZONE_VERSION >= 5)
 static void *
 zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
 {
 	void *ret = NULL; /* Assignment avoids useless compiler warning. */

-	JEMALLOC_P(posix_memalign)(&ret, alignment, size);
+	je_posix_memalign(&ret, alignment, size);

 	return (ret);
 }
+#endif

+#if (JEMALLOC_ZONE_VERSION >= 6)
 static void
 zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
 {

-	assert(ivsalloc(ptr) == size);
-	JEMALLOC_P(free)(ptr);
+	if (ivsalloc(ptr, config_prof) != 0) {
+		assert(ivsalloc(ptr, config_prof) == size);
+		je_free(ptr);
+		return;
+	}
+
+	free(ptr);
 }
 #endif
@@ -133,22 +144,10 @@ zone_destroy(malloc_zone_t *zone)
 static size_t
 zone_good_size(malloc_zone_t *zone, size_t size)
 {
-	size_t ret;
-	void *p;
-
-	/*
-	 * Actually create an object of the appropriate size, then find out
-	 * how large it could have been without moving up to the next size
-	 * class.
-	 */
-	p = JEMALLOC_P(malloc)(size);
-	if (p != NULL) {
-		ret = isalloc(p);
-		JEMALLOC_P(free)(p);
-	} else
-		ret = size;
-
-	return (ret);
+	if (size == 0)
+		size = 1;
+	return (s2u(size));
 }
@@ -164,11 +163,12 @@ zone_force_unlock(malloc_zone_t *zone)
 {

 	if (isthreaded)
-		jemalloc_postfork();
+		jemalloc_postfork_parent();
 }

-malloc_zone_t *
-create_zone(void)
+JEMALLOC_ATTR(constructor)
+void
+register_zone(void)
 {

 	zone.size = (void *)zone_size;
@@ -183,10 +183,15 @@ create_zone(void)
 	zone.batch_free = NULL;
 	zone.introspect = &zone_introspect;
 	zone.version = JEMALLOC_ZONE_VERSION;
-#if (JEMALLOC_ZONE_VERSION >= 6)
+#if (JEMALLOC_ZONE_VERSION >= 5)
 	zone.memalign = zone_memalign;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
 	zone.free_definite_size = zone_free_definite_size;
 #endif
+#if (JEMALLOC_ZONE_VERSION >= 8)
+	zone.pressure_relief = NULL;
+#endif

 	zone_introspect.enumerator = NULL;
 	zone_introspect.good_size = (void *)zone_good_size;
@@ -199,156 +204,45 @@ create_zone(void)
 #if (JEMALLOC_ZONE_VERSION >= 6)
 	zone_introspect.zone_locked = NULL;
 #endif
+#if (JEMALLOC_ZONE_VERSION >= 7)
+	zone_introspect.enable_discharge_checking = NULL;
+	zone_introspect.disable_discharge_checking = NULL;
+	zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+	zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+	zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+#endif

-	return (&zone);
-}
-
-static size_t
-ozone_size(malloc_zone_t *zone, void *ptr)
-{
-	/* ... */
-}
-
-static void
-ozone_free(malloc_zone_t *zone, void *ptr)
-{
-	/* ... */
-}
-
-static void *
-ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
-	/* ... */
-}
-
-static unsigned
-ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
-    unsigned num_requested)
-{
-
-	/* Don't bother implementing this interface, since it isn't required. */
-	return (0);
-}
-
-static void
-ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
-{
-	/* ... */
-}
-
-#if (JEMALLOC_ZONE_VERSION >= 6)
-static void
-ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
-{
-	/* ... */
-}
-#endif
-
-static void
-ozone_force_lock(malloc_zone_t *zone)
-{
-
-	/* jemalloc locking is taken care of by the normal jemalloc zone. */
-	szone.introspect->force_lock(zone);
-}
-
-static void
-ozone_force_unlock(malloc_zone_t *zone)
-{
-
-	/* jemalloc locking is taken care of by the normal jemalloc zone. */
-	szone.introspect->force_unlock(zone);
-}
-
-/*
- * Overlay the default scalable zone (szone) such that existing allocations are
- * drained, and further allocations come from jemalloc.  This is necessary
- * because Core Foundation directly accesses and uses the szone before the
- * jemalloc library is even loaded.
- */
-void
-szone2ozone(malloc_zone_t *zone)
-{
-
-	/*
-	 * Stash a copy of the original szone so that we can call its
-	 * functions as needed.  Note that internally, the szone stores its
-	 * bookkeeping data structures immediately following the malloc_zone_t
-	 * header, so when calling szone functions, we need to pass a pointer
-	 * to the original zone structure.
-	 */
+	/*
+	 * The default purgeable zone is created lazily by OSX's libc.  It uses
+	 * the default zone when it is created for "small" allocations
+	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
+	 * obviously fails when the default zone is the jemalloc zone, so
+	 * malloc_default_purgeable_zone is called beforehand so that the
+	 * default purgeable zone is created when the default zone is still
+	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
|
||||||
|
* to check for the existence of malloc_default_purgeable_zone() at
|
||||||
|
* run time.
|
||||||
*/
|
*/
|
||||||
memcpy(&szone, zone, sizeof(malloc_zone_t));
|
if (malloc_default_purgeable_zone != NULL)
|
||||||
|
malloc_default_purgeable_zone();
|
||||||
|
|
||||||
zone->size = (void *)ozone_size;
|
/* Register the custom zone. At this point it won't be the default. */
|
||||||
zone->malloc = (void *)zone_malloc;
|
malloc_zone_register(&zone);
|
||||||
zone->calloc = (void *)zone_calloc;
|
|
||||||
zone->valloc = (void *)zone_valloc;
|
|
||||||
zone->free = (void *)ozone_free;
|
|
||||||
zone->realloc = (void *)ozone_realloc;
|
|
||||||
zone->destroy = (void *)zone_destroy;
|
|
||||||
zone->zone_name = "jemalloc_ozone";
|
|
||||||
zone->batch_malloc = ozone_batch_malloc;
|
|
||||||
zone->batch_free = ozone_batch_free;
|
|
||||||
zone->introspect = &ozone_introspect;
|
|
||||||
zone->version = JEMALLOC_ZONE_VERSION;
|
|
||||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
|
||||||
zone->memalign = zone_memalign;
|
|
||||||
zone->free_definite_size = ozone_free_definite_size;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
ozone_introspect.enumerator = NULL;
|
/*
|
||||||
ozone_introspect.good_size = (void *)zone_good_size;
|
* Unregister and reregister the default zone. On OSX >= 10.6,
|
||||||
ozone_introspect.check = NULL;
|
* unregistering takes the last registered zone and places it at the
|
||||||
ozone_introspect.print = NULL;
|
* location of the specified zone. Unregistering the default zone thus
|
||||||
ozone_introspect.log = NULL;
|
* makes the last registered one the default. On OSX < 10.6,
|
||||||
ozone_introspect.force_lock = (void *)ozone_force_lock;
|
* unregistering shifts all registered zones. The first registered zone
|
||||||
ozone_introspect.force_unlock = (void *)ozone_force_unlock;
|
* then becomes the default.
|
||||||
ozone_introspect.statistics = NULL;
|
*/
|
||||||
#if (JEMALLOC_ZONE_VERSION >= 6)
|
do {
|
||||||
ozone_introspect.zone_locked = NULL;
|
malloc_zone_t *default_zone = malloc_default_zone();
|
||||||
#endif
|
malloc_zone_unregister(default_zone);
|
||||||
|
malloc_zone_register(default_zone);
|
||||||
|
} while (malloc_default_zone() != &zone);
|
||||||
}
|
}
|
||||||
|
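Note: the following is an editorial sketch, not part of this change, showing how the re-registration loop above can be checked at run time on OSX. The malloc_zone_* calls are the standard <malloc/malloc.h> API; the zone name "jemalloc_zone" is an assumption about what this zone registers itself as.

#include <malloc/malloc.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* After register_zone() runs, the default zone should be jemalloc's. */
	malloc_zone_t *dz = malloc_default_zone();
	const char *name = malloc_get_zone_name(dz);

	printf("default zone: %s\n", name != NULL ? name : "(unnamed)");
	return (name == NULL || strcmp(name, "jemalloc_zone") != 0);
}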
119 test/aligned_alloc.c Normal file
@ -0,0 +1,119 @@
#define	JEMALLOC_MANGLE
#include "jemalloc_test.h"

#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
#define MAXALIGN ((size_t)0x2000000LU)
#define NITER 4

int
main(void)
{
	size_t alignment, size, total;
	unsigned i;
	void *p, *ps[NITER];

	malloc_printf("Test begin\n");

	/* Test error conditions. */
	alignment = 0;
	set_errno(0);
	p = aligned_alloc(alignment, 1);
	if (p != NULL || get_errno() != EINVAL) {
		malloc_printf(
		    "Expected error for invalid alignment %zu\n", alignment);
	}

	for (alignment = sizeof(size_t); alignment < MAXALIGN;
	    alignment <<= 1) {
		set_errno(0);
		p = aligned_alloc(alignment + 1, 1);
		if (p != NULL || get_errno() != EINVAL) {
			malloc_printf(
			    "Expected error for invalid alignment %zu\n",
			    alignment + 1);
		}
	}

#if LG_SIZEOF_PTR == 3
	alignment = UINT64_C(0x8000000000000000);
	size = UINT64_C(0x8000000000000000);
#else
	alignment = 0x80000000LU;
	size = 0x80000000LU;
#endif
	set_errno(0);
	p = aligned_alloc(alignment, size);
	if (p != NULL || get_errno() != ENOMEM) {
		malloc_printf(
		    "Expected error for aligned_alloc(%zu, %zu)\n",
		    alignment, size);
	}

#if LG_SIZEOF_PTR == 3
	alignment = UINT64_C(0x4000000000000000);
	size = UINT64_C(0x8400000000000001);
#else
	alignment = 0x40000000LU;
	size = 0x84000001LU;
#endif
	set_errno(0);
	p = aligned_alloc(alignment, size);
	if (p != NULL || get_errno() != ENOMEM) {
		malloc_printf(
		    "Expected error for aligned_alloc(%zu, %zu)\n",
		    alignment, size);
	}

	alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
	size = UINT64_C(0xfffffffffffffff0);
#else
	size = 0xfffffff0LU;
#endif
	set_errno(0);
	p = aligned_alloc(alignment, size);
	if (p != NULL || get_errno() != ENOMEM) {
		malloc_printf(
		    "Expected error for aligned_alloc(&p, %zu, %zu)\n",
		    alignment, size);
	}

	for (i = 0; i < NITER; i++)
		ps[i] = NULL;

	for (alignment = 8;
	    alignment <= MAXALIGN;
	    alignment <<= 1) {
		total = 0;
		malloc_printf("Alignment: %zu\n", alignment);
		for (size = 1;
		    size < 3 * alignment && size < (1U << 31);
		    size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				ps[i] = aligned_alloc(alignment, size);
				if (ps[i] == NULL) {
					char buf[BUFERROR_BUF];

					buferror(buf, sizeof(buf));
					malloc_printf(
					    "Error for size %zu (%#zx): %s\n",
					    size, size, buf);
					exit(1);
				}
				total += malloc_usable_size(ps[i]);
				if (total >= (MAXALIGN << 1))
					break;
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					free(ps[i]);
					ps[i] = NULL;
				}
			}
		}
	}

	malloc_printf("Test end\n");
	return (0);
}
25 test/aligned_alloc.exp Normal file
@ -0,0 +1,25 @@
Test begin
Alignment: 8
Alignment: 16
Alignment: 32
Alignment: 64
Alignment: 128
Alignment: 256
Alignment: 512
Alignment: 1024
Alignment: 2048
Alignment: 4096
Alignment: 8192
Alignment: 16384
Alignment: 32768
Alignment: 65536
Alignment: 131072
Alignment: 262144
Alignment: 524288
Alignment: 1048576
Alignment: 2097152
Alignment: 4194304
Alignment: 8388608
Alignment: 16777216
Alignment: 33554432
Test end
@ -1,17 +1,8 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stdbool.h>
-#include <pthread.h>
-#include <assert.h>
-#include <errno.h>
-#include <string.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 
 void *
-thread_start(void *arg)
+je_thread_start(void *arg)
 {
 	int err;
 	void *p;

@ -20,89 +11,85 @@ thread_start(void *arg)
 	size_t sz, usize;
 
 	sz = sizeof(a0);
-	if ((err = JEMALLOC_P(mallctl)("thread.allocated", &a0, &sz, NULL,
-	    0))) {
+	if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
 		if (err == ENOENT) {
 #ifdef JEMALLOC_STATS
 			assert(false);
 #endif
-			goto RETURN;
+			goto label_return;
 		}
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		exit(1);
 	}
 	sz = sizeof(ap0);
-	if ((err = JEMALLOC_P(mallctl)("thread.allocatedp", &ap0, &sz, NULL,
-	    0))) {
+	if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
 		if (err == ENOENT) {
 #ifdef JEMALLOC_STATS
 			assert(false);
 #endif
-			goto RETURN;
+			goto label_return;
 		}
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		exit(1);
 	}
 	assert(*ap0 == a0);
 
 	sz = sizeof(d0);
-	if ((err = JEMALLOC_P(mallctl)("thread.deallocated", &d0, &sz, NULL,
-	    0))) {
+	if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
 		if (err == ENOENT) {
 #ifdef JEMALLOC_STATS
 			assert(false);
 #endif
-			goto RETURN;
+			goto label_return;
 		}
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		exit(1);
 	}
 	sz = sizeof(dp0);
-	if ((err = JEMALLOC_P(mallctl)("thread.deallocatedp", &dp0, &sz, NULL,
-	    0))) {
+	if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
 		if (err == ENOENT) {
 #ifdef JEMALLOC_STATS
 			assert(false);
 #endif
-			goto RETURN;
+			goto label_return;
 		}
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		exit(1);
 	}
 	assert(*dp0 == d0);
 
-	p = JEMALLOC_P(malloc)(1);
+	p = malloc(1);
 	if (p == NULL) {
-		fprintf(stderr, "%s(): Error in malloc()\n", __func__);
+		malloc_printf("%s(): Error in malloc()\n", __func__);
 		exit(1);
 	}
 
 	sz = sizeof(a1);
-	JEMALLOC_P(mallctl)("thread.allocated", &a1, &sz, NULL, 0);
+	mallctl("thread.allocated", &a1, &sz, NULL, 0);
 	sz = sizeof(ap1);
-	JEMALLOC_P(mallctl)("thread.allocatedp", &ap1, &sz, NULL, 0);
+	mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
 	assert(*ap1 == a1);
 	assert(ap0 == ap1);
 
-	usize = JEMALLOC_P(malloc_usable_size)(p);
+	usize = malloc_usable_size(p);
 	assert(a0 + usize <= a1);
 
-	JEMALLOC_P(free)(p);
+	free(p);
 
 	sz = sizeof(d1);
-	JEMALLOC_P(mallctl)("thread.deallocated", &d1, &sz, NULL, 0);
+	mallctl("thread.deallocated", &d1, &sz, NULL, 0);
 	sz = sizeof(dp1);
-	JEMALLOC_P(mallctl)("thread.deallocatedp", &dp1, &sz, NULL, 0);
+	mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
 	assert(*dp1 == d1);
 	assert(dp0 == dp1);
 
 	assert(d0 + usize <= d1);
 
-RETURN:
+label_return:
 	return (NULL);
 }
 

@ -110,33 +97,22 @@ int
 main(void)
 {
 	int ret = 0;
-	pthread_t thread;
+	je_thread_t thread;
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
-	thread_start(NULL);
+	je_thread_start(NULL);
 
-	if (pthread_create(&thread, NULL, thread_start, NULL)
-	    != 0) {
-		fprintf(stderr, "%s(): Error in pthread_create()\n", __func__);
-		ret = 1;
-		goto RETURN;
-	}
-	pthread_join(thread, (void *)&ret);
+	je_thread_create(&thread, je_thread_start, NULL);
+	je_thread_join(thread, (void *)&ret);
 
-	thread_start(NULL);
+	je_thread_start(NULL);
 
-	if (pthread_create(&thread, NULL, thread_start, NULL)
-	    != 0) {
-		fprintf(stderr, "%s(): Error in pthread_create()\n", __func__);
-		ret = 1;
-		goto RETURN;
-	}
-	pthread_join(thread, (void *)&ret);
+	je_thread_create(&thread, je_thread_start, NULL);
+	je_thread_join(thread, (void *)&ret);
 
-	thread_start(NULL);
+	je_thread_start(NULL);
 
-RETURN:
-	fprintf(stderr, "Test end\n");
+	malloc_printf("Test end\n");
 	return (ret);
 }
159 test/allocm.c
@ -1,13 +1,9 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 
 #define CHUNK 0x400000
-/* #define MAXALIGN ((size_t)0x80000000000LLU) */
-#define MAXALIGN ((size_t)0x2000000LLU)
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
 #define NITER 4
 
 int

@ -15,79 +11,122 @@ main(void)
 {
 	int r;
 	void *p;
-	size_t sz, alignment, total, tsz;
+	size_t nsz, rsz, sz, alignment, total;
 	unsigned i;
 	void *ps[NITER];
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
-	sz = 0;
-	r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
+	sz = 42;
+	nsz = 0;
+	r = nallocm(&nsz, sz, 0);
 	if (r != ALLOCM_SUCCESS) {
-		fprintf(stderr, "Unexpected allocm() error\n");
+		malloc_printf("Unexpected nallocm() error\n");
 		abort();
 	}
-	if (sz < 42)
-		fprintf(stderr, "Real size smaller than expected\n");
-	if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected dallocm() error\n");
-
-	r = JEMALLOC_P(allocm)(&p, NULL, 42, 0);
+	rsz = 0;
+	r = allocm(&p, &rsz, sz, 0);
 	if (r != ALLOCM_SUCCESS) {
-		fprintf(stderr, "Unexpected allocm() error\n");
+		malloc_printf("Unexpected allocm() error\n");
 		abort();
 	}
-	if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected dallocm() error\n");
+	if (rsz < sz)
+		malloc_printf("Real size smaller than expected\n");
+	if (nsz != rsz)
+		malloc_printf("nallocm()/allocm() rsize mismatch\n");
+	if (dallocm(p, 0) != ALLOCM_SUCCESS)
+		malloc_printf("Unexpected dallocm() error\n");
 
-	r = JEMALLOC_P(allocm)(&p, NULL, 42, ALLOCM_ZERO);
+	r = allocm(&p, NULL, sz, 0);
 	if (r != ALLOCM_SUCCESS) {
-		fprintf(stderr, "Unexpected allocm() error\n");
+		malloc_printf("Unexpected allocm() error\n");
 		abort();
 	}
-	if (JEMALLOC_P(dallocm)(p, 0) != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected dallocm() error\n");
+	if (dallocm(p, 0) != ALLOCM_SUCCESS)
+		malloc_printf("Unexpected dallocm() error\n");
+
+	nsz = 0;
+	r = nallocm(&nsz, sz, ALLOCM_ZERO);
+	if (r != ALLOCM_SUCCESS) {
+		malloc_printf("Unexpected nallocm() error\n");
+		abort();
+	}
+	rsz = 0;
+	r = allocm(&p, &rsz, sz, ALLOCM_ZERO);
+	if (r != ALLOCM_SUCCESS) {
+		malloc_printf("Unexpected allocm() error\n");
+		abort();
+	}
+	if (nsz != rsz)
+		malloc_printf("nallocm()/allocm() rsize mismatch\n");
+	if (dallocm(p, 0) != ALLOCM_SUCCESS)
+		malloc_printf("Unexpected dallocm() error\n");
 
 #if LG_SIZEOF_PTR == 3
-	alignment = 0x8000000000000000LLU;
-	sz = 0x8000000000000000LLU;
+	alignment = UINT64_C(0x8000000000000000);
+	sz = UINT64_C(0x8000000000000000);
 #else
 	alignment = 0x80000000LU;
 	sz = 0x80000000LU;
 #endif
-	r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
+	nsz = 0;
+	r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
 	if (r == ALLOCM_SUCCESS) {
-		fprintf(stderr,
-		    "Expected error for allocm(&p, %zu, 0x%x)\n",
+		malloc_printf(
+		    "Expected error for nallocm(&nsz, %zu, %#x)\n",
 		    sz, ALLOCM_ALIGN(alignment));
 	}
+	rsz = 0;
+	r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
+	if (r == ALLOCM_SUCCESS) {
+		malloc_printf(
+		    "Expected error for allocm(&p, %zu, %#x)\n",
+		    sz, ALLOCM_ALIGN(alignment));
+	}
+	if (nsz != rsz)
+		malloc_printf("nallocm()/allocm() rsize mismatch\n");
 
 #if LG_SIZEOF_PTR == 3
-	alignment = 0x4000000000000000LLU;
-	sz = 0x8400000000000001LLU;
+	alignment = UINT64_C(0x4000000000000000);
+	sz = UINT64_C(0x8400000000000001);
#else
 	alignment = 0x40000000LU;
 	sz = 0x84000001LU;
 #endif
-	r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
+	nsz = 0;
+	r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
+	if (r != ALLOCM_SUCCESS)
+		malloc_printf("Unexpected nallocm() error\n");
+	rsz = 0;
+	r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
 	if (r == ALLOCM_SUCCESS) {
-		fprintf(stderr,
-		    "Expected error for allocm(&p, %zu, 0x%x)\n",
+		malloc_printf(
+		    "Expected error for allocm(&p, %zu, %#x)\n",
 		    sz, ALLOCM_ALIGN(alignment));
 	}
 
-	alignment = 0x10LLU;
+	alignment = 0x10LU;
 #if LG_SIZEOF_PTR == 3
-	sz = 0xfffffffffffffff0LLU;
+	sz = UINT64_C(0xfffffffffffffff0);
 #else
 	sz = 0xfffffff0LU;
 #endif
-	r = JEMALLOC_P(allocm)(&p, NULL, sz, ALLOCM_ALIGN(alignment));
+	nsz = 0;
+	r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
 	if (r == ALLOCM_SUCCESS) {
-		fprintf(stderr,
-		    "Expected error for allocm(&p, %zu, 0x%x)\n",
+		malloc_printf(
+		    "Expected error for nallocm(&nsz, %zu, %#x)\n",
 		    sz, ALLOCM_ALIGN(alignment));
 	}
+	rsz = 0;
+	r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
+	if (r == ALLOCM_SUCCESS) {
+		malloc_printf(
+		    "Expected error for allocm(&p, %zu, %#x)\n",
+		    sz, ALLOCM_ALIGN(alignment));
+	}
+	if (nsz != rsz)
+		malloc_printf("nallocm()/allocm() rsize mismatch\n");
 
 	for (i = 0; i < NITER; i++)
 		ps[i] = NULL;

@ -96,38 +135,60 @@ main(void)
 	    alignment <= MAXALIGN;
 	    alignment <<= 1) {
 		total = 0;
-		fprintf(stderr, "Alignment: %zu\n", alignment);
+		malloc_printf("Alignment: %zu\n", alignment);
 		for (sz = 1;
 		    sz < 3 * alignment && sz < (1U << 31);
 		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
 			for (i = 0; i < NITER; i++) {
-				r = JEMALLOC_P(allocm)(&ps[i], NULL, sz,
+				nsz = 0;
+				r = nallocm(&nsz, sz,
 				    ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
 				if (r != ALLOCM_SUCCESS) {
-					fprintf(stderr,
-					    "Error for size %zu (0x%zx): %d\n",
+					malloc_printf(
+					    "nallocm() error for size %zu"
+					    " (%#zx): %d\n",
 					    sz, sz, r);
 					exit(1);
 				}
+				rsz = 0;
+				r = allocm(&ps[i], &rsz, sz,
+				    ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
+				if (r != ALLOCM_SUCCESS) {
+					malloc_printf(
+					    "allocm() error for size %zu"
+					    " (%#zx): %d\n",
+					    sz, sz, r);
+					exit(1);
+				}
+				if (rsz < sz) {
+					malloc_printf(
+					    "Real size smaller than"
+					    " expected\n");
+				}
+				if (nsz != rsz) {
+					malloc_printf(
+					    "nallocm()/allocm() rsize"
+					    " mismatch\n");
+				}
 				if ((uintptr_t)p & (alignment-1)) {
-					fprintf(stderr,
+					malloc_printf(
 					    "%p inadequately aligned for"
 					    " alignment: %zu\n", p, alignment);
 				}
-				JEMALLOC_P(sallocm)(ps[i], &tsz, 0);
-				total += tsz;
+				sallocm(ps[i], &rsz, 0);
+				total += rsz;
 				if (total >= (MAXALIGN << 1))
 					break;
 			}
 			for (i = 0; i < NITER; i++) {
 				if (ps[i] != NULL) {
-					JEMALLOC_P(dallocm)(ps[i], 0);
+					dallocm(ps[i], 0);
 					ps[i] = NULL;
 				}
 			}
 		}
 	}
 
-	fprintf(stderr, "Test end\n");
+	malloc_printf("Test end\n");
 	return (0);
 }
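Note: an editorial sketch, not part of this change, of the pairing the updated test relies on -- nallocm() reports the usable size that an equivalent allocm() call would produce, without allocating. It uses the experimental *allocm API as it exists in this version of jemalloc; the size and alignment values are arbitrary examples.

#define	JEMALLOC_MANGLE
#include "jemalloc_test.h"

int
main(void)
{
	void *p;
	size_t nsz = 0, rsz = 0;

	/* Query the size first, then allocate with the same arguments. */
	if (nallocm(&nsz, 4096, ALLOCM_ALIGN(64)) != ALLOCM_SUCCESS)
		return (1);
	if (allocm(&p, &rsz, 4096, ALLOCM_ALIGN(64)) != ALLOCM_SUCCESS)
		return (1);
	/* The two reported sizes are expected to agree. */
	if (nsz != rsz)
		return (1);
	return (dallocm(p, 0) != ALLOCM_SUCCESS);
}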
@ -1,18 +1,6 @@
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 
-/*
- * Avoid using the assert() from jemalloc_internal.h, since it requires
- * internal libjemalloc functionality.
- * */
-#include <assert.h>
-
-/*
- * Directly include the bitmap code, since it isn't exposed outside
- * libjemalloc.
- */
-#include "../src/bitmap.c"
-
 #if (LG_BITMAP_MAXBITS > 12)
 # define MAXBITS 4500
 #else

@ -42,11 +30,13 @@ test_bitmap_init(void)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
+			bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);
 
 			for (j = 0; j < i; j++)
 				assert(bitmap_get(bitmap, &binfo, j) == false);
+			free(bitmap);
 
 		}
 	}

@ -62,12 +52,14 @@ test_bitmap_set(void)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
+			bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);
 
 			for (j = 0; j < i; j++)
 				bitmap_set(bitmap, &binfo, j);
 			assert(bitmap_full(bitmap, &binfo));
+			free(bitmap);
 		}
 	}
 }

@ -82,7 +74,8 @@ test_bitmap_unset(void)
 		bitmap_info_init(&binfo, i);
 		{
 			size_t j;
-			bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
+			bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);
 
 			for (j = 0; j < i; j++)

@ -93,6 +86,7 @@ test_bitmap_unset(void)
 			for (j = 0; j < i; j++)
 				bitmap_set(bitmap, &binfo, j);
 			assert(bitmap_full(bitmap, &binfo));
+			free(bitmap);
 		}
 	}
 }

@ -107,7 +101,8 @@ test_bitmap_sfu(void)
 		bitmap_info_init(&binfo, i);
 		{
 			ssize_t j;
-			bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
+			bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+			    bitmap_info_ngroups(&binfo));
 			bitmap_init(bitmap, &binfo);
 
 			/* Iteratively set bits starting at the beginning. */

@ -137,6 +132,7 @@ test_bitmap_sfu(void)
 			}
 			assert(bitmap_sfu(bitmap, &binfo) == i - 1);
 			assert(bitmap_full(bitmap, &binfo));
+			free(bitmap);
 		}
 	}
 }

@ -144,7 +140,7 @@ test_bitmap_sfu(void)
 int
 main(void)
 {
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
 	test_bitmap_size();
 	test_bitmap_init();

@ -152,6 +148,6 @@ main(void)
 	test_bitmap_unset();
 	test_bitmap_sfu();
 
-	fprintf(stderr, "Test end\n");
+	malloc_printf("Test end\n");
 	return (0);
 }
@ -4,3 +4,50 @@
  * have a different name.
  */
 #include "jemalloc/jemalloc@install_suffix@.h"
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/* Abstraction layer for threading in tests */
+#ifdef _WIN32
+#include <windows.h>
+
+typedef HANDLE je_thread_t;
+
+void
+je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg)
+{
+	LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
+	*thread = CreateThread(NULL, 0, routine, arg, 0, NULL);
+	if (*thread == NULL) {
+		malloc_printf("Error in CreateThread()\n");
+		exit(1);
+	}
+}
+
+void
+je_thread_join(je_thread_t thread, void **ret)
+{
+	WaitForSingleObject(thread, INFINITE);
+}
+
+#else
+#include <pthread.h>
+
+typedef pthread_t je_thread_t;
+
+void
+je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg)
+{
+
+	if (pthread_create(thread, NULL, proc, arg) != 0) {
+		malloc_printf("Error in pthread_create()\n");
+		exit(1);
+	}
+}
+
+void
+je_thread_join(je_thread_t thread, void **ret)
+{
+
+	pthread_join(thread, ret);
+}
+#endif
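Note: an editorial sketch, not part of this change, of how a test uses the portable wrappers added above instead of calling pthread_create()/CreateThread() directly; the worker body here is just a placeholder.

#define	JEMALLOC_MANGLE
#include "jemalloc_test.h"

static void *
worker(void *arg)
{
	/* Touch the allocator from the spawned thread. */
	free(malloc(1));
	return (NULL);
}

int
main(void)
{
	je_thread_t thread;
	void *ret;

	je_thread_create(&thread, worker, NULL);
	je_thread_join(thread, &ret);
	return (0);
}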
@ -1,9 +1,3 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <errno.h>
-#include <string.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 

@ -14,33 +8,32 @@ main(void)
 	size_t sz, lg_chunk, chunksize, i;
 	char *p, *q;
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
 	sz = sizeof(lg_chunk);
-	if ((err = JEMALLOC_P(mallctl)("opt.lg_chunk", &lg_chunk, &sz, NULL,
-	    0))) {
+	if ((err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0))) {
 		assert(err != ENOENT);
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		ret = 1;
-		goto RETURN;
+		goto label_return;
 	}
 	chunksize = ((size_t)1U) << lg_chunk;
 
 	p = (char *)malloc(chunksize);
 	if (p == NULL) {
-		fprintf(stderr, "malloc(%zu) --> %p\n", chunksize, p);
+		malloc_printf("malloc(%zu) --> %p\n", chunksize, p);
 		ret = 1;
-		goto RETURN;
+		goto label_return;
 	}
 	memset(p, 'a', chunksize);
 
 	q = (char *)realloc(p, chunksize * 2);
 	if (q == NULL) {
-		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize * 2,
+		malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize * 2,
 		    q);
 		ret = 1;
-		goto RETURN;
+		goto label_return;
 	}
 	for (i = 0; i < chunksize; i++) {
 		assert(q[i] == 'a');

@ -50,9 +43,9 @@ main(void)
 
 	q = (char *)realloc(p, chunksize);
 	if (q == NULL) {
-		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize, q);
+		malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize, q);
 		ret = 1;
-		goto RETURN;
+		goto label_return;
 	}
 	for (i = 0; i < chunksize; i++) {
 		assert(q[i] == 'a');

@ -61,7 +54,7 @@ main(void)
 	free(q);
 
 	ret = 0;
-RETURN:
-	fprintf(stderr, "Test end\n");
+label_return:
+	malloc_printf("Test end\n");
 	return (ret);
 }
@ -1,15 +1,9 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 
 #define CHUNK 0x400000
-/* #define MAXALIGN ((size_t)0x80000000000LLU) */
-#define MAXALIGN ((size_t)0x2000000LLU)
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
 #define NITER 4
 
 int

@ -20,13 +14,13 @@ main(void)
 	int err;
 	void *p, *ps[NITER];
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
 	/* Test error conditions. */
 	for (alignment = 0; alignment < sizeof(void *); alignment++) {
-		err = JEMALLOC_P(posix_memalign)(&p, alignment, 1);
+		err = posix_memalign(&p, alignment, 1);
 		if (err != EINVAL) {
-			fprintf(stderr,
+			malloc_printf(
 			    "Expected error for invalid alignment %zu\n",
 			    alignment);
 		}

@ -34,51 +28,51 @@ main(void)
 
 	for (alignment = sizeof(size_t); alignment < MAXALIGN;
 	    alignment <<= 1) {
-		err = JEMALLOC_P(posix_memalign)(&p, alignment + 1, 1);
+		err = posix_memalign(&p, alignment + 1, 1);
 		if (err == 0) {
-			fprintf(stderr,
+			malloc_printf(
 			    "Expected error for invalid alignment %zu\n",
 			    alignment + 1);
 		}
 	}
 
 #if LG_SIZEOF_PTR == 3
-	alignment = 0x8000000000000000LLU;
-	size = 0x8000000000000000LLU;
+	alignment = UINT64_C(0x8000000000000000);
+	size = UINT64_C(0x8000000000000000);
 #else
 	alignment = 0x80000000LU;
 	size = 0x80000000LU;
 #endif
-	err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
+	err = posix_memalign(&p, alignment, size);
 	if (err == 0) {
-		fprintf(stderr,
+		malloc_printf(
 		    "Expected error for posix_memalign(&p, %zu, %zu)\n",
 		    alignment, size);
 	}
 
 #if LG_SIZEOF_PTR == 3
-	alignment = 0x4000000000000000LLU;
-	size = 0x8400000000000001LLU;
+	alignment = UINT64_C(0x4000000000000000);
+	size = UINT64_C(0x8400000000000001);
 #else
 	alignment = 0x40000000LU;
 	size = 0x84000001LU;
 #endif
-	err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
+	err = posix_memalign(&p, alignment, size);
 	if (err == 0) {
-		fprintf(stderr,
+		malloc_printf(
 		    "Expected error for posix_memalign(&p, %zu, %zu)\n",
 		    alignment, size);
 	}
 
-	alignment = 0x10LLU;
+	alignment = 0x10LU;
 #if LG_SIZEOF_PTR == 3
-	size = 0xfffffffffffffff0LLU;
+	size = UINT64_C(0xfffffffffffffff0);
 #else
 	size = 0xfffffff0LU;
 #endif
-	err = JEMALLOC_P(posix_memalign)(&p, alignment, size);
+	err = posix_memalign(&p, alignment, size);
 	if (err == 0) {
-		fprintf(stderr,
+		malloc_printf(
 		    "Expected error for posix_memalign(&p, %zu, %zu)\n",
 		    alignment, size);
 	}

@ -90,32 +84,32 @@ main(void)
 	    alignment <= MAXALIGN;
 	    alignment <<= 1) {
 		total = 0;
-		fprintf(stderr, "Alignment: %zu\n", alignment);
+		malloc_printf("Alignment: %zu\n", alignment);
 		for (size = 1;
 		    size < 3 * alignment && size < (1U << 31);
 		    size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
 			for (i = 0; i < NITER; i++) {
-				err = JEMALLOC_P(posix_memalign)(&ps[i],
+				err = posix_memalign(&ps[i],
 				    alignment, size);
 				if (err) {
-					fprintf(stderr,
-					    "Error for size %zu (0x%zx): %s\n",
+					malloc_printf(
+					    "Error for size %zu (%#zx): %s\n",
 					    size, size, strerror(err));
 					exit(1);
 				}
-				total += JEMALLOC_P(malloc_usable_size)(ps[i]);
+				total += malloc_usable_size(ps[i]);
 				if (total >= (MAXALIGN << 1))
 					break;
 			}
 			for (i = 0; i < NITER; i++) {
 				if (ps[i] != NULL) {
-					JEMALLOC_P(free)(ps[i]);
+					free(ps[i]);
 					ps[i] = NULL;
 				}
 			}
 		}
 	}
 
-	fprintf(stderr, "Test end\n");
+	malloc_printf("Test end\n");
 	return (0);
 }
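Note: an editorial sketch, not part of this change, of the posix_memalign() contract the test above exercises -- the alignment must be a power of two that is a multiple of sizeof(void *), and failures are reported through the return value rather than errno.

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *p;
	/* 64 is a valid alignment; an alignment of, say, 3 would return EINVAL. */
	int err = posix_memalign(&p, 64, 1024);

	if (err != 0) {
		printf("posix_memalign error: %d\n", err);
		return (1);
	}
	free(p);
	return (0);
}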
@ -1,9 +1,3 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <assert.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 

@ -15,113 +9,119 @@ main(void)
 	size_t sz, tsz;
 	int r;
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
 	/* Get page size. */
 	{
+#ifdef _WIN32
+		SYSTEM_INFO si;
+		GetSystemInfo(&si);
+		pagesize = (size_t)si.dwPageSize;
+#else
 		long result = sysconf(_SC_PAGESIZE);
 		assert(result != -1);
 		pagesize = (size_t)result;
+#endif
 	}
 
-	r = JEMALLOC_P(allocm)(&p, &sz, 42, 0);
+	r = allocm(&p, &sz, 42, 0);
 	if (r != ALLOCM_SUCCESS) {
-		fprintf(stderr, "Unexpected allocm() error\n");
+		malloc_printf("Unexpected allocm() error\n");
 		abort();
 	}
 
 	q = p;
-	r = JEMALLOC_P(rallocm)(&q, &tsz, sz, 0, ALLOCM_NO_MOVE);
+	r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q != p)
-		fprintf(stderr, "Unexpected object move\n");
+		malloc_printf("Unexpected object move\n");
 	if (tsz != sz) {
-		fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
+		malloc_printf("Unexpected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 
 	q = p;
-	r = JEMALLOC_P(rallocm)(&q, &tsz, sz, 5, ALLOCM_NO_MOVE);
+	r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q != p)
-		fprintf(stderr, "Unexpected object move\n");
+		malloc_printf("Unexpected object move\n");
 	if (tsz != sz) {
-		fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
+		malloc_printf("Unexpected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 
 	q = p;
-	r = JEMALLOC_P(rallocm)(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE);
+	r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE);
 	if (r != ALLOCM_ERR_NOT_MOVED)
-		fprintf(stderr, "Unexpected rallocm() result\n");
+		malloc_printf("Unexpected rallocm() result\n");
 	if (q != p)
-		fprintf(stderr, "Unexpected object move\n");
+		malloc_printf("Unexpected object move\n");
 	if (tsz != sz) {
-		fprintf(stderr, "Unexpected size change: %zu --> %zu\n",
+		malloc_printf("Unexpected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 
 	q = p;
-	r = JEMALLOC_P(rallocm)(&q, &tsz, sz + 5, 0, 0);
+	r = rallocm(&q, &tsz, sz + 5, 0, 0);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q == p)
-		fprintf(stderr, "Expected object move\n");
+		malloc_printf("Expected object move\n");
 	if (tsz == sz) {
-		fprintf(stderr, "Expected size change: %zu --> %zu\n",
+		malloc_printf("Expected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 	p = q;
 	sz = tsz;
 
-	r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*2, 0, 0);
+	r = rallocm(&q, &tsz, pagesize*2, 0, 0);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q == p)
-		fprintf(stderr, "Expected object move\n");
+		malloc_printf("Expected object move\n");
 	if (tsz == sz) {
-		fprintf(stderr, "Expected size change: %zu --> %zu\n",
+		malloc_printf("Expected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 	p = q;
 	sz = tsz;
 
-	r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*4, 0, 0);
+	r = rallocm(&q, &tsz, pagesize*4, 0, 0);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (tsz == sz) {
-		fprintf(stderr, "Expected size change: %zu --> %zu\n",
+		malloc_printf("Expected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 	p = q;
 	sz = tsz;
 
-	r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE);
+	r = rallocm(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q != p)
-		fprintf(stderr, "Unexpected object move\n");
+		malloc_printf("Unexpected object move\n");
 	if (tsz == sz) {
-		fprintf(stderr, "Expected size change: %zu --> %zu\n",
+		malloc_printf("Expected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 	sz = tsz;
 
-	r = JEMALLOC_P(rallocm)(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE);
+	r = rallocm(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE);
 	if (r != ALLOCM_SUCCESS)
-		fprintf(stderr, "Unexpected rallocm() error\n");
+		malloc_printf("Unexpected rallocm() error\n");
 	if (q != p)
-		fprintf(stderr, "Unexpected object move\n");
+		malloc_printf("Unexpected object move\n");
 	if (tsz == sz) {
-		fprintf(stderr, "Expected size change: %zu --> %zu\n",
+		malloc_printf("Expected size change: %zu --> %zu\n",
 		    sz, tsz);
 	}
 	sz = tsz;
 
-	JEMALLOC_P(dallocm)(p, 0);
+	dallocm(p, 0);
 
-	fprintf(stderr, "Test end\n");
+	malloc_printf("Test end\n");
 	return (0);
 }
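Note: an editorial sketch, not part of this change, of the ALLOCM_NO_MOVE behaviour the test checks -- rallocm() may only resize in place, returning ALLOCM_ERR_NOT_MOVED when that is impossible, and in either case leaves the pointer where it was. The request size is an arbitrary example.

#define	JEMALLOC_MANGLE
#include "jemalloc_test.h"

int
main(void)
{
	void *p, *q;
	size_t sz, tsz;

	if (allocm(&p, &sz, 4096, 0) != ALLOCM_SUCCESS)
		return (1);
	q = p;
	/* With ALLOCM_NO_MOVE the object may only grow in place; whether the
	 * call succeeds or reports ALLOCM_ERR_NOT_MOVED, q still equals p. */
	rallocm(&q, &tsz, sz * 16, 0, ALLOCM_NO_MOVE);
	if (q != p)
		return (1);
	return (dallocm(p, 0) != ALLOCM_SUCCESS);
}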
@ -1,16 +1,10 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <pthread.h>
-#include <string.h>
-#include <assert.h>
-
 #define	JEMALLOC_MANGLE
 #include "jemalloc_test.h"
 
 #define NTHREADS 10
 
 void *
-thread_start(void *arg)
+je_thread_start(void *arg)
 {
 	unsigned main_arena_ind = *(unsigned *)arg;
 	void *p;

@ -18,24 +12,24 @@ thread_start(void *arg)
 	size_t size;
 	int err;
 
-	p = JEMALLOC_P(malloc)(1);
+	p = malloc(1);
 	if (p == NULL) {
-		fprintf(stderr, "%s(): Error in malloc()\n", __func__);
+		malloc_printf("%s(): Error in malloc()\n", __func__);
 		return (void *)1;
 	}
 
 	size = sizeof(arena_ind);
-	if ((err = JEMALLOC_P(mallctl)("thread.arena", &arena_ind, &size,
-	    &main_arena_ind, sizeof(main_arena_ind)))) {
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+	if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
+	    sizeof(main_arena_ind)))) {
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		return (void *)1;
 	}
 
 	size = sizeof(arena_ind);
-	if ((err = JEMALLOC_P(mallctl)("thread.arena", &arena_ind, &size, NULL,
+	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL,
 	    0))) {
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		return (void *)1;
 	}

@ -52,41 +46,33 @@ main(void)
 	unsigned arena_ind;
 	size_t size;
 	int err;
-	pthread_t threads[NTHREADS];
+	je_thread_t threads[NTHREADS];
 	unsigned i;
 
-	fprintf(stderr, "Test begin\n");
+	malloc_printf("Test begin\n");
 
-	p = JEMALLOC_P(malloc)(1);
+	p = malloc(1);
 	if (p == NULL) {
-		fprintf(stderr, "%s(): Error in malloc()\n", __func__);
+		malloc_printf("%s(): Error in malloc()\n", __func__);
 		ret = 1;
-		goto RETURN;
+		goto label_return;
 	}
 
 	size = sizeof(arena_ind);
-	if ((err = JEMALLOC_P(mallctl)("thread.arena", &arena_ind, &size, NULL,
-	    0))) {
-		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
+	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
 		    strerror(err));
 		ret = 1;
-		goto RETURN;
-	}
-
-	for (i = 0; i < NTHREADS; i++) {
-		if (pthread_create(&threads[i], NULL, thread_start,
-		    (void *)&arena_ind) != 0) {
-			fprintf(stderr, "%s(): Error in pthread_create()\n",
-			    __func__);
-			ret = 1;
-			goto RETURN;
-		}
+		goto label_return;
 	}
 
 	for (i = 0; i < NTHREADS; i++)
-		pthread_join(threads[i], (void *)&ret);
+		je_thread_create(&threads[i], je_thread_start, (void *)&arena_ind);
 
-RETURN:
-	fprintf(stderr, "Test end\n");
+	for (i = 0; i < NTHREADS; i++)
+		je_thread_join(threads[i], (void *)&ret);
+
+label_return:
+	malloc_printf("Test end\n");
 	return (ret);
 }
91 test/thread_tcache_enabled.c Normal file
@ -0,0 +1,91 @@
#define	JEMALLOC_MANGLE
#include "jemalloc_test.h"

void *
je_thread_start(void *arg)
{
	int err;
	size_t sz;
	bool e0, e1;

	sz = sizeof(bool);
	if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
		if (err == ENOENT) {
#ifdef JEMALLOC_TCACHE
			assert(false);
#endif
		}
		goto label_return;
	}

	if (e0) {
		e1 = false;
		assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz)
		    == 0);
		assert(e0);
	}

	e1 = true;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0 == false);

	e1 = true;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0);

	e1 = false;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0);

	e1 = false;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0 == false);

	free(malloc(1));
	e1 = true;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0 == false);

	free(malloc(1));
	e1 = true;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0);

	free(malloc(1));
	e1 = false;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0);

	free(malloc(1));
	e1 = false;
	assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
	assert(e0 == false);

	free(malloc(1));
label_return:
	return (NULL);
}

int
main(void)
{
	int ret = 0;
	je_thread_t thread;

	malloc_printf("Test begin\n");

	je_thread_start(NULL);

	je_thread_create(&thread, je_thread_start, NULL);
	je_thread_join(thread, (void *)&ret);

	je_thread_start(NULL);

	je_thread_create(&thread, je_thread_start, NULL);
	je_thread_join(thread, (void *)&ret);

	je_thread_start(NULL);

	malloc_printf("Test end\n");
	return (ret);
}
2 test/thread_tcache_enabled.exp Normal file
@ -0,0 +1,2 @@
Test begin
Test end