Merge branch 'dev'
commit 61efbda709

.gitignore (vendored) | 2
@@ -77,12 +77,14 @@ test/include/test/jemalloc_test_defs.h
*.pdb
*.sdf
*.opendb
*.VC.db
*.opensdf
*.cachefile
*.suo
*.user
*.sln.docstates
*.tmp
.vs/
/msvc/Win32/
/msvc/x64/
/msvc/projects/*/*/Debug*/
.travis.yml
@@ -1,4 +1,5 @@
language: generic
dist: precise

matrix:
  include:
COPYING | 4
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2018 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
-Copyright (C) 2009-2017 Facebook, Inc.  All rights reserved.
+Copyright (C) 2009-2018 Facebook, Inc.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
ChangeLog | 121
@@ -4,6 +4,123 @@ brevity. Much more detail can be found in the git revision history:

    https://github.com/jemalloc/jemalloc

* 5.1.0 (May 4th, 2018)

This release is primarily about fine-tuning, ranging from several new features
to numerous notable performance and portability enhancements. The release and
prior dev versions have been running in multiple large scale applications for
months, and the cumulative improvements are substantial in many cases.

Given the long and successful production runs, this release is likely a good
candidate for applications to upgrade, from both jemalloc 5.0 and before. For
performance-critical applications, the newly added TUNING.md provides
guidelines on jemalloc tuning.

New features:
- Implement transparent huge page support for internal metadata. (@interwq)
- Add opt.thp to allow enabling / disabling transparent huge pages for all
  mappings. (@interwq)
- Add maximum background thread count option. (@djwatson)
- Allow prof_active to control opt.lg_prof_interval and prof.gdump.
  (@interwq)
- Allow arena index lookup based on allocation addresses via mallctl.
  (@lionkov)
- Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD)
- Add opt.lg_extent_max_active_fit to set the max ratio between the size of
  the active extent selected (to split off from) and the size of the requested
  allocation. (@interwq, @davidtgoldblatt)
- Add retain_grow_limit to set the max size when growing virtual address
  space. (@interwq)
- Add mallctl interfaces (see the sketch after this list):
  + arena.<i>.retain_grow_limit (@interwq)
  + arenas.lookup (@lionkov)
  + max_background_threads (@djwatson)
  + opt.lg_extent_max_active_fit (@interwq)
  + opt.max_background_threads (@djwatson)
  + opt.metadata_thp (@interwq)
  + opt.thp (@interwq)
  + stats.metadata_thp (@interwq)
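
As a quick orientation, here is a minimal sketch (not part of the release
itself) that exercises two of the interfaces listed above through mallctl();
it assumes a default, unprefixed jemalloc build:

```c
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Read the new opt.metadata_thp setting (const char *, r-). */
    const char *metadata_thp;
    size_t sz = sizeof(metadata_thp);
    if (mallctl("opt.metadata_thp", &metadata_thp, &sz, NULL, 0) == 0) {
        printf("opt.metadata_thp: %s\n", metadata_thp);
    }

    /* Cap background worker thread creation (size_t, rw). */
    size_t max_bg = 2;
    if (mallctl("max_background_threads", NULL, NULL, &max_bg,
        sizeof(max_bg)) != 0) {
        fprintf(stderr, "max_background_threads not supported\n");
    }
    return 0;
}
```
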
Portability improvements:
- Support GNU/kFreeBSD configuration. (@paravoid)
- Support m68k, nios2 and SH3 architectures. (@paravoid)
- Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo)
- Fix symbol listing for cross-compiling. (@tamird)
- Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid)
- Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin)
- Fix MSVC 2015 & 2017 builds. (@rustyx)
- Improve RISC-V support. (@EdSchouten)
- Set name mangling script in strict mode. (@nicolov)
- Avoid MADV_HUGEPAGE on ARM. (@marxin)
- Modify configure to determine return value of strerror_r.
  (@davidtgoldblatt, @cferris1000)
- Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani)
- Fix 32-bit build on MSVC. (@rustyx)
- Fix external symbol on MSVC. (@maksqwe)
- Avoid a printf format specifier warning. (@jasone)
- Add configure option --disable-initial-exec-tls which can allow jemalloc to
  be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD)
- AArch64: Add ILP32 support. (@cmuellner)
- Add --with-lg-vaddr configure option to support cross compiling.
  (@cmuellner, @davidtgoldblatt)

Optimizations and refactors:
- Improve active extent fit with extent_max_active_fit. This considerably
  reduces fragmentation over time and improves virtual memory and metadata
  usage. (@davidtgoldblatt, @interwq)
- Eagerly coalesce large extents to reduce fragmentation. (@interwq)
- sdallocx: only read size info when page aligned (i.e. possibly sampled),
  which speeds up the sized deallocation path significantly. (@interwq)
- Avoid attempting new mappings for in-place expansion with retain, since
  it rarely succeeds in practice and causes high overhead. (@interwq)
- Refactor OOM handling in newImpl. (@wqfish)
- Add internal fine-grained logging functionality for debugging use.
  (@davidtgoldblatt)
- Refactor arena / tcache interactions. (@davidtgoldblatt)
- Refactor extent management with dumpable flag. (@davidtgoldblatt)
- Add runtime detection of lazy purging. (@interwq)
- Use pairing heap instead of red-black tree for extents_avail. (@djwatson)
- Use sysctl on startup in FreeBSD. (@trasz)
- Use thread local prng state instead of atomic. (@djwatson)
- Make decay always purge one more extent than before, because in
  practice large extents are usually the ones that cross the decay threshold.
  Purging the additional extent helps save memory as well as reduce VM
  fragmentation. (@interwq)
- Fast division by dynamic values. (@davidtgoldblatt)
- Improve the fit for aligned allocation. (@interwq, @edwinsmith)
- Refactor extent_t bitpacking. (@rkmisra)
- Optimize the generated assembly for ticker operations. (@davidtgoldblatt)
- Convert stats printing to use a structured text emitter. (@davidtgoldblatt)
- Remove preserve_lru feature for extents management. (@djwatson)
- Consolidate two memory loads into one on the fast deallocation path.
  (@davidtgoldblatt, @interwq)

Bug fixes (most of the issues are only relevant to jemalloc 5.0):
- Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt)
- Validate returned file descriptor before use. (@zonyitoo)
- Fix a few background thread initialization and shutdown issues. (@interwq)
- Fix an extent coalesce + decay race by taking both coalescing extents off
  the LRU list. (@interwq)
- Fix potentially unbound increase during decay, caused by one thread
  continuing to stash memory to purge while other threads generate new pages.
  The number of pages to purge is checked to prevent this. (@interwq)
- Fix a FreeBSD bootstrap assertion. (@strejda, @interwq)
- Handle 32 bit mutex counters. (@rkmisra)
- Fix an indexing bug when creating background threads. (@davidtgoldblatt,
  @binliu19)
- Fix arguments passed to extent_init. (@yuleniwo, @interwq)
- Fix addresses used for ordering mutexes. (@rkmisra)
- Fix abort_conf processing during bootstrap. (@interwq)
- Fix include path order for out-of-tree builds. (@cmuellner)

Incompatible changes:
- Remove --disable-thp. (@interwq)
- Remove mallctl interfaces:
  + config.thp (@interwq)

Documentation:
- Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson)

* 5.0.1 (July 1, 2017)

This bugfix release fixes several issues, most of which are obscure enough

@@ -22,7 +139,7 @@ brevity. Much more detail can be found in the git revision history:
unlikely to be an issue with other libc implementations. (@interwq)
- Mask signals during background thread creation. This prevents signals from
  being inadvertently delivered to background threads. (@jasone,
-  @davidgoldblatt, @interwq)
+  @davidtgoldblatt, @interwq)
- Avoid inactivity checks within background threads, in order to prevent
  recursive mutex acquisition. (@interwq)
- Fix extent_grow_retained() to use the specified hooks when the

@@ -515,7 +632,7 @@ brevity. Much more detail can be found in the git revision history:
these fixes, xallocx() now tries harder to partially fulfill requests for
optional extra space. Note that a couple of minor heap profiling
optimizations are included, but these are better thought of as performance
-fixes that were integral to disovering most of the other bugs.
+fixes that were integral to discovering most of the other bugs.

Optimizations:
- Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
INSTALL.md | 22
@@ -157,11 +157,6 @@ any of the following arguments (not a definitive list) to 'configure':
  Statically link against the specified libunwind.a rather than dynamically
  linking with -lunwind.

-* `--disable-thp`
-
-  Disable transparent huge page (THP) integration. This option can be useful
-  when cross compiling.
-
* `--disable-fill`

  Disable support for junk/zero filling of memory. See the "opt.junk" and

@@ -265,6 +260,22 @@ any of the following arguments (not a definitive list) to 'configure':
  configuration, jemalloc will provide additional size classes that are not
  16-byte-aligned (24, 40, and 56).

+* `--with-lg-vaddr=<lg-vaddr>`
+
+  Specify the number of significant virtual address bits. By default, the
+  configure script attempts to detect virtual address size on those platforms
+  where it knows how, and picks a default otherwise. This option may be
+  useful when cross-compiling.
+
+* `--disable-initial-exec-tls`
+
+  Disable the initial-exec TLS model for jemalloc's internal thread-local
+  storage (on those platforms that support explicit settings). This can allow
+  jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
+  Note that in this case, there will be two malloc implementations operating
+  in the same process, which will almost certainly result in confusing runtime
+  crashes if pointers leak from one implementation to the other.
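
For illustration, a minimal sketch of the dlopen scenario described above
(the library path is an assumption about a typical install; link with -ldl):

```c
#include <dlfcn.h>
#include <stdio.h>

int main(void) {
    /* Load a jemalloc built with --disable-initial-exec-tls. */
    void *h = dlopen("libjemalloc.so.2", RTLD_NOW | RTLD_LOCAL);
    if (h == NULL) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        return 1;
    }
    /* Resolve mallctl from the loaded copy. The process's own malloc is
     * untouched, so pointers must never cross the two implementations. */
    int (*jemallctl)(const char *, void *, size_t *, void *, size_t) =
        (int (*)(const char *, void *, size_t *, void *, size_t))
        dlsym(h, "mallctl");
    if (jemallctl != NULL) {
        const char *v;
        size_t sz = sizeof(v);
        if (jemallctl("version", &v, &sz, NULL, 0) == 0) {
            printf("loaded jemalloc %s\n", v);
        }
    }
    dlclose(h);
    return 0;
}
```
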
The following environment variables (not a definitive list) impact configure's
behavior:

@@ -329,6 +340,7 @@ To install only parts of jemalloc, use the following targets:
    install_include
    install_lib_shared
    install_lib_static
+   install_lib_pc
    install_lib
    install_doc_html
    install_doc_man
Makefile.in | 10
@@ -24,7 +24,7 @@ abs_srcroot := @abs_srcroot@
abs_objroot := @abs_objroot@

# Build parameters.
-CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
+CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include
CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
EXTRA_CFLAGS := @EXTRA_CFLAGS@

@@ -93,15 +93,18 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/arena.c \
$(srcroot)src/background_thread.c \
$(srcroot)src/base.c \
+$(srcroot)src/bin.c \
$(srcroot)src/bitmap.c \
$(srcroot)src/ckh.c \
$(srcroot)src/ctl.c \
+$(srcroot)src/div.c \
$(srcroot)src/extent.c \
$(srcroot)src/extent_dss.c \
$(srcroot)src/extent_mmap.c \
$(srcroot)src/hash.c \
$(srcroot)src/hooks.c \
$(srcroot)src/large.c \
+$(srcroot)src/log.c \
$(srcroot)src/malloc_io.c \
$(srcroot)src/mutex.c \
$(srcroot)src/mutex_pool.c \

@@ -111,7 +114,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/prof.c \
$(srcroot)src/rtree.c \
$(srcroot)src/stats.c \
-$(srcroot)src/spin.c \
$(srcroot)src/sz.c \
$(srcroot)src/tcache.c \
$(srcroot)src/ticker.c \

@@ -160,10 +162,13 @@ TESTS_UNIT := \
$(srcroot)test/unit/arena_reset.c \
$(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/background_thread.c \
+$(srcroot)test/unit/background_thread_enable.c \
$(srcroot)test/unit/base.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
+$(srcroot)test/unit/div.c \
+$(srcroot)test/unit/emitter.c \
$(srcroot)test/unit/extent_quantize.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \

@@ -171,6 +176,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \
$(srcroot)test/unit/junk_free.c \
+$(srcroot)test/unit/log.c \
$(srcroot)test/unit/mallctl.c \
$(srcroot)test/unit/malloc_io.c \
$(srcroot)test/unit/math.c \
TUNING.md | 129 (new file)
@@ -0,0 +1,129 @@
This document summarizes the common approaches for performance fine tuning with
jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work
reasonably well in practice, and most applications should not have to tune any
options. However, in order to cover a wide range of applications and avoid
pathological cases, the default setting is sometimes kept conservative and
suboptimal, even for many common workloads. When jemalloc is properly tuned for
a specific application / workload, it is common to improve system level metrics
by a few percent, or make favorable trade-offs.


## Notable runtime options for performance tuning

Runtime options can be set via
[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).

* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)

  Enabling jemalloc background threads generally improves the tail latency for
  application threads, since unused memory purging is shifted to the dedicated
  background threads. In addition, unintended purging delay caused by
  application inactivity is avoided with background threads.

  Suggested: `background_thread:true` when jemalloc managed threads can be
  allowed.

* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)

  Allowing jemalloc to utilize transparent huge pages for its internal
  metadata usually reduces TLB misses significantly, especially for programs
  with large memory footprint and frequent allocation / deallocation
  activities. Metadata memory usage may increase due to the use of huge
  pages.

  Suggested for allocation intensive programs: `metadata_thp:auto` or
  `metadata_thp:always`, which is expected to improve CPU utilization at a
  small memory cost.

* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
  [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)

  Decay time determines how fast jemalloc returns unused pages back to the
  operating system, and therefore provides a fairly straightforward trade-off
  between CPU and memory usage. A shorter decay time purges unused pages faster
  to reduce memory usage (usually at the cost of more CPU cycles spent on
  purging), and vice versa.

  Suggested: tune the values based on the desired trade-offs (see the runtime
  sketch after this list).

* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)

  By default jemalloc uses multiple arenas to reduce internal lock contention.
  However, a high arena count may also increase overall memory fragmentation,
  since arenas manage memory independently. When a high degree of parallelism
  is not expected at the allocator level, a lower number of arenas often
  improves memory usage.

  Suggested: if low parallelism is expected, try a lower arena count while
  monitoring CPU and memory usage.

* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)

  Enable dynamic thread to arena association based on running CPU. This has
  the potential to improve locality, e.g. when thread to CPU affinity is
  present.

  Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
  thread migration between processors is expected to be infrequent.
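
The decay bullet above mentions a runtime sketch; the following is an
illustration (not part of TUNING.md) that adjusts dirty-page decay through
mallctl, mirroring the `"arena." STRINGIFY(MALLCTL_ARENAS_ALL)` idiom from the
manual. The chosen value is hypothetical:

```c
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

#define STRINGIFY_(x) #x
#define STRINGIFY(x) STRINGIFY_(x)

/* Shorten dirty-page decay at runtime: first the default inherited by
 * arenas created later, then every existing arena via the
 * MALLCTL_ARENAS_ALL pseudo-index. */
static void set_dirty_decay_ms(ssize_t decay_ms) {
    mallctl("arenas.dirty_decay_ms", NULL, NULL, &decay_ms,
        sizeof(decay_ms));
    mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".dirty_decay_ms",
        NULL, NULL, &decay_ms, sizeof(decay_ms));
}
```
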
Examples:

* High resource consumption application, prioritizing CPU utilization:

  `background_thread:true,metadata_thp:auto` combined with relaxed decay time
  (increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
  e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).

* High resource consumption application, prioritizing memory usage:

  `background_thread:true` combined with shorter decay time (decreased
  `dirty_decay_ms` and / or `muzzy_decay_ms`,
  e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
  (e.g. number of CPUs).

* Low resource consumption application:

  `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased
  `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
  `dirty_decay_ms:1000,muzzy_decay_ms:0`).

* Extremely conservative -- minimize memory usage at all costs, only suitable
  when allocation activity is very rare:

  `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`

Note that it is recommended to combine the options with `abort_conf:true`,
which aborts immediately on illegal options.
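
These option strings do not have to come from the environment: a default can
also be compiled into the application through the `malloc_conf` symbol. A
sketch reusing the first example's string (adjust to taste; `MALLOC_CONF` in
the environment still overrides the compiled-in value):

```c
#include <jemalloc/jemalloc.h>

/* Compiled-in default configuration for this binary. */
const char *malloc_conf =
    "background_thread:true,metadata_thp:auto,"
    "dirty_decay_ms:30000,muzzy_decay_ms:30000,abort_conf:true";
```
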
## Beyond runtime options

In addition to the runtime options, there are a number of programmatic ways to
improve application performance with jemalloc.

* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)

  Manually created arenas can help performance in various ways, e.g. by
  managing locality and contention for specific usages. For example,
  applications can explicitly allocate frequently accessed objects from a
  dedicated arena with
  [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
  locality. In addition, explicit arenas often benefit from individually
  tuned options, e.g. relaxed [decay
  time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
  frequent reuse is expected. (A combined sketch follows this list.)

* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)

  Extent hooks allow customization for managing underlying memory. One use
  case for performance purposes is to utilize huge pages -- for example,
  [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
  uses explicit arenas with customized extent hooks to manage 1GB huge pages
  for frequently accessed data, which reduces TLB misses significantly.

* [Explicit thread-to-arena
  binding](http://jemalloc.net/jemalloc.3.html#thread.arena)

  It is common for some threads in an application to have different memory
  access / allocation patterns. Threads with heavy workloads often benefit
  from explicit binding, e.g. binding very active threads to dedicated arenas
  may reduce contention at the allocator level. (See the sketch after this
  list.)
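
A combined sketch for the first and third bullets (illustrative; error
handling elided): create a dedicated arena, bind the calling thread to it, or
target it per-allocation with mallocx():

```c
#include <jemalloc/jemalloc.h>

static void *use_private_arena(size_t size) {
    /* arenas.create returns the index of a fresh arena. */
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    mallctl("arenas.create", &arena_ind, &sz, NULL, 0);

    /* Option 1: route all of this thread's allocations to it. */
    mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind));

    /* Option 2: target it explicitly for a single allocation. */
    return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
}
```
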
bin/jeprof.in
@@ -2895,6 +2895,8 @@ sub RemoveUninterestingFrames {
foreach my $name ('@JEMALLOC_PREFIX@calloc',
    'cfree',
    '@JEMALLOC_PREFIX@malloc',
+   'newImpl',
+   'void* newImpl',
    '@JEMALLOC_PREFIX@free',
    '@JEMALLOC_PREFIX@memalign',
    '@JEMALLOC_PREFIX@posix_memalign',
configure.ac | 159
@@ -10,7 +10,7 @@ dnl Custom macro definitions.
dnl JE_CONCAT_VVV(r, a, b)
dnl
dnl Set $r to the concatenation of $a and $b, with a space separating them iff
-dnl both $a and $b are non-emty.
+dnl both $a and $b are non-empty.
AC_DEFUN([JE_CONCAT_VVV],
if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
  $1="[$]{$2}[$]{$3}"

@@ -76,6 +76,7 @@ AC_MSG_CHECKING([whether compiler supports $1])
T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[[
]], [[

@@ -87,6 +88,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
AC_MSG_RESULT([no])
[CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
)
+AC_LANG_POP([C++])
JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
])

@@ -243,6 +245,7 @@ if test "x$GCC" = "xyes" ; then
JE_CFLAGS_ADD([-Wshorten-64-to-32])
JE_CFLAGS_ADD([-Wsign-compare])
JE_CFLAGS_ADD([-Wundef])
+JE_CFLAGS_ADD([-Wno-format-zero-length])
JE_CFLAGS_ADD([-pipe])
JE_CFLAGS_ADD([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then

@@ -380,6 +383,7 @@ dnl CPU-specific settings.
CPU_SPINWAIT=""
case "${host_cpu}" in
i686|x86_64)
+HAVE_CPU_SPINWAIT=1
if test "x${je_cv_msvc}" = "xyes" ; then
AC_CACHE_VAL([je_cv_pause_msvc],
[JE_COMPILABLE([pause instruction MSVC], [],

@@ -398,22 +402,33 @@ case "${host_cpu}" in
fi
fi
;;
powerpc*)
AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
CPU_SPINWAIT='__asm__ volatile("or 31,31,31")'
;;
*)
HAVE_CPU_SPINWAIT=0
;;
esac
AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT])
AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])

+AC_ARG_WITH([lg_vaddr],
+[AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])],
+[LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])
+
case "${host_cpu}" in
aarch64)
if test "x$LG_VADDR" = "xdetect"; then
AC_MSG_CHECKING([number of significant virtual address bits])
if test "x${LG_SIZEOF_PTR}" = "x2" ; then
#aarch64 ILP32
LG_VADDR=32
else
#aarch64 LP64
LG_VADDR=48
fi
AC_MSG_RESULT([$LG_VADDR])
fi
;;
x86_64)
if test "x$LG_VADDR" = "xdetect"; then
AC_CACHE_CHECK([number of significant virtual address bits],
[je_cv_lg_vaddr],
AC_RUN_IFELSE([AC_LANG_PROGRAM(

@@ -461,8 +476,10 @@ typedef unsigned __int32 uint32_t;
else
AC_MSG_ERROR([cannot determine number of significant virtual address bits])
fi
fi
;;
*)
if test "x$LG_VADDR" = "xdetect"; then
AC_MSG_CHECKING([number of significant virtual address bits])
if test "x${LG_SIZEOF_PTR}" = "x3" ; then
LG_VADDR=64

@@ -474,6 +491,7 @@ typedef unsigned __int32 uint32_t;
AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
fi
AC_MSG_RESULT([$LG_VADDR])
fi
;;
esac
AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])

@@ -561,7 +579,7 @@ case "${host}" in
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
abi="elf"
-AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
+AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])

@@ -571,11 +589,11 @@ case "${host}" in
default_retain="1"
fi
;;
-*-*-linux* | *-*-kfreebsd*)
+*-*-linux*)
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
abi="elf"
-AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
+AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])

@@ -584,6 +602,15 @@ case "${host}" in
default_retain="1"
fi
;;
+*-*-kfreebsd*)
+dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+abi="elf"
+AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
+AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
+;;
*-*-netbsd*)
AC_MSG_CHECKING([ABI])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(

@@ -722,12 +749,9 @@ JE_COMPILABLE([tls_model attribute], [],
foo = 0;],
[je_cv_tls_model])
JE_CFLAGS_RESTORE()
-if test "x${je_cv_tls_model}" = "xyes" ; then
-AC_DEFINE([JEMALLOC_TLS_MODEL],
-[__attribute__((tls_model("initial-exec")))])
-else
-AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
-fi
+dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
+dnl --disable-initial-exec-tls)

dnl Check for alloc_size attribute support.
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])

@@ -1237,6 +1261,21 @@ if test "x$enable_cache_oblivious" = "x1" ; then
fi
AC_SUBST([enable_cache_oblivious])

+dnl Do not log by default.
+AC_ARG_ENABLE([log],
+[AS_HELP_STRING([--enable-log], [Support debug logging])],
+[if test "x$enable_log" = "xno" ; then
+enable_log="0"
+else
+enable_log="1"
+fi
+],
+[enable_log="0"]
+)
+if test "x$enable_log" = "x1" ; then
+AC_DEFINE([JEMALLOC_LOG], [ ])
+fi
+AC_SUBST([enable_log])

JE_COMPILABLE([a program using __builtin_unreachable], [

@@ -1800,6 +1839,15 @@ if test "x${je_cv_madvise}" = "xyes" ; then
], [je_cv_madv_free])
if test "x${je_cv_madv_free}" = "xyes" ; then
AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+elif test "x${je_cv_madvise}" = "xyes" ; then
+case "${host_cpu}" in i686|x86_64)
+case "${host}" in *-*-linux*)
+AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ])
+;;
+esac
+;;
+esac
fi

dnl Check for madvise(..., MADV_DONTNEED).

@@ -1812,6 +1860,17 @@ if test "x${je_cv_madvise}" = "xyes" ; then
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
fi

+dnl Check for madvise(..., MADV_DO[NT]DUMP).
+JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [
+#include <sys/mman.h>
+], [
+madvise((void *)0, 0, MADV_DONTDUMP);
+madvise((void *)0, 0, MADV_DODUMP);
+], [je_cv_madv_dontdump])
+if test "x${je_cv_madv_dontdump}" = "xyes" ; then
+AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ])
+fi
+
dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
#include <sys/mman.h>

@@ -1819,28 +1878,16 @@ if test "x${je_cv_madvise}" = "xyes" ; then
madvise((void *)0, 0, MADV_HUGEPAGE);
madvise((void *)0, 0, MADV_NOHUGEPAGE);
], [je_cv_thp])
+case "${host_cpu}" in
+arm*)
+;;
+*)
if test "x${je_cv_thp}" = "xyes" ; then
+AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ])
fi
-
-dnl Enable transparent huge page support by default.
-AC_ARG_ENABLE([thp],
-[AS_HELP_STRING([--disable-thp],
-[Disable transparent huge page support])],
-[if test "x$enable_thp" = "xno" -o "x${je_cv_thp}" != "xyes" ; then
-enable_thp="0"
-else
-enable_thp="1"
+;;
+esac
fi
-],
-[if test "x${je_cv_thp}" = "xyes" ; then
-enable_thp="1"
-else
-enable_thp="0"
-fi
-])
-if test "x$enable_thp" = "x1" ; then
-AC_DEFINE([JEMALLOC_THP], [ ])
-fi
-AC_SUBST([enable_thp])

dnl ============================================================================
dnl Check whether __sync_{add,sub}_and_fetch() are available despite

@@ -1959,6 +2006,29 @@ if test "x${enable_zone_allocator}" = "x1" ; then
AC_DEFINE([JEMALLOC_ZONE], [ ])
fi

+dnl ============================================================================
+dnl Use initial-exec TLS by default.
+AC_ARG_ENABLE([initial-exec-tls],
+[AS_HELP_STRING([--disable-initial-exec-tls],
+[Disable the initial-exec tls model])],
+[if test "x$enable_initial_exec_tls" = "xno" ; then
+enable_initial_exec_tls="0"
+else
+enable_initial_exec_tls="1"
+fi
+],
+[enable_initial_exec_tls="1"]
+)
+AC_SUBST([enable_initial_exec_tls])
+
+if test "x${je_cv_tls_model}" = "xyes" -a \
+"x${enable_initial_exec_tls}" = "x1" ; then
+AC_DEFINE([JEMALLOC_TLS_MODEL],
+[__attribute__((tls_model("initial-exec")))])
+else
+AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
+fi
+
dnl ============================================================================
dnl Enable background threads if possible.

@@ -2017,6 +2087,25 @@ if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
fi

+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-D_GNU_SOURCE])
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([strerror_r returns char with gnu source], [
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+], [
+char *buffer = (char *) malloc(100);
+char *error = strerror_r(EINVAL, buffer, 100);
+printf("%s\n", error);
+], [je_cv_strerror_r_returns_char_with_gnu_source])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
+AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ])
+fi
+
dnl ============================================================================
dnl Check for typedefs, structures, and compiler characteristics.
AC_HEADER_STDBOOL

@@ -2195,10 +2284,10 @@ AC_MSG_RESULT([prof : ${enable_prof}])
AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
-AC_MSG_RESULT([thp : ${enable_thp}])
AC_MSG_RESULT([fill : ${enable_fill}])
AC_MSG_RESULT([utrace : ${enable_utrace}])
AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
+AC_MSG_RESULT([log : ${enable_log}])
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
AC_MSG_RESULT([cxx : ${enable_cxx}])
doc/jemalloc.xml.in
@@ -761,6 +761,18 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
selected pthread-based platforms.</para></listitem>
</varlistentry>

+<varlistentry id="max_background_threads">
+  <term>
+    <mallctl>max_background_threads</mallctl>
+    (<type>size_t</type>)
+    <literal>rw</literal>
+  </term>
+  <listitem><para>Maximum number of background worker threads that will
+  be created. This value is capped at <link
+  linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
+  startup.</para></listitem>
+</varlistentry>
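
As an illustration of the intended call sequence (not part of the manual),
the cap is typically written before background threads are enabled:

```c
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static void start_background_threads(size_t max_threads) {
    /* Set the cap first; it bounds how many workers may be spawned. */
    mallctl("max_background_threads", NULL, NULL, &max_threads,
        sizeof(max_threads));

    bool enable = true;
    mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
}
```
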
<varlistentry id="config.cache_oblivious">
  <term>
    <mallctl>config.cache_oblivious</mallctl>

@@ -852,16 +864,6 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
build configuration.</para></listitem>
</varlistentry>

-<varlistentry id="config.thp">
-  <term>
-    <mallctl>config.thp</mallctl>
-    (<type>bool</type>)
-    <literal>r-</literal>
-  </term>
-  <listitem><para><option>--disable-thp</option> was not specified
-  during build configuration, and the system supports transparent huge
-  page manipulation.</para></listitem>
-</varlistentry>

<varlistentry id="config.utrace">
  <term>

@@ -916,6 +918,20 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
</para></listitem>
</varlistentry>

+<varlistentry id="opt.metadata_thp">
+  <term>
+    <mallctl>opt.metadata_thp</mallctl>
+    (<type>const char *</type>)
+    <literal>r-</literal>
+  </term>
+  <listitem><para>Controls whether to allow jemalloc to use transparent
+  huge pages (THP) for internal metadata (see <link
+  linkend="stats.metadata">stats.metadata</link>). <quote>always</quote>
+  allows such usage. <quote>auto</quote> uses no THP initially, but may
+  begin to do so when metadata usage reaches a certain level. The default
+  is <quote>disabled</quote>.</para></listitem>
+</varlistentry>

<varlistentry id="opt.retain">
  <term>
    <mallctl>opt.retain</mallctl>

@@ -996,12 +1012,26 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
(<type>const bool</type>)
<literal>r-</literal>
</term>
-<listitem><para>Internal background worker threads enabled/disabled. See
-<link linkend="background_thread">background_thread</link> for dynamic
-control options and details. This option is disabled by
+<listitem><para>Internal background worker threads enabled/disabled.
+Because of potential circular dependencies, enabling background threads
+using this option may cause a crash or deadlock during initialization. For
+a reliable way to use this feature, see <link
+linkend="background_thread">background_thread</link> for dynamic control
+options and details. This option is disabled by
default.</para></listitem>
</varlistentry>

+<varlistentry id="opt.max_background_threads">
+  <term>
+    <mallctl>opt.max_background_threads</mallctl>
+    (<type>const size_t</type>)
+    <literal>r-</literal>
+  </term>
+  <listitem><para>Maximum number of background threads that will be created
+  if <link linkend="background_thread">background_thread</link> is set.
+  Defaults to the number of CPUs.</para></listitem>
+</varlistentry>

<varlistentry id="opt.dirty_decay_ms">
  <term>
    <mallctl>opt.dirty_decay_ms</mallctl>

@@ -1022,7 +1052,7 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
The default decay time is 10 seconds. See <link
linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
and <link
-linkend="arena.i.muzzy_decay_ms"><mallctl>arena.<i>.muzzy_decay_ms</mallctl></link>
+linkend="arena.i.dirty_decay_ms"><mallctl>arena.<i>.dirty_decay_ms</mallctl></link>
for related dynamic control options. See <link
linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
for a description of muzzy pages.</para></listitem>

@@ -1052,6 +1082,22 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
for related dynamic control options.</para></listitem>
</varlistentry>

+<varlistentry id="opt.lg_extent_max_active_fit">
+  <term>
+    <mallctl>opt.lg_extent_max_active_fit</mallctl>
+    (<type>size_t</type>)
+    <literal>r-</literal>
+  </term>
+  <listitem><para>When reusing dirty extents, this determines the (log
+  base 2 of the) maximum ratio between the size of the active extent
+  selected (to split off from) and the size of the requested allocation.
+  This prevents the splitting of large active extents for smaller
+  allocations, which can reduce fragmentation over the long run
+  (especially for non-active extents). A lower value may reduce
+  fragmentation, at the cost of extra active extents. The default value
+  is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
+</varlistentry>

<varlistentry id="opt.stats_print">
  <term>
    <mallctl>opt.stats_print</mallctl>

@@ -1194,6 +1240,28 @@ malloc_conf = "xmalloc:true";]]></programlisting>
default maximum is 32 KiB (2^15).</para></listitem>
</varlistentry>

+<varlistentry id="opt.thp">
+  <term>
+    <mallctl>opt.thp</mallctl>
+    (<type>const char *</type>)
+    <literal>r-</literal>
+  </term>
+  <listitem><para>Transparent hugepage (THP) mode. Settings "always",
+  "never" and "default" are available if THP is supported by the operating
+  system. The "always" setting enables transparent hugepage for all user
+  memory mappings with
+  <parameter><constant>MADV_HUGEPAGE</constant></parameter>; "never"
+  ensures no transparent hugepage with
+  <parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the default
+  setting "default" makes no changes. Note that this option does not
+  affect THP for jemalloc internal metadata (see <link
+  linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
+  in addition, for arenas with customized <link
+  linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
+  this option is bypassed as it is implemented as part of the default
+  extent hooks.</para></listitem>
+</varlistentry>

<varlistentry id="opt.prof">
  <term>
    <mallctl>opt.prof</mallctl>

@@ -1666,6 +1734,22 @@ malloc_conf = "xmalloc:true";]]></programlisting>
for additional information.</para></listitem>
</varlistentry>

+<varlistentry id="arena.i.retain_grow_limit">
+  <term>
+    <mallctl>arena.<i>.retain_grow_limit</mallctl>
+    (<type>size_t</type>)
+    <literal>rw</literal>
+  </term>
+  <listitem><para>Maximum size to grow retained region (only relevant when
+  <link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
+  enabled). This controls the maximum increment to expand virtual memory,
+  or allocation through <link
+  linkend="arena.i.extent_hooks"><mallctl>arena.<i>.extent_hooks</mallctl></link>.
+  In particular, if customized extent hooks reserve physical memory
+  (e.g. 1G huge pages), this is useful to control the allocation hook's
+  input size. The default is no limit.</para></listitem>
+</varlistentry>
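
A hedged usage sketch for this interface (the arena index 0 and the 1 GiB
figure are illustrative):

```c
#include <jemalloc/jemalloc.h>

static void cap_retain_growth(void) {
    /* Read-modify-write: fetch the old limit while installing a new one,
     * e.g. to match extent hooks that reserve 1G huge pages. */
    size_t old_limit;
    size_t new_limit = (size_t)1 << 30;
    size_t sz = sizeof(old_limit);
    mallctl("arena.0.retain_grow_limit", &old_limit, &sz, &new_limit,
        sizeof(new_limit));
}
```
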
<varlistentry id="arena.i.extent_hooks">
  <term>
    <mallctl>arena.<i>.extent_hooks</mallctl>

@@ -1708,7 +1792,9 @@ struct extent_hooks_s {
in favor of less permanent (and often less costly) operations. All
operations except allocation can be universally opted out of by setting
the hook pointers to <constant>NULL</constant>, or selectively opted out
-of by returning failure.</para>
+of by returning failure. Note that once the extent hook is set, the
+structure is accessed directly by the associated arenas, so it must
+remain valid for the entire lifetime of the arenas.</para>

<funcsynopsis><funcprototype>
<funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>

@@ -2044,6 +2130,15 @@ struct extent_hooks_s {
and return the new arena index.</para></listitem>
</varlistentry>

+<varlistentry id="arenas.lookup">
+  <term>
+    <mallctl>arenas.lookup</mallctl>
+    (<type>unsigned</type>, <type>void*</type>)
+    <literal>rw</literal>
+  </term>
+  <listitem><para>Index of the arena to which an allocation
+  belongs.</para></listitem>
+</varlistentry>
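
A short sketch of the read-with-input calling convention this entry implies
(illustrative only): the pointer goes in through newp, and the owning arena's
index comes back through oldp:

```c
#include <limits.h>
#include <jemalloc/jemalloc.h>

static unsigned arena_of(void *ptr) {
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    if (mallctl("arenas.lookup", &arena_ind, &sz, &ptr,
        sizeof(ptr)) != 0) {
        return UINT_MAX; /* Not managed by jemalloc. */
    }
    return arena_ind;
}
```
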
<varlistentry id="prof.thread_active_init">
  <term>
    <mallctl>prof.thread_active_init</mallctl>

@@ -2187,7 +2282,24 @@ struct extent_hooks_s {
metadata structures (see <link
linkend="stats.arenas.i.base"><mallctl>stats.arenas.<i>.base</mallctl></link>)
and internal allocations (see <link
-linkend="stats.arenas.i.internal"><mallctl>stats.arenas.<i>.internal</mallctl></link>).</para></listitem>
+linkend="stats.arenas.i.internal"><mallctl>stats.arenas.<i>.internal</mallctl></link>).
+Transparent huge page (enabled with <link
+linkend="opt.metadata_thp">opt.metadata_thp</link>) usage is not
+considered.</para></listitem>
</varlistentry>

+<varlistentry id="stats.metadata_thp">
+  <term>
+    <mallctl>stats.metadata_thp</mallctl>
+    (<type>size_t</type>)
+    <literal>r-</literal>
+    [<option>--enable-stats</option>]
+  </term>
+  <listitem><para>Number of transparent huge pages (THP) used for
+  metadata. See <link
+  linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+  <link linkend="opt.metadata_thp">opt.metadata_thp</link> for
+  details.</para></listitem>
+</varlistentry>

<varlistentry id="stats.resident">
  <term>

@@ -2506,6 +2618,18 @@ struct extent_hooks_s {
profiles.</para></listitem>
</varlistentry>

+<varlistentry id="stats.arenas.i.metadata_thp">
+  <term>
+    <mallctl>stats.arenas.<i>.metadata_thp</mallctl>
+    (<type>size_t</type>)
+    <literal>r-</literal>
+    [<option>--enable-stats</option>]
+  </term>
+  <listitem><para>Number of transparent huge pages (THP) used for
+  metadata. See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
+  for details.</para></listitem>
+</varlistentry>

<varlistentry id="stats.arenas.i.resident">
  <term>
    <mallctl>stats.arenas.<i>.resident</mallctl>
include/jemalloc/internal/arena_externs.h
@@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

+#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"

@@ -9,25 +10,19 @@
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

-extern const arena_bin_info_t arena_bin_info[NBINS];
-
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;

-void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    szind_t szind, uint64_t nrequests);
-void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    size_t size);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
+    bin_stats_t *bstats, arena_stats_large_t *lstats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET

@@ -50,11 +45,11 @@ void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
+    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
    bool zero);

-typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
+typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,

@@ -77,6 +72,8 @@ ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
+bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
+    size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
include/jemalloc/internal/arena_inlines_a.h
@@ -25,7 +25,7 @@ static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
    cassert(config_prof);

-    if (likely(prof_interval == 0)) {
+    if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
        return false;
    }
include/jemalloc/internal/arena_inlines_b.h
@@ -8,13 +8,6 @@
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

-static inline szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin) {
-    szind_t binind = (szind_t)(bin - arena->bins);
-    assert(binind < NBINS);
-    return binind;
-}
-
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
    cassert(config_prof);

@@ -35,7 +28,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
}

JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);

@@ -54,7 +47,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
}

static inline void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
    cassert(config_prof);
    assert(ptr != NULL);
include/jemalloc/internal/arena_stats.h | 237 (new file)
@@ -0,0 +1,237 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"

/*
 * In those architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.
     */
    arena_stats_u64_t nmalloc;
    arena_stats_u64_t ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    arena_stats_u64_t nrequests; /* Partially derived. */

    /* Current number of allocations of this size class. */
    size_t curlextents; /* Derived. */
};

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
    /* Total number of purge sweeps. */
    arena_stats_u64_t npurge;
    /* Total number of madvise calls made. */
    arena_stats_u64_t nmadvise;
    /* Total number of pages purged. */
    arena_stats_u64_t purged;
};

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_t mtx;
#endif

    /* Number of bytes currently mapped, excluding retained memory. */
    atomic_zu_t mapped; /* Partially derived. */

    /*
     * Number of unused virtual memory bytes currently retained.  Retained
     * bytes are technically mapped (though always decommitted or purged),
     * but they are excluded from the mapped statistic (above).
     */
    atomic_zu_t retained; /* Derived. */

    arena_stats_decay_t decay_dirty;
    arena_stats_decay_t decay_muzzy;

    atomic_zu_t base; /* Derived. */
    atomic_zu_t internal;
    atomic_zu_t resident; /* Derived. */
    atomic_zu_t metadata_thp;

    atomic_zu_t allocated_large; /* Derived. */
    arena_stats_u64_t nmalloc_large; /* Derived. */
    arena_stats_u64_t ndalloc_large; /* Derived. */
    arena_stats_u64_t nrequests_large; /* Derived. */

    /* Number of bytes cached in tcache associated with this arena. */
    atomic_zu_t tcache_bytes; /* Derived. */

    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

    /* One element for each large size class. */
    arena_stats_large_t lstats[NSIZES - NBINS];

    /* Arena uptime. */
    nstime_t uptime;
};

static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
    if (config_debug) {
        for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
            assert(((char *)arena_stats)[i] == 0);
        }
    }
#ifndef JEMALLOC_ATOMIC_U64
    if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
        WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
        return true;
    }
#endif
    /* Memory is zeroed, so there is no need to clear stats. */
    return false;
}

static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_u64(p, ATOMIC_RELAXED);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    return *p;
#endif
}

static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
    atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    *p += x;
#endif
}

UNUSED static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
    UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
    assert(r - x <= r);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    *p -= x;
    assert(*p + x >= *p);
#endif
}

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
    uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
    atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
    *dst += src;
#endif
}

static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
    return atomic_load_zu(p, ATOMIC_RELAXED);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
    atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
    atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
    UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
    assert(r - x <= r);
#else
    malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
    size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
    atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
    size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
    atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
    arena_stats_lock(tsdn, arena_stats);
    arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
        NBINS].nrequests, nrequests);
    arena_stats_unlock(tsdn, arena_stats);
}

static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
    arena_stats_lock(tsdn, arena_stats);
    arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
    arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
@ -1,7 +1,9 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
@ -10,45 +12,8 @@
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ticker.h"

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
struct arena_bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t bitmap_info;
};

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t mtx;
@ -104,37 +69,11 @@ struct arena_decay_s {
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field. */
	decay_stats_t *stats;
	arena_stats_decay_t *stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t ceil_npages;
};

struct arena_bin_s {
	/* All operations on arena_bin_t fields require lock ownership. */
	malloc_mutex_t lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t *slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t slabs_full;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
@ -162,13 +101,14 @@ struct arena_s {
	arena_stats_t stats;

	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t) tcache_ql;
	ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
	malloc_mutex_t tcache_ql_mtx;

	/* Synchronization: internal. */
@ -239,9 +179,14 @@ struct arena_s {
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the max allowed size ind to expand (unless the
	 * required size is greater).  Default is no limit, and controlled
	 * through mallctl only.
	 *
	 * Synchronization: extent_grow_mtx
	 */
	pszind_t extent_grow_next;
	pszind_t retain_grow_limit;
	malloc_mutex_t extent_grow_mtx;

	/*
@ -258,7 +203,7 @@ struct arena_s {
	 *
	 * Synchronization: internal.
	 */
	arena_bin_t bins[NBINS];
	bin_t bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
@ -12,9 +12,7 @@
#define DECAY_NTICKS_PER_UPDATE 1000

typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;

@ -2,9 +2,11 @@
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H

extern bool opt_background_thread;
extern size_t opt_max_background_threads;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;
extern bool can_enable_background_thread;

@ -8,6 +8,7 @@
#endif

#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT

typedef enum {
	background_thread_stopped,

@ -1,6 +1,9 @@
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

extern metadata_thp_mode_t opt_metadata_thp;
extern const char *metadata_thp_mode_names[];

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(tsdn_t *tsdn, base_t *base);
@ -10,7 +13,7 @@ extent_hooks_t *base_extent_hooks_set(base_t *base,
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped);
    size_t *resident, size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);

@ -6,4 +6,8 @@ base_ind_get(const base_t *base) {
	return base->ind;
}

static inline bool
metadata_thp_enabled(void) {
	return (opt_metadata_thp != metadata_thp_disabled);
}
#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */

@ -30,6 +30,8 @@ struct base_s {
	/* Protects base_alloc() and base_stats_get() operations. */
	malloc_mutex_t mtx;

	/* Using THP when true (metadata_thp auto mode). */
	bool auto_thp_switched;
	/*
	 * Most recent size class in the series of increasingly large base
	 * extents.  Logarithmic spacing between subsequent allocations ensures
@ -50,6 +52,8 @@ struct base_s {
	size_t allocated;
	size_t resident;
	size_t mapped;
	/* Number of THP regions touched. */
	size_t n_thp;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */

@ -4,4 +4,30 @@
typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#define METADATA_THP_DEFAULT metadata_thp_disabled

/*
 * In auto mode, arenas switch to huge pages for the base allocator on the
 * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
 * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
 */

#define BASE_AUTO_THP_THRESHOLD    2
#define BASE_AUTO_THP_THRESHOLD_A0 5

typedef enum {
	metadata_thp_disabled   = 0,
	/*
	 * Lazily enable hugepage for metadata.  To avoid high RSS caused by THP
	 * + low usage arena (i.e. THP becomes a significant percentage), the
	 * "auto" option only starts using THP after a base allocator used up
	 * the first THP region.  Starting from the second hugepage (in a single
	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
	 * right away.
	 */
	metadata_thp_auto       = 1,
	metadata_thp_always     = 2,
	metadata_thp_mode_limit = 3
} metadata_thp_mode_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
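Since opt.metadata_thp is surfaced through mallctl, the mode a build actually picked up can be read back at runtime. A small sketch (this assumes the option is string-valued, mirroring metadata_thp_mode_names above):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	const char *mode;
	size_t sz = sizeof(mode);
	if (mallctl("opt.metadata_thp", (void *)&mode, &sz, NULL, 0) == 0) {
		/* Expected values: "disabled", "auto", "always". */
		printf("metadata_thp: %s\n", mode);
	}
	return 0;
}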
106
include/jemalloc/internal/bin.h
Normal file
@ -0,0 +1,106 @@
#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/bin_stats.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
typedef struct bin_info_s bin_info_t;
struct bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t bitmap_info;
};

extern const bin_info_t bin_infos[NBINS];


typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t *slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t slabs_full;

	/* Bin statistics. */
	bin_stats_t stats;
};

/* Initializes a bin to empty.  Returns true on error. */
bool bin_init(bin_t *bin);

/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);

/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
	dst_bin_stats->nmalloc += bin->stats.nmalloc;
	dst_bin_stats->ndalloc += bin->stats.ndalloc;
	dst_bin_stats->nrequests += bin->stats.nrequests;
	dst_bin_stats->curregs += bin->stats.curregs;
	dst_bin_stats->nfills += bin->stats.nfills;
	dst_bin_stats->nflushes += bin->stats.nflushes;
	dst_bin_stats->nslabs += bin->stats.nslabs;
	dst_bin_stats->reslabs += bin->stats.reslabs;
	dst_bin_stats->curslabs += bin->stats.curslabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}

#endif /* JEMALLOC_INTERNAL_BIN_H */

51
include/jemalloc/internal/bin_stats.h
Normal file
@ -0,0 +1,51 @@
#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
#define JEMALLOC_INTERNAL_BIN_STATS_H

#include "jemalloc/internal/mutex_prof.h"

typedef struct bin_stats_s bin_stats_t;
struct bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t nmalloc;
	uint64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t curregs;

	/* Number of tcache fills from this bin. */
	uint64_t nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t nflushes;

	/* Total number of slabs created for this bin's size class. */
	uint64_t nslabs;

	/*
	 * Total number of slabs reused by extracting them from the slabs heap
	 * for this bin's size class.
	 */
	uint64_t reslabs;

	/* Current number of slabs in this bin. */
	size_t curslabs;

	mutex_prof_data_t mutex_data;
};

#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */

114
include/jemalloc/internal/cache_bin.h
Normal file
@ -0,0 +1,114 @@
#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
#define JEMALLOC_INTERNAL_CACHE_BIN_H

#include "jemalloc/internal/ql.h"

/*
 * The cache_bins are the mechanism that the tcache and the arena use to
 * communicate.  The tcache fills from and flushes to the arena by passing a
 * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
 * tcaches associated with it, it does so by iterating over its
 * cache_bin_array_descriptor_t objects and reading out per-bin stats it
 * contains.  This makes it so that the arena need not know about the existence
 * of the tcache at all.
 */


/*
 * The count of the number of cached allocations in a bin.  We make this signed
 * so that negative numbers can encode "invalid" states (e.g. a low water mark
 * of -1 for a cache that has been depleted).
 */
typedef int32_t cache_bin_sz_t;

typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t nrequests;
};

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
	/* Upper limit on ncached. */
	cache_bin_sz_t ncached_max;
};

typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
	/* Min # cached since last GC. */
	cache_bin_sz_t low_water;
	/* # of cached objects. */
	cache_bin_sz_t ncached;
	/*
	 * ncached and stats are both modified frequently.  Let's keep them
	 * close so that they have a higher chance of being on the same
	 * cacheline, thus less write-backs.
	 */
	cache_bin_stats_t tstats;
	/*
	 * Stack of available objects.
	 *
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack go to higher addresses for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are available items and the lowest item will
	 * be allocated first.
	 */
	void **avail;
};

typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
	/*
	 * The arena keeps a list of the cache bins associated with it, for
	 * stats collection.
	 */
	ql_elm(cache_bin_array_descriptor_t) link;
	/* Pointers to the tcache bins. */
	cache_bin_t *bins_small;
	cache_bin_t *bins_large;
};

static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
    cache_bin_t *bins_small, cache_bin_t *bins_large) {
	ql_elm_new(descriptor, link);
	descriptor->bins_small = bins_small;
	descriptor->bins_large = bins_large;
}

JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
	void *ret;

	if (unlikely(bin->ncached == 0)) {
		bin->low_water = -1;
		*success = false;
		return NULL;
	}
	/*
	 * success (instead of ret) should be checked upon the return of this
	 * function.  We avoid checking (ret == NULL) because there is never a
	 * null stored on the avail stack (which is unknown to the compiler),
	 * and eagerly checking ret would cause pipeline stall (waiting for the
	 * cacheline).
	 */
	*success = true;
	ret = *(bin->avail - bin->ncached);
	bin->ncached--;

	if (unlikely(bin->ncached < bin->low_water)) {
		bin->low_water = bin->ncached;
	}

	return ret;
}

#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
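cache_bin_alloc_easy pops from the low end of the avail window; the deallocation path pushes in the mirror-image direction. A sketch of that inverse operation (a hypothetical helper for illustration — the real push lives in the tcache deallocation path, and this ignores the flush-when-full handling):

JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info,
    void *ptr) {
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		return false;	/* Caller must flush to the arena first. */
	}
	/* Mirror of the pop above: slot -(ncached + 1) is the next free one. */
	*(bin->avail - (bin->ncached + 1)) = ptr;
	bin->ncached++;
	return true;
}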
@ -40,14 +40,15 @@ typedef struct ctl_arena_stats_s {
	uint64_t ndalloc_small;
	uint64_t nrequests_small;

	malloc_bin_stats_t bstats[NBINS];
	malloc_large_stats_t lstats[NSIZES - NBINS];
	bin_stats_t bstats[NBINS];
	arena_stats_large_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;

typedef struct ctl_stats_s {
	size_t allocated;
	size_t active;
	size_t metadata;
	size_t metadata_thp;
	size_t resident;
	size_t mapped;
	size_t retained;

41
include/jemalloc/internal/div.h
Normal file
@ -0,0 +1,41 @@
#ifndef JEMALLOC_INTERNAL_DIV_H
#define JEMALLOC_INTERNAL_DIV_H

#include "jemalloc/internal/assert.h"

/*
 * This module does the division that computes the index of a region in a slab,
 * given its offset relative to the base.
 * That is, given a divisor d and an n = i * d (all integers), we'll return i.
 * We do some pre-computation to do this more quickly than a CPU division
 * instruction.
 * We bound n < 2^32, and don't support dividing by one.
 */

typedef struct div_info_s div_info_t;
struct div_info_s {
	uint32_t magic;
#ifdef JEMALLOC_DEBUG
	size_t d;
#endif
};

void div_init(div_info_t *div_info, size_t divisor);

static inline size_t
div_compute(div_info_t *div_info, size_t n) {
	assert(n <= (uint32_t)-1);
	/*
	 * This generates, e.g. mov; imul; shr on x86-64.  On a 32-bit machine,
	 * the compilers I tried were all smart enough to turn this into the
	 * appropriate "get the high 32 bits of the result of a multiply" (e.g.
	 * mul; mov edx eax; on x86, umull on arm, etc.).
	 */
	size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
#ifdef JEMALLOC_DEBUG
	assert(i * div_info->d == n);
#endif
	return i;
}

#endif /* JEMALLOC_INTERNAL_DIV_H */
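div.h only declares div_init; the construction of magic is not part of this hunk. One standard choice that satisfies div_compute's contract is magic = ceil(2^32 / d): for n = i * d < 2^32, n * magic = i * 2^32 + i * r with r = d * magic - 2^32 < d, and i * r < 2^32, so the high 32 bits of the product are exactly i. A sketch along those lines (not necessarily the exact formula div.c uses):

void
div_init(div_info_t *div_info, size_t d) {
	/* Dividing by one (and zero) is unsupported. */
	assert(d > 1);
	/* magic = ceil(2^32 / d); fits in 32 bits for any d > 1. */
	div_info->magic = (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
#ifdef JEMALLOC_DEBUG
	div_info->d = d;
#endif
}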
435
include/jemalloc/internal/emitter.h
Normal file
@ -0,0 +1,435 @@
#ifndef JEMALLOC_INTERNAL_EMITTER_H
#define JEMALLOC_INTERNAL_EMITTER_H

#include "jemalloc/internal/ql.h"

typedef enum emitter_output_e emitter_output_t;
enum emitter_output_e {
	emitter_output_json,
	emitter_output_table
};

typedef enum emitter_justify_e emitter_justify_t;
enum emitter_justify_e {
	emitter_justify_left,
	emitter_justify_right,
	/* Not for users; just to pass to internal functions. */
	emitter_justify_none
};

typedef enum emitter_type_e emitter_type_t;
enum emitter_type_e {
	emitter_type_bool,
	emitter_type_int,
	emitter_type_unsigned,
	emitter_type_uint32,
	emitter_type_uint64,
	emitter_type_size,
	emitter_type_ssize,
	emitter_type_string,
	/*
	 * A title is a column title in a table; it's just a string, but it's
	 * not quoted.
	 */
	emitter_type_title,
};

typedef struct emitter_col_s emitter_col_t;
struct emitter_col_s {
	/* Filled in by the user. */
	emitter_justify_t justify;
	int width;
	emitter_type_t type;
	union {
		bool bool_val;
		int int_val;
		unsigned unsigned_val;
		uint32_t uint32_val;
		uint64_t uint64_val;
		size_t size_val;
		ssize_t ssize_val;
		const char *str_val;
	};

	/* Filled in by initialization. */
	ql_elm(emitter_col_t) link;
};

typedef struct emitter_row_s emitter_row_t;
struct emitter_row_s {
	ql_head(emitter_col_t) cols;
};

static inline void
emitter_row_init(emitter_row_t *row) {
	ql_new(&row->cols);
}

static inline void
emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
	ql_elm_new(col, link);
	ql_tail_insert(&row->cols, col, link);
}

typedef struct emitter_s emitter_t;
struct emitter_s {
	emitter_output_t output;
	/* The output information. */
	void (*write_cb)(void *, const char *);
	void *cbopaque;
	int nesting_depth;
	/* True if we've already emitted a value at the given depth. */
	bool item_at_depth;
};

static inline void
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
    void (*write_cb)(void *, const char *), void *cbopaque) {
	emitter->output = emitter_output;
	emitter->write_cb = write_cb;
	emitter->cbopaque = cbopaque;
	emitter->item_at_depth = false;
	emitter->nesting_depth = 0;
}

/* Internal convenience function.  Write to the emitter the given string. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_printf(emitter_t *emitter, const char *format, ...) {
	va_list ap;

	va_start(ap, format);
	malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
	va_end(ap);
}

/* Write to the emitter the given string, but only in table mode. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
emitter_table_printf(emitter_t *emitter, const char *format, ...) {
	if (emitter->output == emitter_output_table) {
		va_list ap;
		va_start(ap, format);
		malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
		va_end(ap);
	}
}

static inline void
emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
    emitter_justify_t justify, int width) {
	size_t written;
	if (justify == emitter_justify_none) {
		written = malloc_snprintf(out_fmt, out_size,
		    "%%%s", fmt_specifier);
	} else if (justify == emitter_justify_left) {
		written = malloc_snprintf(out_fmt, out_size,
		    "%%-%d%s", width, fmt_specifier);
	} else {
		written = malloc_snprintf(out_fmt, out_size,
		    "%%%d%s", width, fmt_specifier);
	}
	/* Only happens in case of bad format string, which *we* choose. */
	assert(written < out_size);
}

/*
 * Internal.  Emit the given value type in the relevant encoding (so that the
 * bool true gets mapped to json "true", but the string "true" gets mapped to
 * json "\"true\"", for instance).
 *
 * Width is ignored if justify is emitter_justify_none.
 */
static inline void
emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
    emitter_type_t value_type, const void *value) {
	size_t str_written;
#define BUF_SIZE 256
#define FMT_SIZE 10
	/*
	 * We dynamically generate a format string to emit, to let us use the
	 * snprintf machinery.  This is kinda hacky, but gets the job done
	 * quickly without having to think about the various snprintf edge
	 * cases.
	 */
	char fmt[FMT_SIZE];
	char buf[BUF_SIZE];

#define EMIT_SIMPLE(type, format) \
	emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \
	emitter_printf(emitter, fmt, *(const type *)value); \

	switch (value_type) {
	case emitter_type_bool:
		emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
		emitter_printf(emitter, fmt, *(const bool *)value ?
		    "true" : "false");
		break;
	case emitter_type_int:
		EMIT_SIMPLE(int, "d")
		break;
	case emitter_type_unsigned:
		EMIT_SIMPLE(unsigned, "u")
		break;
	case emitter_type_ssize:
		EMIT_SIMPLE(ssize_t, "zd")
		break;
	case emitter_type_size:
		EMIT_SIMPLE(size_t, "zu")
		break;
	case emitter_type_string:
		str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
		    *(const char *const *)value);
		/*
		 * We control the strings we output; we shouldn't get anything
		 * anywhere near the fmt size.
		 */
		assert(str_written < BUF_SIZE);
		emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width);
		emitter_printf(emitter, fmt, buf);
		break;
	case emitter_type_uint32:
		EMIT_SIMPLE(uint32_t, FMTu32)
		break;
	case emitter_type_uint64:
		EMIT_SIMPLE(uint64_t, FMTu64)
		break;
	case emitter_type_title:
		EMIT_SIMPLE(char *const, "s");
		break;
	default:
		unreachable();
	}
#undef BUF_SIZE
#undef FMT_SIZE
}


/* Internal functions.  In json mode, tracks nesting state. */
static inline void
emitter_nest_inc(emitter_t *emitter) {
	emitter->nesting_depth++;
	emitter->item_at_depth = false;
}

static inline void
emitter_nest_dec(emitter_t *emitter) {
	emitter->nesting_depth--;
	emitter->item_at_depth = true;
}

static inline void
emitter_indent(emitter_t *emitter) {
	int amount = emitter->nesting_depth;
	const char *indent_str;
	if (emitter->output == emitter_output_json) {
		indent_str = "\t";
	} else {
		amount *= 2;
		indent_str = " ";
	}
	for (int i = 0; i < amount; i++) {
		emitter_printf(emitter, "%s", indent_str);
	}
}

static inline void
emitter_json_key_prefix(emitter_t *emitter) {
	emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
	emitter_indent(emitter);
}

static inline void
emitter_begin(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth == 0);
		emitter_printf(emitter, "{");
		emitter_nest_inc(emitter);
	} else {
		// tabular init
		emitter_printf(emitter, "%s", "");
	}
}

static inline void
emitter_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth == 1);
		emitter_nest_dec(emitter);
		emitter_printf(emitter, "\n}\n");
	}
}

/*
 * Note emits a different kv pair as well, but only in table mode.  Omits the
 * note if table_note_key is NULL.
 */
static inline void
emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value,
    const char *table_note_key, emitter_type_t table_note_value_type,
    const void *table_note_value) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth > 0);
		emitter_json_key_prefix(emitter);
		emitter_printf(emitter, "\"%s\": ", json_key);
		emitter_print_value(emitter, emitter_justify_none, -1,
		    value_type, value);
	} else {
		emitter_indent(emitter);
		emitter_printf(emitter, "%s: ", table_key);
		emitter_print_value(emitter, emitter_justify_none, -1,
		    value_type, value);
		if (table_note_key != NULL) {
			emitter_printf(emitter, " (%s: ", table_note_key);
			emitter_print_value(emitter, emitter_justify_none, -1,
			    table_note_value_type, table_note_value);
			emitter_printf(emitter, ")");
		}
		emitter_printf(emitter, "\n");
	}
	emitter->item_at_depth = true;
}

static inline void
emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
    emitter_type_t value_type, const void *value) {
	emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
	    emitter_type_bool, NULL);
}

static inline void
emitter_json_kv(emitter_t *emitter, const char *json_key,
    emitter_type_t value_type, const void *value) {
	if (emitter->output == emitter_output_json) {
		emitter_kv(emitter, json_key, NULL, value_type, value);
	}
}

static inline void
emitter_table_kv(emitter_t *emitter, const char *table_key,
    emitter_type_t value_type, const void *value) {
	if (emitter->output == emitter_output_table) {
		emitter_kv(emitter, NULL, table_key, value_type, value);
	}
}

static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
    const char *table_header) {
	if (emitter->output == emitter_output_json) {
		emitter_json_key_prefix(emitter);
		emitter_printf(emitter, "\"%s\": {", json_key);
		emitter_nest_inc(emitter);
	} else {
		emitter_indent(emitter);
		emitter_printf(emitter, "%s\n", table_header);
		emitter_nest_inc(emitter);
	}
}

static inline void
emitter_dict_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth > 0);
		emitter_nest_dec(emitter);
		emitter_printf(emitter, "\n");
		emitter_indent(emitter);
		emitter_printf(emitter, "}");
	} else {
		emitter_nest_dec(emitter);
	}
}

static inline void
emitter_json_dict_begin(emitter_t *emitter, const char *json_key) {
	if (emitter->output == emitter_output_json) {
		emitter_dict_begin(emitter, json_key, NULL);
	}
}

static inline void
emitter_json_dict_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		emitter_dict_end(emitter);
	}
}

static inline void
emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
	if (emitter->output == emitter_output_table) {
		emitter_dict_begin(emitter, NULL, table_key);
	}
}

static inline void
emitter_table_dict_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_table) {
		emitter_dict_end(emitter);
	}
}

static inline void
emitter_json_arr_begin(emitter_t *emitter, const char *json_key) {
	if (emitter->output == emitter_output_json) {
		emitter_json_key_prefix(emitter);
		emitter_printf(emitter, "\"%s\": [", json_key);
		emitter_nest_inc(emitter);
	}
}

static inline void
emitter_json_arr_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth > 0);
		emitter_nest_dec(emitter);
		emitter_printf(emitter, "\n");
		emitter_indent(emitter);
		emitter_printf(emitter, "]");
	}
}

static inline void
emitter_json_arr_obj_begin(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		emitter_json_key_prefix(emitter);
		emitter_printf(emitter, "{");
		emitter_nest_inc(emitter);
	}
}

static inline void
emitter_json_arr_obj_end(emitter_t *emitter) {
	if (emitter->output == emitter_output_json) {
		assert(emitter->nesting_depth > 0);
		emitter_nest_dec(emitter);
		emitter_printf(emitter, "\n");
		emitter_indent(emitter);
		emitter_printf(emitter, "}");
	}
}

static inline void
emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type,
    const void *value) {
	if (emitter->output == emitter_output_json) {
		emitter_json_key_prefix(emitter);
		emitter_print_value(emitter, emitter_justify_none, -1,
		    value_type, value);
	}
}

static inline void
emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
	if (emitter->output != emitter_output_table) {
		return;
	}
	emitter_col_t *col;
	ql_foreach(col, &row->cols, link) {
		emitter_print_value(emitter, col->justify, col->width,
		    col->type, (const void *)&col->bool_val);
	}
	emitter_table_printf(emitter, "\n");
}

#endif /* JEMALLOC_INTERNAL_EMITTER_H */
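As emitter.h is new in this release, a usage sketch may help. It drives the API above with a trivial callback writing to a FILE * (the callback and field values here are stand-ins; inside jemalloc the stats code supplies malloc_message-style hooks):

#include <stdio.h>

static void
write_cb(void *cbopaque, const char *str) {
	fputs(str, (FILE *)cbopaque);
}

static void
emit_example(FILE *out) {
	emitter_t emitter;
	unsigned narenas = 4;	/* Illustrative values. */
	bool bg = true;

	emitter_init(&emitter, emitter_output_json, write_cb, out);
	emitter_begin(&emitter);
	emitter_kv(&emitter, "narenas", "Arenas", emitter_type_unsigned,
	    &narenas);
	emitter_kv(&emitter, "background_thread", "Background thread",
	    emitter_type_bool, &bg);
	emitter_end(&emitter);
	/* JSON output:
	 * {
	 *	"narenas": 4,
	 *	"background_thread": true
	 * }
	 */
}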
@ -4,9 +4,10 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/rtree.h"

extern size_t opt_lg_extent_max_active_fit;

extern rtree_t extents_rtree;
extern const extent_hooks_t extent_hooks_default;
extern mutex_pool_t extent_mutex_pool;

@ -93,6 +93,12 @@ extent_committed_get(const extent_t *extent) {
	    EXTENT_BITS_COMMITTED_SHIFT);
}

static inline bool
extent_dumpable_get(const extent_t *extent) {
	return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
	    EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline bool
extent_slab_get(const extent_t *extent) {
	return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
@ -184,15 +190,22 @@ extent_addr_set(extent_t *extent, void *addr) {
}

static inline void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
	assert(extent_base_get(extent) == extent_addr_get(extent));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r =
		    prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
		size_t r;
		if (!tsdn_null(tsdn)) {
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_offset_statep_get(tsd), lg_range);
		} else {
			r = prng_lg_range_zu(
			    &extent_arena_get(extent)->offset_state,
			    lg_range, true);
		}
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
@ -269,6 +282,12 @@ extent_committed_set(extent_t *extent, bool committed) {
	    ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
}

static inline void
extent_dumpable_set(extent_t *extent, bool dumpable) {
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
	    ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
}

static inline void
extent_slab_set(extent_t *extent, bool slab) {
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
@ -283,7 +302,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed) {
    bool committed, bool dumpable) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	extent_arena_set(extent, arena);
@ -295,6 +314,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
	extent_state_set(extent, state);
	extent_zeroed_set(extent, zeroed);
	extent_committed_set(extent, committed);
	extent_dumpable_set(extent, dumpable);
	ql_elm_new(extent, ql_link);
	if (config_prof) {
		extent_prof_tctx_set(extent, NULL);
@ -312,6 +332,7 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
	extent_state_set(extent, extent_state_active);
	extent_zeroed_set(extent, true);
	extent_committed_set(extent, true);
	extent_dumpable_set(extent, true);
}

static inline void
@ -334,6 +355,11 @@ extent_list_append(extent_list_t *list, extent_t *extent) {
	ql_tail_insert(list, extent, ql_link);
}

static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
	ql_head_insert(list, extent, ql_link);
}

static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
    extent_t *to_insert) {

@ -5,7 +5,6 @@
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"

@ -24,13 +23,14 @@ struct extent_s {
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * d: dumpable
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
	 * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
@ -45,6 +45,23 @@ struct extent_s {
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * dumpable: The dumpable flag indicates whether or not we've set the
	 *           memory in question to be dumpable.  Note that this
	 *           interacts somewhat subtly with user-specified extent hooks,
	 *           since we don't know if *they* are fiddling with
	 *           dumpability (in which case, we don't want to undo whatever
	 *           they're doing).  To deal with this scenario, we:
	 *           - Make dumpable false only for memory allocated with the
	 *             default hooks.
	 *           - Only allow memory to go from non-dumpable to dumpable,
	 *             and only once.
	 *           - Never make the OS call to allow dumping when the
	 *             dumpable bit is already set.
	 *           These three constraints mean that we will never
	 *           accidentally dump user memory that the user meant to set
	 *           nondumpable with their extent hooks.
	 *
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
@ -69,37 +86,41 @@ struct extent_s {
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK \
	(((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_SHIFT  MALLOCX_ARENA_BITS
#define EXTENT_BITS_SLAB_MASK \
	((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_SHIFT  (MALLOCX_ARENA_BITS + 1)
#define EXTENT_BITS_COMMITTED_MASK \
	((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_ZEROED_SHIFT  (MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_ZEROED_MASK \
	((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_STATE_SHIFT  (MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_STATE_MASK \
	((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_SZIND_SHIFT  (MALLOCX_ARENA_BITS + 5)
#define EXTENT_BITS_SZIND_MASK \
	(((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_NFREE_SHIFT \
	(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
#define EXTENT_BITS_NFREE_MASK \
	((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_SN_SHIFT \
	(MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
@ -120,20 +141,19 @@ struct extent_s {
		size_t e_bsize;
	};

	union {
		/*
		 * List linkage, used by a variety of lists:
		 * - arena_bin_t's slabs_full
		 * - bin_t's slabs_full
		 * - extents_t's LRU
		 * - stashed dirty extents
		 * - arena's large allocations
		 */
		ql_elm(extent_t) ql_link;
		/* Red-black tree linkage, used by arena's extent_avail. */
		rb_node(extent_t) rb_link;
	};

	/* Linkage for per size class sn/address-ordered heaps. */
	/*
	 * Linkage for per size class sn/address-ordered heaps, and
	 * for extent_avail
	 */
	phn(extent_t) ph_link;

	union {
@ -148,7 +168,7 @@ struct extent_s {
	};
};
typedef ql_head(extent_t) extent_list_t;
typedef rb_tree(extent_t) extent_tree_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
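Each SHIFT in the reworked macros above is defined as the previous field's WIDTH + SHIFT, so inserting a field (as this commit does with dumpable) renumbers every downstream field automatically. A toy illustration of the same pattern, with hypothetical FOO/BAR fields rather than the real extent layout:

#define MASK(width, shift)	((((uint64_t)1 << (width)) - 1) << (shift))

#define FOO_WIDTH	3
#define FOO_SHIFT	0
#define FOO_MASK	MASK(FOO_WIDTH, FOO_SHIFT)

/* BAR packs immediately above FOO; a field added between them would
 * shift BAR with no manual renumbering. */
#define BAR_WIDTH	2
#define BAR_SHIFT	(FOO_WIDTH + FOO_SHIFT)
#define BAR_MASK	MASK(BAR_WIDTH, BAR_SHIFT)

static inline uint64_t
bar_get(uint64_t bits) {
	return (bits & BAR_MASK) >> BAR_SHIFT;
}

static inline uint64_t
bar_set(uint64_t bits, uint64_t bar) {
	return (bits & ~BAR_MASK) | (bar << BAR_SHIFT);
}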
@ -6,4 +6,12 @@ typedef struct extents_s extents_t;

#define EXTENT_HOOKS_INITIALIZER NULL

#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)

/*
 * When reusing (and splitting) an active extent, (1U <<
 * opt_lg_extent_max_active_fit) is the max ratio between the size of the
 * active extent and the new extent.
 */
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6

#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
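To make the bound concrete: with the default of 6 the permitted ratio is 1 << 6 = 64, so e.g. a 4 KiB request may be carved out of an active extent of up to 256 KiB, while larger active extents are passed over. A sketch of the implied eligibility test (the real check lives in extent.c and may be phrased differently):

	bool fit_ok =
	    (extent_size >> opt_lg_extent_max_active_fit) <= new_size;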
@ -260,22 +260,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
	uint64_t k2 = 0;

	switch (len & 15) {
	case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
	case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
	case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
	case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
	case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */
	case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
		k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;

	case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
	case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
	case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
	case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
	case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
	case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
	case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
		/* falls through */
	case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */
	case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */
	case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */
	case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */
	case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */
	case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */
	case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */
	case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
		k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
	}
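The /* falls through */ markers are the conventional annotation recognized by GCC 7+'s -Wimplicit-fallthrough (and by human readers) to say the missing break is deliberate. A standalone illustration:

/* With gcc -Wextra -Wimplicit-fallthrough, the annotated cases compile
 * cleanly; removing a comment reintroduces the warning. */
static int
accumulate(int stage, int x) {
	int acc = 0;
	switch (stage) {
	case 2: acc += x * x;	/* falls through */
	case 1: acc += x;	/* falls through */
	default: break;
	}
	return acc;
}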
@ -5,7 +5,16 @@
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"

# ifdef _WIN64
# if LG_VADDR <= 32
#  error Generate the headers using x64 vcargs
# endif
# else
# if LG_VADDR > 32
#  undef LG_VADDR
#  define LG_VADDR 32
# endif
# endif
#else
# include <sys/param.h>
# include <sys/mman.h>

@ -33,6 +33,8 @@
 * order to yield to another virtual CPU.
 */
#undef CPU_SPINWAIT
/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
#undef HAVE_CPU_SPINWAIT

/*
 * Number of significant bits in virtual addresses.  This may be less than the
@ -237,6 +239,12 @@
 */
#undef JEMALLOC_CACHE_OBLIVIOUS

/*
 * If defined, enable logging facilities.  We make this a configure option to
 * avoid taking extra branches everywhere.
 */
#undef JEMALLOC_LOG

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
@ -254,6 +262,12 @@
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE

/*
 * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
 * arguments to madvise(2).
 */
#undef JEMALLOC_HAVE_MADVISE_HUGE

/*
 * Methods for purging unused pages differ between operating systems.
 *
@ -271,6 +285,14 @@
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
#undef JEMALLOC_DEFINE_MADVISE_FREE

/*
 * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
 */
#undef JEMALLOC_MADVISE_DONTDUMP

/*
 * Defined if transparent huge pages (THPs) are supported via the
 * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
@ -336,4 +358,9 @@
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
#undef JEMALLOC_IS_MALLOC

/*
 * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
 */
#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE

#endif /* JEMALLOC_INTERNAL_DEFS_H_ */

@ -106,16 +106,16 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
	return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE tcache_bin_t *
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < NBINS);
	return &tcache->tbins_small[binind];
	return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE tcache_bin_t *
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= NBINS && binind < nhbins);
	return &tcache->tbins_large[binind - NBINS];
	return &tcache->bins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
@ -151,6 +151,7 @@ pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */

@ -5,6 +5,24 @@
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/witness.h"

/*
 * Translating the names of the 'i' functions:
 *   Abbreviations used in the first part of the function name (before
 *   alloc/dalloc) describe what that function accomplishes:
 *     a: arena (query)
 *     s: size (query, or sized deallocation)
 *     e: extent (query)
 *     p: aligned (allocates)
 *     vs: size (query, without knowing that the pointer is into the heap)
 *     r: rallocx implementation
 *     x: xallocx implementation
 *   Abbreviations used in the second part of the function name (after
 *   alloc/dalloc) describe the arguments it takes
 *     z: whether to return zeroed memory
 *     t: accepts a tcache_t * parameter
 *     m: accepts an arena_t * parameter
 */

JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);
@ -27,8 +45,10 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
	assert(size != 0);
	assert(!is_internal || tcache == NULL);
	assert(!is_internal || arena == NULL || arena_is_auto(arena));
	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
		    WITNESS_RANK_CORE, 0);
	}

	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_internal && likely(ret != NULL)) {
@ -91,7 +111,8 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
	if (config_stats && is_internal) {
		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
	}
	if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
	if (!is_internal && !tsdn_null(tsdn) &&
	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
		assert(tcache == NULL);
	}
	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);

@ -37,4 +37,7 @@
# define JET_MUTABLE const
#endif

#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__

#endif /* JEMALLOC_INTERNAL_MACROS_H */
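The two new macros split a __VA_ARGS__ list into its first argument and the rest; a quick expansion check (illustrative only):

	/*
	 * JEMALLOC_VA_ARGS_HEAD("fmt %d", 42) expands to: "fmt %d"
	 * JEMALLOC_VA_ARGS_TAIL("fmt %d", 42) expands to: 42
	 */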
@ -79,22 +79,29 @@ typedef int malloc_cpuid_t;
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
# endif
# ifdef __or1k__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __riscv__
# if defined(__riscv) || defined(__riscv__)
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
     defined(__SH4_SINGLE_ONLY__))
# define LG_QUANTUM 4
# endif
# ifdef __tile__

@ -47,6 +47,10 @@
#endif
#include "jemalloc/internal/hooks.h"

#ifdef JEMALLOC_DEFINE_MADVISE_FREE
# define JEMALLOC_MADV_FREE 8
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
@ -61,6 +65,13 @@ static const bool have_dss =
    false
#endif
    ;
static const bool have_madvise_huge =
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
@ -111,13 +122,6 @@ static const bool config_stats =
    false
#endif
    ;
static const bool config_thp =
#ifdef JEMALLOC_THP
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
@ -146,6 +150,17 @@ static const bool config_cache_oblivious =
    false
#endif
    ;
/*
 * Undocumented, for jemalloc development use only at the moment.  See the note
 * in jemalloc/internal/log.h.
 */
static const bool config_log =
#ifdef JEMALLOC_LOG
    true
#else
    false
#endif
    ;
#ifdef JEMALLOC_HAVE_SCHED_GETCPU
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
115
include/jemalloc/internal/log.h
Normal file
@@ -0,0 +1,115 @@
#ifndef JEMALLOC_INTERNAL_LOG_H
#define JEMALLOC_INTERNAL_LOG_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"

#ifdef JEMALLOC_LOG
#  define JEMALLOC_LOG_VAR_BUFSIZE 1000
#else
#  define JEMALLOC_LOG_VAR_BUFSIZE 1
#endif

#define JEMALLOC_LOG_BUFSIZE 4096

/*
 * The log malloc_conf option is a '|'-delimited list of log_var name segments
 * which should be logged. The names are themselves hierarchical, with '.' as
 * the delimiter (a "segment" is just a prefix in the log namespace). So, if
 * you have:
 *
 * log("arena", "log msg for arena"); // 1
 * log("arena.a", "log msg for arena.a"); // 2
 * log("arena.b", "log msg for arena.b"); // 3
 * log("arena.a.a", "log msg for arena.a.a"); // 4
 * log("extent.a", "log msg for extent.a"); // 5
 * log("extent.b", "log msg for extent.b"); // 6
 *
 * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
 * 6 will print at runtime. You can enable logging from all log vars by
 * writing "log=.".
 *
 * None of this should be regarded as a stable API for right now. It's intended
 * as a debugging interface, to let us keep around some of our printf-debugging
 * statements.
 */

extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
extern atomic_b_t log_init_done;

typedef struct log_var_s log_var_t;
struct log_var_s {
    /*
     * Lowest bit is "inited", second lowest is "enabled". Putting them in
     * a single word lets us avoid any fences on weak architectures.
     */
    atomic_u_t state;
    const char *name;
};

#define LOG_NOT_INITIALIZED 0U
#define LOG_INITIALIZED_NOT_ENABLED 1U
#define LOG_ENABLED 2U

#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}

/*
 * Returns the value we should assume for state (which is not necessarily
 * accurate; if logging is done before logging has finished initializing, then
 * we default to doing the safe thing by logging everything).
 */
unsigned log_var_update_state(log_var_t *log_var);

/* We factor out the metadata management to allow us to test more easily. */
#define log_do_begin(log_var) \
if (config_log) { \
    unsigned log_state = atomic_load_u(&(log_var).state, \
        ATOMIC_RELAXED); \
    if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \
        log_state = log_var_update_state(&(log_var)); \
        assert(log_state != LOG_NOT_INITIALIZED); \
    } \
    if (log_state == LOG_ENABLED) { \
        {
        /* User code executes here. */
#define log_do_end(log_var) \
        } \
    } \
}

/*
 * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
 * preprocessing. To work around this, we take all potential extra arguments in
 * a var-args function. Since a varargs macro needs at least one argument in
 * the "...", we accept the format string there, and require that the first
 * argument in this "..." is a const char *.
 */
static inline void
log_impl_varargs(const char *name, ...) {
    char buf[JEMALLOC_LOG_BUFSIZE];
    va_list ap;

    va_start(ap, name);
    const char *format = va_arg(ap, const char *);
    size_t dst_offset = 0;
    dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
    dst_offset += malloc_vsnprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
    dst_offset += malloc_snprintf(buf + dst_offset,
        JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
    va_end(ap);

    malloc_write(buf);
}

/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
#define LOG(log_var_str, ...) \
do { \
    static log_var_t log_var = LOG_VAR_INIT(log_var_str); \
    log_do_begin(log_var) \
        log_impl_varargs((log_var).name, __VA_ARGS__); \
    log_do_end(log_var) \
} while (0)

#endif /* JEMALLOC_INTERNAL_LOG_H */
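For orientation, here is a small standalone C program, written for this review, that models the '|'/'.' matching rule the header comment describes. All demo_ names are invented; jemalloc's real matcher lives in src/log.c and may differ in details. With the config "arena.a|extent" it reproduces the documented example: names 2, 4, 5, and 6 match.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Does one config segment (e.g. "arena.a") enable a log var name? */
static bool
segment_enables(const char *seg, size_t seg_len, const char *name) {
    if (seg_len == 1 && seg[0] == '.') {
        return true; /* "log=." enables everything. */
    }
    if (strncmp(name, seg, seg_len) != 0) {
        return false;
    }
    /* Prefix must end exactly at a '.' boundary (or the whole name). */
    return name[seg_len] == '\0' || name[seg_len] == '.';
}

static bool
demo_log_name_enabled(const char *conf, const char *name) {
    while (*conf != '\0') {
        const char *bar = strchr(conf, '|');
        size_t len = bar != NULL ? (size_t)(bar - conf) : strlen(conf);
        if (segment_enables(conf, len, name)) {
            return true;
        }
        conf = bar != NULL ? bar + 1 : conf + len;
    }
    return false;
}

int
main(void) {
    const char *names[] = {"arena", "arena.a", "arena.b", "arena.a.a",
        "extent.a", "extent.b"};
    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        printf("%-10s -> %s\n", names[i],
            demo_log_name_enabled("arena.a|extent", names[i]) ? "on" : "off");
    }
    return 0;
}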
@@ -53,10 +53,50 @@ size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
/*
 * The caller can set write_cb and cbopaque to null to choose to print with the
 * je_malloc_message hook.
 */
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
    /*
     * Use syscall(2) rather than write(2) when possible in order to avoid
     * the possibility of memory allocation within libc. This is necessary
     * on FreeBSD; most operating systems do not have this problem though.
     *
     * syscall() returns long or int, depending on platform, so capture the
     * result in the widest plausible type to avoid compiler warnings.
     */
    long result = syscall(SYS_write, fd, buf, count);
#else
    ssize_t result = (ssize_t)write(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}

static inline ssize_t
malloc_read_fd(int fd, void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    long result = syscall(SYS_read, fd, buf, count);
#else
    ssize_t result = read(fd, buf,
#ifdef _WIN32
        (unsigned int)
#endif
        count);
#endif
    return (ssize_t)result;
}

#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
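A hedged, self-contained illustration of the syscall-vs-write fallback above. demo_write_fd is an invented name, not the jemalloc function, and covers only the Linux raw-syscall path plus the plain write(2) fallback (the real function also handles _WIN32 and the JEMALLOC_USE_SYSCALL configure flag).

#define _GNU_SOURCE
#include <string.h>
#include <unistd.h>
#if defined(__linux__)
#include <sys/syscall.h>
#endif

static ssize_t
demo_write_fd(int fd, const void *buf, size_t count) {
#if defined(__linux__) && defined(SYS_write)
    /* Raw syscall: no chance of allocating inside libc. */
    return (ssize_t)syscall(SYS_write, fd, buf, count);
#else
    return write(fd, buf, count);
#endif
}

int
main(void) {
    const char *msg = "written without touching the heap\n";
    return demo_write_fd(2, msg, strlen(msg)) < 0 ? 1 : 0;
}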
@@ -35,21 +35,34 @@ typedef enum {
    mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;

#define MUTEX_PROF_COUNTERS \
    OP(num_ops, uint64_t) \
    OP(num_wait, uint64_t) \
    OP(num_spin_acq, uint64_t) \
    OP(num_owner_switch, uint64_t) \
    OP(total_wait_time, uint64_t) \
    OP(max_wait_time, uint64_t) \
    OP(max_num_thds, uint32_t)
#define MUTEX_PROF_UINT64_COUNTERS \
    OP(num_ops, uint64_t, "n_lock_ops") \
    OP(num_wait, uint64_t, "n_waiting") \
    OP(num_spin_acq, uint64_t, "n_spin_acq") \
    OP(num_owner_switch, uint64_t, "n_owner_switch") \
    OP(total_wait_time, uint64_t, "total_wait_ns") \
    OP(max_wait_time, uint64_t, "max_wait_ns")

typedef enum {
#define OP(counter, type) mutex_counter_##counter,
    MUTEX_PROF_COUNTERS
#define MUTEX_PROF_UINT32_COUNTERS \
    OP(max_num_thds, uint32_t, "max_n_thds")

#define MUTEX_PROF_COUNTERS \
    MUTEX_PROF_UINT64_COUNTERS \
    MUTEX_PROF_UINT32_COUNTERS

#define OP(counter, type, human) mutex_counter_##counter,

#define COUNTER_ENUM(counter_list, t) \
    typedef enum { \
        counter_list \
        mutex_prof_num_##t##_counters \
    } mutex_prof_##t##_counter_ind_t;

COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)

#undef COUNTER_ENUM
#undef OP
    mutex_prof_num_counters
} mutex_prof_counter_ind_t;

typedef struct {
    /*
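The X-macro pattern above can be hard to read cold. This standalone sketch (all DEMO_/demo_ names are invented for illustration) shows how a single OP list expands into both an enum and a parallel table of the human-readable strings, which is exactly the trick the new MUTEX_PROF_*_COUNTERS lists rely on.

#include <stdint.h>
#include <stdio.h>

#define DEMO_UINT64_COUNTERS \
    OP(num_ops, uint64_t, "n_lock_ops") \
    OP(num_wait, uint64_t, "n_waiting")

/* First expansion: enum indices, one per counter. */
typedef enum {
#define OP(counter, type, human) demo_counter_##counter,
    DEMO_UINT64_COUNTERS
#undef OP
    demo_num_counters
} demo_counter_ind_t;

/* Second expansion: the matching human-readable names. */
static const char *demo_counter_names[demo_num_counters] = {
#define OP(counter, type, human) human,
    DEMO_UINT64_COUNTERS
#undef OP
};

int
main(void) {
    for (int i = 0; i < demo_num_counters; i++) {
        printf("%d: %s\n", i, demo_counter_names[i]);
    }
    return 0;
}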
@@ -58,6 +58,20 @@ static const bool pages_can_purge_forced =
#endif
    ;

typedef enum {
    thp_mode_default       = 0, /* Do not change hugepage settings. */
    thp_mode_always        = 1, /* Always set MADV_HUGEPAGE. */
    thp_mode_never         = 2, /* Always set MADV_NOHUGEPAGE. */

    thp_mode_names_limit   = 3, /* Used for option processing. */
    thp_mode_not_supported = 3  /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];

void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
@@ -66,6 +80,9 @@ bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
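A hedged, Linux-oriented sketch of how these modes map onto madvise(2), as the enum comments themselves state. This is a standalone illustration with invented demo_ names, not jemalloc's src/pages.c logic.

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

typedef enum {
    demo_thp_default = 0, /* Leave the kernel hugepage policy alone. */
    demo_thp_always  = 1, /* madvise(..., MADV_HUGEPAGE). */
    demo_thp_never   = 2  /* madvise(..., MADV_NOHUGEPAGE). */
} demo_thp_mode_t;

static int
demo_apply_thp(void *addr, size_t size, demo_thp_mode_t mode) {
#if defined(MADV_HUGEPAGE) && defined(MADV_NOHUGEPAGE)
    switch (mode) {
    case demo_thp_always:
        return madvise(addr, size, MADV_HUGEPAGE);
    case demo_thp_never:
        return madvise(addr, size, MADV_NOHUGEPAGE);
    default:
        return 0; /* demo_thp_default: do nothing. */
    }
#else
    (void)addr; (void)size; (void)mode;
    return 0; /* No THP support detected on this platform. */
#endif
}

int
main(void) {
    size_t sz = (size_t)2 << 20; /* 2 MiB, a common huge page size. */
    void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return 1;
    }
    printf("MADV_HUGEPAGE: %d\n", demo_apply_thp(p, sz, demo_thp_always));
    munmap(p, sz);
    return 0;
}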
@@ -69,4 +69,15 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false. No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
@@ -3,17 +3,6 @@

#include "jemalloc/internal/sz.h"

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false. No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return prof_active;
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
    /*
@@ -178,9 +178,21 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
#  ifdef __aarch64__
    /*
     * aarch64 doesn't sign extend the highest virtual address bit to set
     * the higher ones. Instead, the high bits get zeroed.
     */
    uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
    /* Mask off the slab bit. */
    uintptr_t low_bit_mask = ~(uintptr_t)1;
    uintptr_t mask = high_bit_mask & low_bit_mask;
    return (extent_t *)(bits & mask);
#  else
    /* Restore sign-extended high bits, mask slab bit. */
    return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
        RTREE_NHIB) & ~((uintptr_t)0x1));
#  endif
}

JEMALLOC_ALWAYS_INLINE szind_t
@@ -196,8 +208,8 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
#  endif

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_extent_get(bits);
@@ -209,8 +221,8 @@ rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_szind_get(bits);
@@ -221,8 +233,8 @@ rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
}

JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool dependent) {
rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    return rtree_leaf_elm_bits_slab_get(bits);
@@ -233,8 +245,8 @@ rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
}

static inline void
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    extent_t *extent) {
rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
    uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
@@ -247,8 +259,8 @@ rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
}

static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    szind_t szind) {
rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, szind_t szind) {
    assert(szind <= NSIZES);

#ifdef RTREE_LEAF_COMPACT
@@ -265,8 +277,8 @@ rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
}

static inline void
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
    bool slab) {
rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
    rtree_leaf_elm_t *elm, bool slab) {
#ifdef RTREE_LEAF_COMPACT
    uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
        true);
@@ -448,8 +460,14 @@ rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    if (!dependent && elm == NULL) {
        return true;
    }
#ifdef RTREE_LEAF_COMPACT
    uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_bits_szind_get(bits);
    *r_slab = rtree_leaf_elm_bits_slab_get(bits);
#else
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
#endif
    return false;
}
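The non-aarch64 branch relies on shifting the packed word left by RTREE_NHIB and arithmetic-shifting it back to re-sign-extend the high bits while clearing the low "slab" tag bit. A standalone demo of that round trip, assuming a 64-bit build and arithmetic right shift of signed values (implementation-defined in C, but something jemalloc itself relies on); the constants are illustrative x86-64 values, not jemalloc's macros.

#include <assert.h>
#include <stdint.h>

#define DEMO_LG_VADDR 48
#define DEMO_NHIB (64 - DEMO_LG_VADDR)

static uintptr_t
demo_unpack(uintptr_t bits) {
    /* Re-sign-extend the top DEMO_NHIB bits, then clear the tag bit. */
    return (uintptr_t)((intptr_t)(bits << DEMO_NHIB) >> DEMO_NHIB) &
        ~(uintptr_t)0x1;
}

int
main(void) {
    uintptr_t user = (uintptr_t)0x00007f1234567890ULL; /* bit 47 clear */
    uintptr_t kern = (uintptr_t)0xffff812345678910ULL; /* bit 47 set */
    assert(demo_unpack(user | 0x1) == user);
    assert(demo_unpack(kern | 0x1) == kern);
    return 0;
}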
@@ -26,7 +26,7 @@
 * Zero initializer required for tsd initialization only. Proper initialization
 * done via rtree_ctx_data_init().
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}

typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
@@ -1,25 +1,29 @@
#ifndef JEMALLOC_INTERNAL_SPIN_H
#define JEMALLOC_INTERNAL_SPIN_H

#ifdef JEMALLOC_SPIN_C_
#  define SPIN_INLINE extern inline
#else
#  define SPIN_INLINE inline
#endif

#define SPIN_INITIALIZER {0U}

typedef struct {
    unsigned iteration;
} spin_t;

SPIN_INLINE void
static inline void
spin_cpu_spinwait() {
#  if HAVE_CPU_SPINWAIT
    CPU_SPINWAIT;
#  else
    volatile int x = 0;
    x = x;
#  endif
}

static inline void
spin_adaptive(spin_t *spin) {
    volatile uint32_t i;

    if (spin->iteration < 5) {
        for (i = 0; i < (1U << spin->iteration); i++) {
            CPU_SPINWAIT;
            spin_cpu_spinwait();
        }
        spin->iteration++;
    } else {
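A hedged usage sketch for spin_adaptive(): initialize a fresh spin_t per acquisition sequence and call it on each failed try, so the back-off starts small and doubles. The else branch is truncated in the hunk above; this standalone model assumes it yields the CPU (sched_yield), and all demo_ names are invented.

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
    unsigned iteration;
} demo_spin_t;
#define DEMO_SPIN_INITIALIZER {0U}

static void
demo_spin_adaptive(demo_spin_t *spin) {
    volatile uint32_t i;
    if (spin->iteration < 5) {
        for (i = 0; i < (1U << spin->iteration); i++) {
            /* CPU_SPINWAIT (e.g. the x86 pause hint) goes here. */
        }
        spin->iteration++;
    } else {
        sched_yield(); /* Assumed behavior of the elided branch. */
    }
}

static atomic_flag demo_lock = ATOMIC_FLAG_INIT;

static void
demo_lock_acquire(void) {
    /* A fresh spinner per acquisition keeps the back-off starting small. */
    demo_spin_t spinner = DEMO_SPIN_INITIALIZER;
    while (atomic_flag_test_and_set_explicit(&demo_lock,
        memory_order_acquire)) {
        demo_spin_adaptive(&spinner);
    }
}

int
main(void) {
    demo_lock_acquire();
    atomic_flag_clear_explicit(&demo_lock, memory_order_release);
    return 0;
}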
@@ -1,12 +1,6 @@
#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"

/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
    OPTION('J', json, false, true) \
@@ -33,132 +27,4 @@ extern char opt_stats_print_opts[stats_print_tot_num_options+1];
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);

/*
 * In those architectures that support 64-bit atomics, we use atomic updates for
 * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif

typedef struct malloc_bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin. Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin. This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;

    /* Total number of slabs created for this bin's size class. */
    uint64_t nslabs;

    /*
     * Total number of slabs reused by extracting them from the slabs heap
     * for this bin's size class.
     */
    uint64_t reslabs;

    /* Current number of slabs in this bin. */
    size_t curslabs;

    mutex_prof_data_t mutex_data;
} malloc_bin_stats_t;

typedef struct malloc_large_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.
     */
    arena_stats_u64_t nmalloc;
    arena_stats_u64_t ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    arena_stats_u64_t nrequests; /* Partially derived. */

    /* Current number of allocations of this size class. */
    size_t curlextents; /* Derived. */
} malloc_large_stats_t;

typedef struct decay_stats_s {
    /* Total number of purge sweeps. */
    arena_stats_u64_t npurge;
    /* Total number of madvise calls made. */
    arena_stats_u64_t nmadvise;
    /* Total number of pages purged. */
    arena_stats_u64_t purged;
} decay_stats_t;

/*
 * Arena stats. Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_t mtx;
#endif

    /* Number of bytes currently mapped, excluding retained memory. */
    atomic_zu_t mapped; /* Partially derived. */

    /*
     * Number of unused virtual memory bytes currently retained. Retained
     * bytes are technically mapped (though always decommitted or purged),
     * but they are excluded from the mapped statistic (above).
     */
    atomic_zu_t retained; /* Derived. */

    decay_stats_t decay_dirty;
    decay_stats_t decay_muzzy;

    atomic_zu_t base; /* Derived. */
    atomic_zu_t internal;
    atomic_zu_t resident; /* Derived. */

    atomic_zu_t allocated_large; /* Derived. */
    arena_stats_u64_t nmalloc_large; /* Derived. */
    arena_stats_u64_t ndalloc_large; /* Derived. */
    arena_stats_u64_t nrequests_large; /* Derived. */

    /* Number of bytes cached in tcache associated with this arena. */
    atomic_zu_t tcache_bytes; /* Derived. */

    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

    /* One element for each large size class. */
    malloc_large_stats_t lstats[NSIZES - NBINS];

    /* Arena uptime. */
    nstime_t uptime;
} arena_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_H */
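A minimal sketch of the two synchronization regimes behind arena_stats_u64_t, with C11 atomics standing in for jemalloc's atomic_u64_t; the demo_ names are invented and the feature macro DEMO_ATOMIC_U64 plays the role of JEMALLOC_ATOMIC_U64. Build with -DDEMO_ATOMIC_U64 for the lock-free read path, without it for the mutex-protected path.

#include <stdint.h>
#include <pthread.h>
#ifdef DEMO_ATOMIC_U64
#include <stdatomic.h>
typedef _Atomic uint64_t demo_stats_u64_t;
static uint64_t
demo_read(demo_stats_u64_t *p, pthread_mutex_t *mtx) {
    (void)mtx; /* 64-bit atomics available: no lock needed. */
    return atomic_load_explicit(p, memory_order_relaxed);
}
#else
typedef uint64_t demo_stats_u64_t;
static uint64_t
demo_read(demo_stats_u64_t *p, pthread_mutex_t *mtx) {
    pthread_mutex_lock(mtx); /* Must hold the mutex while reading. */
    uint64_t v = *p;
    pthread_mutex_unlock(mtx);
    return v;
}
#endif

int
main(void) {
    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static demo_stats_u64_t nmalloc;
    return (int)demo_read(&nmalloc, &mtx);
}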
@@ -1,12 +0,0 @@
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H

typedef struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
} tcache_bin_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */
@@ -61,7 +61,7 @@ sz_psz2ind(size_t psz) {
        pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZD(-1) << lg_delta;
        size_t delta_inverse_mask = ZU(-1) << lg_delta;
        pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

@@ -142,7 +142,7 @@ sz_size2index_compute(size_t size) {
        szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
            ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZD(-1) << lg_delta;
        size_t delta_inverse_mask = ZU(-1) << lg_delta;
        szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
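The ZD(-1) to ZU(-1) change is a one-character correctness fix: left-shifting a negative signed value is undefined behavior in C, while ZU(-1) is a well-defined all-ones pattern. A worked example of the mask it builds (the local ZU macro mirrors jemalloc's size_t cast; the numbers are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define ZU(z) ((size_t)(z))

int
main(void) {
    unsigned lg_delta = 12; /* e.g. LG_PAGE */
    size_t mask = ZU(-1) << lg_delta; /* ...fffff000: clears low 12 bits. */
    size_t psz = 0x5678;
    /* (psz-1) & mask rounds down to a multiple of 2^lg_delta: 0x5000. */
    assert((((psz - 1) & mask) >> lg_delta) == 0x5);
    printf("mask=%zx grouped=%zx\n", mask, ((psz - 1) & mask) >> lg_delta);
    return 0;
}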
@@ -6,7 +6,7 @@
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern tcache_bin_info_t *tcache_bin_info;
extern cache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins. There are NBINS small-object bins, plus 0 or more
@@ -30,10 +30,10 @@ extern tcaches_t *tcaches;
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
    arena_t *arena);
@@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
@@ -38,43 +39,16 @@ tcache_event(tsd_t *tsd, tcache_t *tcache) {
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
    void *ret;

    if (unlikely(tbin->ncached == 0)) {
        tbin->low_water = -1;
        *tcache_success = false;
        return NULL;
    }
    /*
     * tcache_success (instead of ret) should be checked upon the return of
     * this function. We avoid checking (ret == NULL) because there is
     * never a null stored on the avail stack (which is unknown to the
     * compiler), and eagerly checking ret would cause pipeline stall
     * (waiting for the cacheline).
     */
    *tcache_success = true;
    ret = *(tbin->avail - tbin->ncached);
    tbin->ncached--;

    if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
        tbin->low_water = tbin->ncached;
    }

    return ret;
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    tcache_bin_t *tbin;
    cache_bin_t *bin;
    bool tcache_success;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    assert(binind < NBINS);
    tbin = tcache_small_bin_get(tcache, binind);
    ret = tcache_alloc_easy(tbin, &tcache_success);
    bin = tcache_small_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        bool tcache_hard_success;
@@ -84,7 +58,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    }

    ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
        tbin, binind, &tcache_hard_success);
        bin, binind, &tcache_hard_success);
    if (tcache_hard_success == false) {
        return NULL;
    }
@@ -103,22 +77,21 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    if (likely(!zero)) {
        if (slow_path && config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
                arena_alloc_junk_small(ret, &bin_infos[binind],
                    false);
            } else if (unlikely(opt_zero)) {
                memset(ret, 0, usize);
            }
        }
    } else {
        if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
            arena_alloc_junk_small(ret, &bin_infos[binind], true);
        }
        memset(ret, 0, usize);
    }

    if (config_stats) {
        tbin->tstats.nrequests++;
        bin->tstats.nrequests++;
    }
    if (config_prof) {
        tcache->prof_accumbytes += usize;
@@ -131,12 +104,12 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
    void *ret;
    tcache_bin_t *tbin;
    cache_bin_t *bin;
    bool tcache_success;

    assert(binind >= NBINS && binind < nhbins);
    tbin = tcache_large_bin_get(tcache, binind);
    ret = tcache_alloc_easy(tbin, &tcache_success);
    bin = tcache_large_bin_get(tcache, binind);
    ret = cache_bin_alloc_easy(bin, &tcache_success);
    assert(tcache_success == (ret != NULL));
    if (unlikely(!tcache_success)) {
        /*
@@ -176,7 +149,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    }

    if (config_stats) {
        tbin->tstats.nrequests++;
        bin->tstats.nrequests++;
    }
    if (config_prof) {
        tcache->prof_accumbytes += usize;
@@ -190,24 +163,24 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

    if (slow_path && config_fill && unlikely(opt_junk_free)) {
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
        arena_dalloc_junk_small(ptr, &bin_infos[binind]);
    }

    tbin = tcache_small_bin_get(tcache, binind);
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, tbin, binind,
            (tbin_info->ncached_max >> 1));
    bin = tcache_small_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_small(tsd, tcache, bin, binind,
            (bin_info->ncached_max >> 1));
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}
@@ -215,8 +188,8 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;
    cache_bin_t *bin;
    cache_bin_info_t *bin_info;

    assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
@@ -225,15 +198,15 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
        large_dalloc_junk(ptr, sz_index2size(binind));
    }

    tbin = tcache_large_bin_get(tcache, binind);
    tbin_info = &tcache_bin_info[binind];
    if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, tbin, binind,
            (tbin_info->ncached_max >> 1), tcache);
    bin = tcache_large_bin_get(tcache, binind);
    bin_info = &tcache_bin_info[binind];
    if (unlikely(bin->ncached == bin_info->ncached_max)) {
        tcache_bin_flush_large(tsd, bin, binind,
            (bin_info->ncached_max >> 1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->ncached++;
    *(tbin->avail - tbin->ncached) = ptr;
    assert(bin->ncached < bin_info->ncached_max);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;

    tcache_event(tsd, tcache);
}
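A standalone model (invented demo_ names, not jemalloc's cache_bin_t) of the avail-stack convention used throughout these functions: avail points one past the backing storage, ncached counts live slots below it, and the lowest-addressed item pops first, matching the `*(bin->avail - bin->ncached)` accesses above.

#include <assert.h>

#define DEMO_NCACHED_MAX 4

typedef struct {
    unsigned ncached;
    void **avail; /* Points one past the end of the backing array. */
} demo_bin_t;

static void *
demo_alloc_easy(demo_bin_t *bin) {
    if (bin->ncached == 0) {
        return NULL;
    }
    void *ret = *(bin->avail - bin->ncached); /* Lowest slot first. */
    bin->ncached--;
    return ret;
}

static void
demo_dalloc(demo_bin_t *bin, void *ptr) {
    assert(bin->ncached < DEMO_NCACHED_MAX);
    bin->ncached++;
    *(bin->avail - bin->ncached) = ptr;
}

int
main(void) {
    static void *storage[DEMO_NCACHED_MAX];
    demo_bin_t bin = {0, storage + DEMO_NCACHED_MAX};
    int a, b;
    demo_dalloc(&bin, &a);
    demo_dalloc(&bin, &b);
    /* &b sits in the lower-addressed slot, so it is handed out first. */
    assert(demo_alloc_easy(&bin) == &b);
    assert(demo_alloc_easy(&bin) == &a);
    assert(demo_alloc_easy(&bin) == NULL);
    return 0;
}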
@@ -3,54 +3,51 @@

#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ticker.h"

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
    unsigned ncached_max; /* Upper limit on ncached. */
};

struct tcache_bin_s {
    low_water_t low_water; /* Min # cached since last GC. */
    uint32_t ncached; /* # of cached objects. */
    /*
     * ncached and stats are both modified frequently. Let's keep them
     * close so that they have a higher chance of being on the same
     * cacheline, thus less write-backs.
     */
    tcache_bin_stats_t tstats;
    /*
     * To make use of adjacent cacheline prefetch, the items in the avail
     * stack go to higher addresses for newer allocations. avail points
     * just above the available space, which means that
     * avail[-ncached, ... -1] are available items and the lowest item will
     * be allocated first.
     */
    void **avail; /* Stack of available objects. */
};

struct tcache_s {
    /* Data accessed frequently first: prof, ticker and small bins. */
    uint64_t prof_accumbytes; /* Cleared after arena_prof_accum(). */
    ticker_t gc_ticker; /* Drives incremental GC. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array. During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     * To minimize our cache-footprint, we put the frequently accessed data
     * together at the start of this struct.
     */
    tcache_bin_t tbins_small[NBINS];
    /* Data accessed less often below. */
    ql_elm(tcache_t) link; /* Used for aggregating stats. */
    arena_t *arena; /* Associated arena. */
    szind_t next_gc_bin; /* Next bin to GC. */

    /* Cleared after arena_prof_accum(). */
    uint64_t prof_accumbytes;
    /* Drives incremental GC. */
    ticker_t gc_ticker;
    /*
     * The pointer stacks associated with bins follow as a contiguous array.
     * During tcache initialization, the avail pointer in each element of
     * tbins is initialized to point to the proper offset within this array.
     */
    cache_bin_t bins_small[NBINS];

    /*
     * This data is less hot; we can be a little less careful with our
     * footprint here.
     */
    /* Lets us track all the tcaches in an arena. */
    ql_elm(tcache_t) link;
    /*
     * The descriptor lets the arena find our cache bins without seeing the
     * tcache definition. This enables arenas to aggregate stats across
     * tcaches without having a tcache dependency.
     */
    cache_bin_array_descriptor_t cache_bin_array_descriptor;

    /* The arena this tcache is associated with. */
    arena_t *arena;
    /* Next bin to GC. */
    szind_t next_gc_bin;
    /* For small bins, fill (ncached_max >> lg_fill_div). */
    uint8_t lg_fill_div[NBINS];
    tcache_bin_t tbins_large[NSIZES-NBINS];
    /*
     * We put the cache bins for large size classes at the end of the
     * struct, since some of them might not get used. This might end up
     * letting us avoid touching an extra page if we don't have to.
     */
    cache_bin_t bins_large[NSIZES-NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
@@ -3,14 +3,9 @@

#include "jemalloc/internal/size_classes.h"

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/* ncached is cast to this type for comparison. */
typedef int32_t low_water_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
@@ -32,14 +32,42 @@ ticker_read(const ticker_t *ticker) {
    return ticker->tick;
}

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
    if (unlikely(ticker->tick < nticks)) {
/*
 * Not intended to be a public API. Unfortunately, on x86, neither gcc nor
 * clang seems smart enough to turn
 *   ticker->tick -= nticks;
 *   if (unlikely(ticker->tick < 0)) {
 *     fixup ticker
 *     return true;
 *   }
 *   return false;
 * into
 *   subq %nticks_reg, (%ticker_reg)
 *   js fixup ticker
 *
 * unless we force "fixup ticker" out of line. In that case, gcc gets it right,
 * but clang now does worse than before. So, on x86 with gcc, we force it out
 * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be
 * worth the hassle, but this is on the fast path of both malloc and free (via
 * tcache_event).
 */
#if defined(__GNUC__) && !defined(__clang__) \
    && (defined(__x86_64__) || defined(__i386__))
JEMALLOC_NOINLINE
#endif
static bool
ticker_fixup(ticker_t *ticker) {
    ticker->tick = ticker->nticks;
    return true;
}

static inline bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
    ticker->tick -= nticks;
    return(false);
    if (unlikely(ticker->tick < 0)) {
        return ticker_fixup(ticker);
    }
    return false;
}

static inline bool
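A standalone sketch of the ticker contract implemented above: subtract nticks, and when the count goes negative, reload the period and report that the ticker fired. The demo_ names are invented, and the out-of-line fixup plus the gcc/x86 attribute are omitted here since they only affect codegen, not semantics.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    int32_t tick;   /* Remaining budget until the next fire. */
    int32_t nticks; /* Period to reload after firing. */
} demo_ticker_t;

static bool
demo_ticker_ticks(demo_ticker_t *t, int32_t nticks) {
    t->tick -= nticks;
    if (t->tick < 0) {
        t->tick = t->nticks; /* The fixup step: reload the period. */
        return true;
    }
    return false;
}

int
main(void) {
    demo_ticker_t t = {3, 3};
    int fires = 0;
    for (int i = 0; i < 8; i++) {
        fires += demo_ticker_ticks(&t, 1);
    }
    assert(fires == 2); /* Fires on the 4th and 8th tick. */
    return 0;
}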
@@ -65,6 +65,7 @@ typedef void (*test_callback_t)(int *);
    O(arenas_tdata_bypass, bool, bool) \
    O(reentrancy_level, int8_t, int8_t) \
    O(narenas_tdata, uint32_t, uint32_t) \
    O(offset_state, uint64_t, uint64_t) \
    O(thread_allocated, uint64_t, uint64_t) \
    O(thread_deallocated, uint64_t, uint64_t) \
    O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
@@ -84,6 +85,7 @@ typedef void (*test_callback_t)(int *);
    0, \
    0, \
    0, \
    0, \
    NULL, \
    RTREE_CTX_ZERO_INITIALIZER, \
    NULL, \
@@ -39,7 +39,7 @@ tsd_get_allocates(void) {

/* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(bool init) {
tsd_get(UNUSED bool init) {
    assert(tsd_booted);
    return &tsd_tls;
}
@@ -51,7 +51,7 @@
#define WITNESS_RANK_ARENA_LARGE 19U

#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/sh -eu

public_symbols_txt=$1
symbol_prefix=$2
@@ -7,6 +7,6 @@ install_suffix=@install_suffix@
Name: jemalloc
Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
URL: http://jemalloc.net/
Version: @jemalloc_version@
Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@
Cflags: -I${includedir}
Libs: -L${libdir} -ljemalloc${install_suffix}
@@ -9,16 +9,15 @@ How to build jemalloc for Windows
   * grep
   * sed

2. Install Visual Studio 2015 with Visual C++
2. Install Visual Studio 2015 or 2017 with Visual C++

3. Add Cygwin\bin to the PATH environment variable

4. Open "VS2015 x86 Native Tools Command Prompt"
4. Open "x64 Native Tools Command Prompt for VS 2017"
   (note: x86/x64 doesn't matter at this point)

5. Generate header files:
   sh -c "CC=cl ./autogen.sh"

6. Now the project can be opened and built in Visual Studio:
   msvc\jemalloc_vc2015.sln

   msvc\jemalloc_vc2017.sln
63
msvc/jemalloc_vc2017.sln
Normal file
@@ -0,0 +1,63 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.24720.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
  ProjectSection(SolutionItems) = preProject
    ReadMe.txt = ReadMe.txt
  EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
EndProject
Global
  GlobalSection(SolutionConfigurationPlatforms) = preSolution
    Debug|x64 = Debug|x64
    Debug|x86 = Debug|x86
    Debug-static|x64 = Debug-static|x64
    Debug-static|x86 = Debug-static|x86
    Release|x64 = Release|x64
    Release|x86 = Release|x86
    Release-static|x64 = Release-static|x64
    Release-static|x86 = Release-static|x86
  EndGlobalSection
  GlobalSection(ProjectConfigurationPlatforms) = postSolution
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
    {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
    {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
  EndGlobalSection
  GlobalSection(SolutionProperties) = preSolution
    HideSolutionNode = FALSE
  EndGlobalSection
EndGlobal
@@ -38,9 +38,11 @@
    <ClCompile Include="..\..\..\..\src\arena.c" />
    <ClCompile Include="..\..\..\..\src\background_thread.c" />
    <ClCompile Include="..\..\..\..\src\base.c" />
    <ClCompile Include="..\..\..\..\src\bin.c" />
    <ClCompile Include="..\..\..\..\src\bitmap.c" />
    <ClCompile Include="..\..\..\..\src\ckh.c" />
    <ClCompile Include="..\..\..\..\src\ctl.c" />
    <ClCompile Include="..\..\..\..\src\div.c" />
    <ClCompile Include="..\..\..\..\src\extent.c" />
    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
@@ -48,6 +50,7 @@
    <ClCompile Include="..\..\..\..\src\hooks.c" />
    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
    <ClCompile Include="..\..\..\..\src\large.c" />
    <ClCompile Include="..\..\..\..\src\log.c" />
    <ClCompile Include="..\..\..\..\src\malloc_io.c" />
    <ClCompile Include="..\..\..\..\src\mutex.c" />
    <ClCompile Include="..\..\..\..\src\mutex_pool.c" />
@@ -56,7 +59,6 @@
    <ClCompile Include="..\..\..\..\src\prng.c" />
    <ClCompile Include="..\..\..\..\src\prof.c" />
    <ClCompile Include="..\..\..\..\src\rtree.c" />
    <ClCompile Include="..\..\..\..\src\spin.c" />
    <ClCompile Include="..\..\..\..\src\stats.c" />
    <ClCompile Include="..\..\..\..\src\sz.c" />
    <ClCompile Include="..\..\..\..\src\tcache.c" />
@@ -197,7 +199,7 @@
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
@@ -213,7 +215,7 @@
      </PrecompiledHeader>
      <WarningLevel>Level3</WarningLevel>
      <Optimization>Disabled</Optimization>
      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
@@ -266,7 +268,7 @@
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
@@ -286,7 +288,7 @@
      <Optimization>MaxSpeed</Optimization>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
@@ -70,9 +70,6 @@
    <ClCompile Include="..\..\..\..\src\rtree.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\..\..\src\spin.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\..\..\src\stats.c">
      <Filter>Source Files</Filter>
    </ClCompile>
@@ -91,5 +88,14 @@
    <ClCompile Include="..\..\..\..\src\witness.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\..\..\src\log.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\..\..\src\bin.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\..\..\src\div.c">
      <Filter>Source Files</Filter>
    </ClCompile>
  </ItemGroup>
</Project>
@@ -310,8 +310,8 @@
    </Link>
  </ItemDefinitionGroup>
  <ItemGroup>
    <ClCompile Include="test_threads.cpp" />
    <ClCompile Include="test_threads_main.cpp" />
    <ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
@@ -319,7 +319,7 @@
    </ProjectReference>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="test_threads.h" />
    <ClInclude Include="..\..\..\test_threads\test_threads.h" />
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets">
@@ -11,15 +11,15 @@
    </Filter>
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="test_threads.cpp">
    <ClCompile Include="..\..\..\test_threads\test_threads.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="test_threads_main.cpp">
    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="test_threads.h">
    <ClInclude Include="..\..\..\test_threads\test_threads.h">
      <Filter>Header Files</Filter>
    </ClInclude>
  </ItemGroup>
347
msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
Normal file
@@ -0,0 +1,347 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug-static|Win32">
      <Configuration>Debug-static</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug-static|x64">
      <Configuration>Debug-static</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug|Win32">
      <Configuration>Debug</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release-static|Win32">
      <Configuration>Release-static</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release-static|x64">
      <Configuration>Release-static</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|Win32">
      <Configuration>Release</Configuration>
      <Platform>Win32</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Debug|x64">
      <Configuration>Debug</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
    <ProjectConfiguration Include="Release|x64">
      <Configuration>Release</Configuration>
      <Platform>x64</Platform>
    </ProjectConfiguration>
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="..\..\..\..\src\arena.c" />
    <ClCompile Include="..\..\..\..\src\background_thread.c" />
    <ClCompile Include="..\..\..\..\src\base.c" />
    <ClCompile Include="..\..\..\..\src\bin.c" />
    <ClCompile Include="..\..\..\..\src\bitmap.c" />
    <ClCompile Include="..\..\..\..\src\ckh.c" />
    <ClCompile Include="..\..\..\..\src\ctl.c" />
    <ClCompile Include="..\..\..\..\src\div.c" />
    <ClCompile Include="..\..\..\..\src\extent.c" />
    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
    <ClCompile Include="..\..\..\..\src\hash.c" />
    <ClCompile Include="..\..\..\..\src\hooks.c" />
    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
    <ClCompile Include="..\..\..\..\src\large.c" />
    <ClCompile Include="..\..\..\..\src\log.c" />
    <ClCompile Include="..\..\..\..\src\malloc_io.c" />
    <ClCompile Include="..\..\..\..\src\mutex.c" />
    <ClCompile Include="..\..\..\..\src\mutex_pool.c" />
    <ClCompile Include="..\..\..\..\src\nstime.c" />
    <ClCompile Include="..\..\..\..\src\pages.c" />
    <ClCompile Include="..\..\..\..\src\prng.c" />
    <ClCompile Include="..\..\..\..\src\prof.c" />
    <ClCompile Include="..\..\..\..\src\rtree.c" />
    <ClCompile Include="..\..\..\..\src\stats.c" />
    <ClCompile Include="..\..\..\..\src\sz.c" />
    <ClCompile Include="..\..\..\..\src\tcache.c" />
    <ClCompile Include="..\..\..\..\src\ticker.c" />
    <ClCompile Include="..\..\..\..\src\tsd.c" />
    <ClCompile Include="..\..\..\..\src\witness.c" />
  </ItemGroup>
  <PropertyGroup Label="Globals">
    <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
    <Keyword>Win32Proj</Keyword>
    <RootNamespace>jemalloc</RootNamespace>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <PlatformToolset>v141</PlatformToolset>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
  </ImportGroup>
  <ImportGroup Label="Shared">
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
    <TargetName>$(ProjectName)d</TargetName>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
    <TargetName>$(ProjectName)d</TargetName>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
    <IntDir>$(Platform)\$(Configuration)\</IntDir>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
|
||||
<IntDir>$(Platform)\$(Configuration)\</IntDir>
|
||||
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<DebugInformationFormat>OldStyle</DebugInformationFormat>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
|
||||
<DebugInformationFormat>OldStyle</DebugInformationFormat>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
101
msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
Normal file
@ -0,0 +1,101 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\background_thread.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\base.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ckh.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ctl.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_dss.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\extent_mmap.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hash.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\jemalloc.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\large.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\malloc_io.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\mutex_pool.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\stats.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\sz.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\witness.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\log.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\bin.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\div.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>
326
msvc/projects/vc2017/test_threads/test_threads.vcxproj
Normal file
@ -0,0 +1,326 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug-static|Win32">
<Configuration>Debug-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug-static|x64">
<Configuration>Debug-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|Win32">
<Configuration>Release-static</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release-static|x64">
<Configuration>Release-static</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>test_threads</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
<Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
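For context on the JEMALLOC_EXPORT=/JEMALLOC_STATIC preprocessor definitions in the *-static configurations above: a consumer of the static library repeats those defines before including the header so the API is not declared dllimport. A minimal C sketch, assuming the je_-prefixed entry points these Windows builds expose:

/* Illustrative consumer of the static build, not part of the commit. */
#define JEMALLOC_EXPORT		/* empty, matching JEMALLOC_EXPORT= above */
#define JEMALLOC_STATIC
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = je_malloc(64);
	je_free(p);
	return 0;
}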
@ -0,0 +1,26 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\..\test_threads\test_threads.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\..\test_threads\test_threads.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>
@ -1,9 +1,14 @@
#!/usr/bin/env python

import sys
from itertools import combinations
from os import uname
from multiprocessing import cpu_count

# Later, we want to test extended vaddr support. Apparently, the "real" way of
# checking this is flaky on OS X.
bits_64 = sys.maxsize > 2**32

nparallel = cpu_count() * 2

uname = uname()[0]
@ -22,8 +27,10 @@ possible_config_opts = [
'--enable-debug',
'--enable-prof',
'--disable-stats',
'--with-malloc-conf=tcache:false',
]
if bits_64:
possible_config_opts.append('--with-lg-vaddr=56')

possible_malloc_conf_opts = [
'tcache:false',
'dss:primary',
@ -57,6 +64,11 @@ for cc, cxx in possible_compilers:
else '')
)

# We don't want to test large vaddr spaces in 32-bit mode.
if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
config_opts):
continue

# Per CPU arenas are only supported on Linux.
linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
or 'background_thread:true' in malloc_conf_opts)
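To make the enumeration above concrete: the script feeds option lists like possible_config_opts through itertools.combinations to try every subset. A minimal standalone sketch of that expansion (this driver loop is illustrative, not the script's exact code):

import sys
from itertools import combinations

possible_config_opts = [
    '--enable-debug',
    '--enable-prof',
    '--disable-stats',
]
if sys.maxsize > 2**32:  # same 64-bit check as bits_64 above
    possible_config_opts.append('--with-lg-vaddr=56')

# Try every subset of the configure flags, smallest first.
for n in range(len(possible_config_opts) + 1):
    for config_opts in combinations(possible_config_opts, n):
        print('./configure ' + ' '.join(config_opts))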
406
src/arena.c
@ -3,6 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
@ -32,21 +33,6 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const arena_bin_info_t arena_bin_info[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
(pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
(ndelta<<lg_delta)))
SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
h,
@ -54,6 +40,8 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#undef STEP
};

static div_info_t arena_binind_div_info[NBINS];

/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@ -62,157 +50,18 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
bool is_background_thread);
size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
bin_t *bin);

/******************************************************************************/

static bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
if (config_debug) {
for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
assert(((char *)arena_stats)[i] == 0);
}
}
#ifndef JEMALLOC_ATOMIC_U64
if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
#endif
/* Memory is zeroed, so there is no need to clear stats. */
return false;
}

static void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_u64(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return *p;
#endif
}

static void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p += x;
#endif
}

UNUSED static void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
*p -= x;
assert(*p + x >= *p);
#endif
}

/*
* Non-atomically sets *dst += src. *dst needs external synchronization.
* This lets us avoid the cost of a fetch_add when its unnecessary (note that
* the types here are atomic).
*/
static void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
*dst += src;
#endif
}
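The accumulate helper above, restated as a standalone C11 sketch of the relaxed load-plus-store trick (illustrative names, not jemalloc's API): when only one thread ever writes *dst, two relaxed operations replace the costlier atomic read-modify-write.

#include <stdatomic.h>
#include <stdint.h>

/* *dst is externally synchronized: a single writer means the load and
 * store cannot race with another update, so no fetch_add is needed. */
static void
accum_u64_sketch(_Atomic uint64_t *dst, uint64_t src) {
	uint64_t cur = atomic_load_explicit(dst, memory_order_relaxed);
	atomic_store_explicit(dst, cur + src, memory_order_relaxed);
}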

static size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r);
#else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
NBINS].nrequests, nrequests);
arena_stats_unlock(tsdn, arena_stats);
}

void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
arena_stats_unlock(tsdn, arena_stats);
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false);
@ -228,15 +77,15 @@ void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
bin_stats_t *bstats, arena_stats_large_t *lstats) {
cassert(config_stats);

arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
muzzy_decay_ms, nactive, ndirty, nmuzzy);

size_t base_allocated, base_resident, base_mapped;
size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
&base_mapped);
&base_mapped, &metadata_thp);

arena_stats_lock(tsdn, &arena->stats);

@ -267,6 +116,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,

arena_stats_accum_zu(&astats->base, base_allocated);
arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
arena_stats_accum_zu(&astats->resident, base_resident +
(((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
extents_npages_get(&arena->extents_dirty) +
@ -303,16 +153,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* tcache_bytes counts currently cached bytes. */
atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
tcache_t *tcache;
ql_foreach(tcache, &arena->tcache_ql, link) {
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
szind_t i = 0;
for (; i < NBINS; i++) {
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
cache_bin_t *tbin = &descriptor->bins_small[i];
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * sz_index2size(i));
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
cache_bin_t *tbin = &descriptor->bins_large[i];
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * sz_index2size(i));
}
@ -351,20 +201,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
nstime_subtract(&astats->uptime, &arena->create_time);

for (szind_t i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];

malloc_mutex_lock(tsdn, &bin->lock);
malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
bstats[i].curregs += bin->stats.curregs;
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
bstats[i].nslabs += bin->stats.nslabs;
bstats[i].reslabs += bin->stats.reslabs;
bstats[i].curslabs += bin->stats.curslabs;
malloc_mutex_unlock(tsdn, &bin->lock);
bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
}
}

@ -384,8 +221,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
}

static void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
const arena_bin_info_t *bin_info) {
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
void *ret;
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
size_t regind;
@ -412,37 +248,22 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
(uintptr_t)arena_bin_info[binind].reg_size == 0);
(uintptr_t)bin_infos[binind].reg_size == 0);

diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

/* Avoid doing division with a variable divisor. */
diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
switch (binind) {
#define REGIND_bin_yes(index, reg_size) \
case index: \
regind = diff / (reg_size); \
assert(diff == regind * (reg_size)); \
break;
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
default: not_reached();
}
regind = div_compute(&arena_binind_div_info[binind], diff);

assert(regind < arena_bin_info[binind].nregs);
assert(regind < bin_infos[binind].nregs);

return regind;
}
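For context on the div_compute call above: a self-contained sketch of division by a precomputed per-divisor constant, the technique behind the new div.h/src/div.c code (the names and the 32-bit shift are illustrative, not jemalloc's exact implementation):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uint64_t magic;	/* ceil(2^32 / d), computed once per size class */
	size_t d;
} div_sketch_t;

static void
div_sketch_init(div_sketch_t *info, size_t d) {
	info->d = d;
	info->magic = (((uint64_t)1 << 32) + d - 1) / d;
}

static size_t
div_sketch_compute(const div_sketch_t *info, size_t n) {
	/* One multiply and a shift instead of a hardware divide; exact
	 * for n < 2^32 / d, which slab-interior offsets satisfy. */
	size_t q = (size_t)(((uint64_t)n * info->magic) >> 32);
	assert(q == n / info->d);
	return q;
}

arena_binind_div_info[] above caches exactly this kind of per-size-class constant at startup, so the hot deallocation path never executes a division by a variable divisor.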
|
||||
|
||||
static void
|
||||
arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
|
||||
arena_slab_data_t *slab_data, void *ptr) {
|
||||
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
|
||||
szind_t binind = extent_szind_get(slab);
|
||||
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
|
||||
const bin_info_t *bin_info = &bin_infos[binind];
|
||||
size_t regind = arena_slab_regind(slab, binind, ptr);
|
||||
|
||||
assert(extent_nfree_get(slab) < bin_info->nregs);
|
||||
@ -692,7 +513,8 @@ arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
|
||||
bool is_background_thread) {
|
||||
if (current_npages > npages_limit) {
|
||||
arena_decay_to_limit(tsdn, arena, decay, extents, false,
|
||||
npages_limit, is_background_thread);
|
||||
npages_limit, current_npages - npages_limit,
|
||||
is_background_thread);
|
||||
}
|
||||
}
|
||||
|
||||
@ -738,7 +560,7 @@ arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
|
||||
}
|
||||
|
||||
static void
|
||||
arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
|
||||
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
|
||||
arena_decay_ms_write(decay, decay_ms);
|
||||
if (decay_ms > 0) {
|
||||
nstime_init(&decay->interval, (uint64_t)decay_ms *
|
||||
@ -755,8 +577,8 @@ arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
|
||||
}
|
||||
|
||||
static bool
|
||||
arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
|
||||
decay_stats_t *stats) {
|
||||
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
|
||||
arena_stats_decay_t *stats) {
|
||||
if (config_debug) {
|
||||
for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
|
||||
assert(((char *)decay)[i] == 0);
|
||||
@ -768,7 +590,7 @@ arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
|
||||
return true;
|
||||
}
|
||||
decay->purging = false;
|
||||
arena_decay_reinit(decay, extents, decay_ms);
|
||||
arena_decay_reinit(decay, decay_ms);
|
||||
/* Memory is zeroed, so there is no need to clear stats. */
|
||||
if (config_stats) {
|
||||
decay->stats = stats;
|
||||
@ -798,7 +620,8 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
|
||||
if (decay_ms <= 0) {
|
||||
if (decay_ms == 0) {
|
||||
arena_decay_to_limit(tsdn, arena, decay, extents, false,
|
||||
0, is_background_thread);
|
||||
0, extents_npages_get(extents),
|
||||
is_background_thread);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -876,7 +699,7 @@ arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
|
||||
* infrequent, either between the {-1, 0, >0} states, or a one-time
|
||||
* arbitrary change during initial arena configuration.
|
||||
*/
|
||||
arena_decay_reinit(decay, extents, decay_ms);
|
||||
arena_decay_reinit(decay, decay_ms);
|
||||
arena_maybe_decay(tsdn, arena, decay, extents, false);
|
||||
malloc_mutex_unlock(tsdn, &decay->mtx);
|
||||
|
||||
@ -900,14 +723,15 @@ arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
|
||||
static size_t
|
||||
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
|
||||
extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
|
||||
extent_list_t *decay_extents) {
|
||||
size_t npages_decay_max, extent_list_t *decay_extents) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 0);
|
||||
|
||||
/* Stash extents according to npages_limit. */
|
||||
size_t nstashed = 0;
|
||||
extent_t *extent;
|
||||
while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
|
||||
while (nstashed < npages_decay_max &&
|
||||
(extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
|
||||
npages_limit)) != NULL) {
|
||||
extent_list_append(decay_extents, extent);
|
||||
nstashed += extent_size_get(extent) >> LG_PAGE;
|
||||
@ -982,12 +806,15 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
|
||||
}
|
||||
|
||||
/*
|
||||
* npages_limit: Decay as many dirty extents as possible without violating the
|
||||
* invariant: (extents_npages_get(extents) >= npages_limit)
|
||||
* npages_limit: Decay at most npages_decay_max pages without violating the
|
||||
* invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
|
||||
* bound on number of pages in order to prevent unbounded growth (namely in
|
||||
* stashed), otherwise unbounded new pages could be added to extents during the
|
||||
* current decay run, so that the purging thread never finishes.
|
||||
*/
|
||||
static void
|
||||
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
|
||||
extents_t *extents, bool all, size_t npages_limit,
|
||||
extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
|
||||
bool is_background_thread) {
|
||||
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
|
||||
WITNESS_RANK_CORE, 1);
|
||||
@ -1005,7 +832,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
-	    npages_limit, &decay_extents);
+	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
@ -1023,7 +850,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
-		    is_background_thread);
+		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
@ -1036,7 +863,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
-	size_t npages_new;
+	UNUSED size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
@ -1045,7 +872,8 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
-		background_thread_interval_check(tsdn, arena, decay, npages_new);
+		background_thread_interval_check(tsdn, arena, decay,
+		    npages_new);
	}

	return false;
@ -1082,18 +910,18 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
}

static void
-arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
-arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
-arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
+arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
@ -1105,7 +933,7 @@ arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
}

static void
-arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
@ -1119,7 +947,7 @@ arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
}

static void
-arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
@ -1173,7 +1001,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
	/* Bins. */
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
-		arena_bin_t *bin = &arena->bins[i];
+		bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
@ -1262,7 +1090,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
+    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;
@ -1285,7 +1113,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    const arena_bin_info_t *bin_info) {
+    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

@ -1321,10 +1149,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
}

static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	extent_t *slab;
-	const arena_bin_info_t *bin_info;
+	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
@ -1333,7 +1161,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
	}
	/* No existing slabs have any space available. */

-	bin_info = &arena_bin_info[binind];
+	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
@ -1364,12 +1192,12 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
-	const arena_bin_info_t *bin_info;
+	const bin_info_t *bin_info;
	extent_t *slab;

-	bin_info = &arena_bin_info[binind];
+	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
@ -1381,7 +1209,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
	 * bin lock in arena_bin_nonfull_slab_get().
	 */
	if (extent_nfree_get(bin->slabcur) > 0) {
-		void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
+		void *ret = arena_slab_reg_alloc(bin->slabcur,
		    bin_info);
		if (slab != NULL) {
			/*
@ -1415,14 +1243,14 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,

	assert(extent_nfree_get(bin->slabcur) > 0);

-	return arena_slab_reg_alloc(tsdn, slab, bin_info);
+	return arena_slab_reg_alloc(slab, bin_info);
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
+    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
-	arena_bin_t *bin;
+	bin_t *bin;

	assert(tbin->ncached == 0);

@ -1437,8 +1265,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
-			ptr = arena_slab_reg_alloc(tsdn, slab,
-			    &arena_bin_info[binind]);
+			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
@ -1455,8 +1282,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
-			    true);
+			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
@ -1474,14 +1300,14 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
}

void
-arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
-arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
+arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
@ -1490,7 +1316,7 @@ arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
-	arena_bin_t *bin;
+	bin_t *bin;
	size_t usize;
	extent_t *slab;

@ -1500,7 +1326,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
+		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}
@ -1524,14 +1350,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
-				    &arena_bin_info[binind], false);
+				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &arena_bin_info[binind],
+			arena_alloc_junk_small(ret, &bin_infos[binind],
			    true);
		}
		memset(ret, 0, usize);
@ -1636,13 +1462,13 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
}

static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
-		const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if the
@ -1659,7 +1485,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
@ -1673,8 +1499,8 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}

static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
@ -1704,14 +1530,14 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
-	arena_bin_t *bin = &arena->bins[binind];
-	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+	bin_t *bin = &arena->bins[binind];
+	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

-	arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
+	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
@ -1736,7 +1562,7 @@ arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
-	arena_bin_t *bin = &arena->bins[binind];
+	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
@ -1770,7 +1596,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
-		assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
+		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
@ -1885,6 +1711,33 @@ arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	return false;
}

+bool
+arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
+    size_t *new_limit) {
+	assert(opt_retain);
+
+	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+	if (new_limit != NULL) {
+		size_t limit = *new_limit;
+		/* Grow no more than the new limit. */
+		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
+		    EXTENT_GROW_MAX_PIND) {
+			return true;
+		}
+	}
+
+	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+	if (old_limit != NULL) {
+		*old_limit = sz_pind2sz(arena->retain_grow_limit);
+	}
+	if (new_limit != NULL) {
+		arena->retain_grow_limit = new_ind;
+	}
+	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+
+	return false;
+}
+
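(Annotation: this function backs the arena.<i>.retain_grow_limit mallctl from
the release notes. A minimal usage sketch, assuming an unprefixed jemalloc
build and arena index 0; the limit only applies when opt.retain is enabled:)

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        size_t old_limit, new_limit = (size_t)1 << 30; /* 1 GiB, arbitrary. */
        size_t len = sizeof(old_limit);
        /* Read the old limit and install the new one in a single call. */
        if (mallctl("arena.0.retain_grow_limit", &old_limit, &len,
            &new_limit, sizeof(new_limit)) != 0) {
            fprintf(stderr, "mallctl failed\n");
            return 1;
        }
        printf("retain_grow_limit: %zu -> %zu\n", old_limit, new_limit);
        return 0;
    }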
unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
@ -1935,6 +1788,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
		}

		ql_new(&arena->tcache_ql);
+		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
@ -2001,16 +1855,17 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
		goto label_error;
	}

-	if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
+	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
-	if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
+	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
+	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
@ -2024,17 +1879,10 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-		if (malloc_mutex_init(&bin->lock, "arena_bin",
-		    WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
+		bool err = bin_init(&arena->bins[i]);
+		if (err) {
			goto label_error;
		}
-		bin->slabcur = NULL;
-		extent_heap_new(&bin->slabs_nonfull);
-		extent_list_init(&bin->slabs_full);
-		if (config_stats) {
-			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-		}
	}

	arena->base = base;
@ -2070,6 +1918,16 @@ void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
+#define REGIND_bin_yes(index, reg_size)					\
+	div_init(&arena_binind_div_info[(index)], (reg_size));
+#define REGIND_bin_no(index, reg_size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
+    lg_delta_lookup)							\
+	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
+	SIZE_CLASSES
+#undef REGIND_bin_yes
+#undef REGIND_bin_no
+#undef SC
}
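(Annotation: arena_boot() now precomputes one div_init() entry per bin so that
region indices within a slab can be computed without a hardware divide. A
standalone sketch of the multiply-and-shift trick this kind of precomputation
relies on; names are illustrative, not jemalloc's:)

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t magic; } toy_div_t;

    static void
    toy_div_init(toy_div_t *d, uint32_t divisor) {
        /* Exact for dividends that are multiples of `divisor`, which is the
         * slab case: offsets are multiples of the region size. */
        d->magic = (((uint64_t)1 << 32) / divisor) + 1;
    }

    static uint32_t
    toy_div(const toy_div_t *d, uint32_t n) {
        return (uint32_t)((d->magic * n) >> 32);
    }

    int
    main(void) {
        toy_div_t d;
        toy_div_init(&d, 48);            /* e.g. a 48-byte size class */
        assert(toy_div(&d, 48 * 7) == 7);
        return 0;
    }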

void
@ -2115,7 +1973,7 @@ arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
-		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+		bin_prefork(tsdn, &arena->bins[i]);
	}
}

@ -2124,7 +1982,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
-		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
@ -2154,15 +2012,21 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
+		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
+			cache_bin_array_descriptor_init(
+			    &tcache->cache_bin_array_descriptor,
+			    tcache->bins_small, tcache->bins_large);
+			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
+			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
-		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);

src/background_thread.c
@ -11,12 +11,14 @@
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
+size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state.  Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
+size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

@ -30,19 +32,20 @@ bool can_enable_background_thread;

static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
-static pthread_once_t once_control = PTHREAD_ONCE_INIT;

static void
-pthread_create_wrapper_once(void) {
+pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
	if (!isthreaded) {
		isthreaded = true;
	}
#endif
}

int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *__restrict arg) {
-	pthread_once(&once_control, pthread_create_wrapper_once);
+	pthread_create_wrapper_init();

	return pthread_create_fptr(thread, attr, start_routine, arg);
}
@ -286,7 +289,7 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned
	uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
	unsigned narenas = narenas_total_get();

-	for (unsigned i = ind; i < narenas; i += ncpus) {
+	for (unsigned i = ind; i < narenas; i += max_background_threads) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
@ -379,35 +382,32 @@ background_thread_create_signals_masked(pthread_t *thread,
	return create_err;
}

-static void
+static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
    bool *created_threads) {
+	bool ret = false;
	if (likely(*n_created == n_background_threads)) {
-		return;
+		return ret;
	}

-	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
-label_restart:
-	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
-	for (unsigned i = 1; i < ncpus; i++) {
+	tsdn_t *tsdn = tsd_tsdn(tsd);
+	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
+	for (unsigned i = 1; i < max_background_threads; i++) {
		if (created_threads[i]) {
			continue;
		}
		background_thread_info_t *info = &background_thread_info[i];
-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
-		assert(info->state != background_thread_paused);
+		malloc_mutex_lock(tsdn, &info->mtx);
+		/*
+		 * In case of the background_thread_paused state because of
+		 * arena reset, delay the creation.
+		 */
		bool create = (info->state == background_thread_started);
-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+		malloc_mutex_unlock(tsdn, &info->mtx);
		if (!create) {
			continue;
		}

-		/*
-		 * To avoid deadlock with prefork handlers (which waits for the
-		 * mutex held here), unlock before calling pthread_create().
-		 */
-		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
-
		pre_reentrancy(tsd, NULL);
		int err = background_thread_create_signals_masked(&info->thread,
		    NULL, background_thread_entry, (void *)(uintptr_t)i);
@ -423,19 +423,21 @@ label_restart:
			abort();
		}
	}
-		/* Restart since we unlocked. */
-		goto label_restart;
+		/* Return to restart the loop since we unlocked. */
+		ret = true;
+		break;
	}
-	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
-	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
+
+	return ret;
}

static void
background_thread0_work(tsd_t *tsd) {
	/* Thread0 is also responsible for launching / terminating threads. */
-	VARIABLE_ARRAY(bool, created_threads, ncpus);
+	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
	unsigned i;
-	for (i = 1; i < ncpus; i++) {
+	for (i = 1; i < max_background_threads; i++) {
		created_threads[i] = false;
	}
	/* Start working, and create more threads when asked. */
@ -445,8 +447,10 @@ background_thread0_work(tsd_t *tsd) {
	    &background_thread_info[0])) {
		continue;
	}
-	check_background_thread_creation(tsd, &n_created,
-	    (bool *)&created_threads);
+	if (check_background_thread_creation(tsd, &n_created,
+	    (bool *)&created_threads)) {
+		continue;
+	}
	background_work_sleep_once(tsd_tsdn(tsd),
	    &background_thread_info[0], 0);
}
@ -456,15 +460,20 @@ background_thread0_work(tsd_t *tsd) {
	 * the global background_thread mutex (and is waiting) for us.
	 */
	assert(!background_thread_enabled());
-	for (i = 1; i < ncpus; i++) {
+	for (i = 1; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		assert(info->state != background_thread_paused);
		if (created_threads[i]) {
			background_threads_disable_single(tsd, info);
		} else {
			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
-			/* Clear in case the thread wasn't created. */
-			info->state = background_thread_stopped;
+			if (info->state != background_thread_stopped) {
+				/* The thread was not created. */
+				assert(info->state ==
+				    background_thread_started);
+				n_background_threads--;
+				info->state = background_thread_stopped;
+			}
			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
		}
	}
@ -498,7 +507,7 @@ background_work(tsd_t *tsd, unsigned ind) {
static void *
background_thread_entry(void *ind_arg) {
	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
-	assert(thread_ind < ncpus);
+	assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
@ -532,7 +541,7 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

	/* We create at most NCPUs threads. */
-	size_t thread_ind = arena_ind % ncpus;
+	size_t thread_ind = arena_ind % max_background_threads;
	background_thread_info_t *info = &background_thread_info[thread_ind];

	bool need_new_thread;
@ -586,26 +595,29 @@ background_threads_enable(tsd_t *tsd) {
	assert(background_thread_enabled());
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

-	VARIABLE_ARRAY(bool, marked, ncpus);
+	VARIABLE_ARRAY(bool, marked, max_background_threads);
	unsigned i, nmarked;
-	for (i = 0; i < ncpus; i++) {
+	for (i = 0; i < max_background_threads; i++) {
		marked[i] = false;
	}
	nmarked = 0;
	/* Thread 0 is required and created at the end. */
	marked[0] = true;
	/* Mark the threads we need to create for thread 0. */
	unsigned n = narenas_total_get();
	for (i = 1; i < n; i++) {
-		if (marked[i % ncpus] ||
+		if (marked[i % max_background_threads] ||
		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
			continue;
		}
-		background_thread_info_t *info = &background_thread_info[i];
+		background_thread_info_t *info = &background_thread_info[
+		    i % max_background_threads];
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
		assert(info->state == background_thread_stopped);
		background_thread_init(tsd, info);
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
-		marked[i % ncpus] = true;
-		if (++nmarked == ncpus) {
+		marked[i % max_background_threads] = true;
+		if (++nmarked == max_background_threads) {
			break;
		}
	}
@ -720,14 +732,14 @@ background_thread_prefork0(tsdn_t *tsdn) {

void
background_thread_prefork1(tsdn_t *tsdn) {
-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
	}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_parent(tsdn,
		    &background_thread_info[i].mtx);
	}
@ -736,7 +748,7 @@ background_thread_postfork_parent(tsdn_t *tsdn) {

void
background_thread_postfork_child(tsdn_t *tsdn) {
-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		malloc_mutex_postfork_child(tsdn,
		    &background_thread_info[i].mtx);
	}
@ -749,7 +761,7 @@ background_thread_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &background_thread_lock);
	n_background_threads = 0;
	background_thread_enabled_set(tsdn, false);
-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		info->state = background_thread_stopped;
@ -773,7 +785,7 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
	stats->num_threads = n_background_threads;
	uint64_t num_runs = 0;
	nstime_init(&stats->run_interval, 0);
-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		malloc_mutex_lock(tsdn, &info->mtx);
		if (info->state != background_thread_stopped) {
@ -795,6 +807,26 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS

+static bool
+pthread_create_fptr_init(void) {
+	if (pthread_create_fptr != NULL) {
+		return false;
+	}
+	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+	if (pthread_create_fptr == NULL) {
+		can_enable_background_thread = false;
+		if (config_lazy_lock || opt_background_thread) {
+			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+			    "\"pthread_create\")\n");
+			abort();
+		}
+	} else {
+		can_enable_background_thread = true;
+	}
+
+	return false;
+}
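(Annotation: pthread_create_fptr_init() is the dlsym(RTLD_NEXT) interposition
idiom, now callable from multiple entry points rather than via pthread_once().
A self-contained sketch of the same pattern for an LD_PRELOAD-style shim; it
is not jemalloc code, and the EAGAIN fallback is an assumption:)

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <errno.h>
    #include <pthread.h>

    static int (*real_pthread_create)(pthread_t *, const pthread_attr_t *,
        void *(*)(void *), void *);

    int
    pthread_create(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg) {
        if (real_pthread_create == NULL) {
            /* Resolve the next definition in link order, lazily. */
            *(void **)&real_pthread_create =
                dlsym(RTLD_NEXT, "pthread_create");
            if (real_pthread_create == NULL) {
                return EAGAIN;
            }
        }
        /* A real wrapper (like jemalloc's) would flip its "isthreaded"
         * flag here before delegating. */
        return real_pthread_create(thread, attr, start_routine, arg);
    }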

/*
 * When lazy lock is enabled, we need to make sure setting isthreaded before
 * taking any background_thread locks.  This is called early in ctl (instead of
@ -805,7 +837,8 @@ void
background_thread_ctl_init(tsdn_t *tsdn) {
	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
-	pthread_once(&once_control, pthread_create_wrapper_once);
+	pthread_create_fptr_init();
+	pthread_create_wrapper_init();
#endif
}

@ -818,18 +851,10 @@ background_thread_boot0(void) {
		    "supports pthread only\n");
		return true;
	}

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
-	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
-	if (pthread_create_fptr == NULL) {
-		can_enable_background_thread = false;
-		if (config_lazy_lock || opt_background_thread) {
-			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
-			    "\"pthread_create\")\n");
-			abort();
-		}
-	} else {
-		can_enable_background_thread = true;
+	if ((config_lazy_lock || opt_background_thread) &&
+	    pthread_create_fptr_init()) {
+		return true;
	}
#endif
	return false;
@ -841,6 +866,12 @@ background_thread_boot1(tsdn_t *tsdn) {
	assert(have_background_thread);
	assert(narenas_total_get() > 0);

+	if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
+	    ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
+		opt_max_background_threads = ncpus;
+	}
+	max_background_threads = opt_max_background_threads;
+
	background_thread_enabled_set(tsdn, opt_background_thread);
	if (malloc_mutex_init(&background_thread_lock,
	    "background_thread_global",
@ -848,17 +879,15 @@ background_thread_boot1(tsdn_t *tsdn) {
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
-	if (opt_background_thread) {
-		background_thread_ctl_init(tsdn);
-	}

	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
-	    b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
+	    b0get(), opt_max_background_threads *
+	    sizeof(background_thread_info_t), CACHELINE);
	if (background_thread_info == NULL) {
		return true;
	}

-	for (unsigned i = 0; i < ncpus; i++) {
+	for (unsigned i = 0; i < max_background_threads; i++) {
		background_thread_info_t *info = &background_thread_info[i];
		/* Thread mutex is rank_inclusive because of thread0. */
		if (malloc_mutex_init(&info->mtx, "background_thread",

164	src/base.c
@ -12,23 +12,38 @@

static base_t *b0;

+metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
+
+const char *metadata_thp_mode_names[] = {
+	"disabled",
+	"auto",
+	"always"
+};
+
/******************************************************************************/

+static inline bool
+metadata_thp_madvise(void) {
+	return (metadata_thp_enabled() &&
+	    (init_system_thp_mode == thp_mode_default));
+}
+
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
	void *addr;
	bool zero = true;
	bool commit = true;

+	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
	assert(size == HUGEPAGE_CEILING(size));

+	size_t alignment = HUGEPAGE;
	if (extent_hooks == &extent_hooks_default) {
-		addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
	} else {
		/* No arena context as we are creating new arenas. */
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
-		addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+		addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
		    &zero, &commit, ind);
		post_reentrancy(tsd);
	}
@ -51,16 +66,16 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
-			return;
+			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
-			return;
+			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
-			return;
+			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
-			return;
+			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
@ -70,27 +85,33 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
-			goto label_done;
+			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
-			goto label_done;
+			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
-			goto label_done;
+			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
-			goto label_done;
+			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
-	label_done:
+	label_post_reentrancy:
		post_reentrancy(tsd);
-		return;
	}
+label_done:
+	if (metadata_thp_madvise()) {
+		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
+		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+		    (size & HUGEPAGE_MASK) == 0);
+		pages_nohuge(addr, size);
+	}
}

@ -105,6 +126,56 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
	extent_binit(extent, addr, size, sn);
}

+static size_t
+base_get_num_blocks(base_t *base, bool with_new_block) {
+	base_block_t *b = base->blocks;
+	assert(b != NULL);
+
+	size_t n_blocks = with_new_block ? 2 : 1;
+	while (b->next != NULL) {
+		n_blocks++;
+		b = b->next;
+	}
+
+	return n_blocks;
+}
+
+static void
+base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
+	assert(opt_metadata_thp == metadata_thp_auto);
+	malloc_mutex_assert_owner(tsdn, &base->mtx);
+	if (base->auto_thp_switched) {
+		return;
+	}
+	/* Called when adding a new block. */
+	bool should_switch;
+	if (base_ind_get(base) != 0) {
+		should_switch = (base_get_num_blocks(base, true) ==
+		    BASE_AUTO_THP_THRESHOLD);
+	} else {
+		should_switch = (base_get_num_blocks(base, true) ==
+		    BASE_AUTO_THP_THRESHOLD_A0);
+	}
+	if (!should_switch) {
+		return;
+	}
+
+	base->auto_thp_switched = true;
+	assert(!config_stats || base->n_thp == 0);
+	/* Make the initial blocks THP lazily. */
+	base_block_t *block = base->blocks;
+	while (block != NULL) {
+		assert((block->size & HUGEPAGE_MASK) == 0);
+		pages_huge(block, block->size);
+		if (config_stats) {
+			base->n_thp += HUGEPAGE_CEILING(block->size -
+			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+		}
+		block = block->next;
+		assert(block == NULL || (base_ind_get(base) == 0));
+	}
+}
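(Annotation: the auto-THP switch above hinges on pages_huge()/pages_nohuge().
On Linux these presumably reduce to madvise() transparent-huge-page hints; the
sketch below shows that assumed mapping and is not jemalloc's implementation.
Both toy functions return true on failure, mirroring the convention here:)

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    static bool
    toy_pages_huge(void *addr, size_t size) {
        /* addr/size must be huge-page aligned, as the asserts above check. */
        return madvise(addr, size, MADV_HUGEPAGE) != 0;
    }

    static bool
    toy_pages_nohuge(void *addr, size_t size) {
        /* Opt back out, e.g. after unmap, to avoid kernel defrag work. */
        return madvise(addr, size, MADV_NOHUGEPAGE) != 0;
    }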

static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
    size_t alignment) {
@ -124,8 +195,8 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
}

static void
-base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
-    size_t gap_size, void *addr, size_t size) {
+base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
@ -140,23 +211,31 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
-		 * crossed by the new allocation.
+		 * crossed by the new allocation.  Adjust n_thp similarly when
+		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
+		if (metadata_thp_madvise() && (opt_metadata_thp ==
+		    metadata_thp_always || base->auto_thp_switched)) {
+			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
+			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
+			    LG_HUGEPAGE;
+			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
+		}
	}
}

static void *
-base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
-    size_t size, size_t alignment) {
+base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+    size_t alignment) {
	void *ret;
	size_t gap_size;

	ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
-	base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
+	base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
	return ret;
}

@ -166,8 +245,8 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
 * On success a pointer to the initialized base_block_t header is returned.
 */
static base_block_t *
-base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
-    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
+    unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
    size_t alignment) {
	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
	size_t usize = ALIGNMENT_CEILING(size, alignment);
@ -193,6 +272,25 @@ base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
	if (block == NULL) {
		return NULL;
	}

+	if (metadata_thp_madvise()) {
+		void *addr = (void *)block;
+		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
+		    (block_size & HUGEPAGE_MASK) == 0);
+		if (opt_metadata_thp == metadata_thp_always) {
+			pages_huge(addr, block_size);
+		} else if (opt_metadata_thp == metadata_thp_auto &&
+		    base != NULL) {
+			/* base != NULL indicates this is not a new base. */
+			malloc_mutex_lock(tsdn, &base->mtx);
+			base_auto_thp_switch(tsdn, base);
+			if (base->auto_thp_switched) {
+				pages_huge(addr, block_size);
+			}
+			malloc_mutex_unlock(tsdn, &base->mtx);
+		}
+	}
+
	*pind_last = sz_psz2ind(block_size);
	block->size = block_size;
	block->next = NULL;
@ -216,7 +314,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
	 * called.
	 */
	malloc_mutex_unlock(tsdn, &base->mtx);
-	base_block_t *block = base_block_alloc(tsdn, extent_hooks,
+	base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
	    alignment);
	malloc_mutex_lock(tsdn, &base->mtx);
@ -229,8 +327,16 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
		base->allocated += sizeof(base_block_t);
		base->resident += PAGE_CEILING(sizeof(base_block_t));
		base->mapped += block->size;
+		if (metadata_thp_madvise() &&
+		    !(opt_metadata_thp == metadata_thp_auto
+		      && !base->auto_thp_switched)) {
+			assert(base->n_thp > 0);
+			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
+			    LG_HUGEPAGE;
+		}
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
+		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
	return &block->extent;
}
@ -244,7 +350,7 @@ base_t *
base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	pszind_t pind_last = 0;
	size_t extent_sn_next = 0;
-	base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
+	base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
	if (block == NULL) {
		return NULL;
@ -265,6 +371,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	base->pind_last = pind_last;
	base->extent_sn_next = extent_sn_next;
	base->blocks = block;
+	base->auto_thp_switched = false;
	for (szind_t i = 0; i < NSIZES; i++) {
		extent_heap_new(&base->avail[i]);
	}
@ -272,10 +379,14 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	base->allocated = sizeof(base_block_t);
	base->resident = PAGE_CEILING(sizeof(base_block_t));
	base->mapped = block->size;
+	base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
+	    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
+	    >> LG_HUGEPAGE : 0;
	assert(base->allocated <= base->resident);
	assert(base->resident <= base->mapped);
+	assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
	}
-	base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
+	base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
	    base_size);

	return base;
@ -332,7 +443,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
		goto label_return;
	}

-	ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
+	ret = base_extent_bump_alloc(base, extent, usize, alignment);
	if (esn != NULL) {
		*esn = extent_sn_get(extent);
	}
@ -368,7 +479,7 @@ base_alloc_extent(tsdn_t *tsdn, base_t *base) {

void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
-    size_t *mapped) {
+    size_t *mapped, size_t *n_thp) {
	cassert(config_stats);

	malloc_mutex_lock(tsdn, &base->mtx);
@ -377,6 +488,7 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
	*allocated = base->allocated;
	*resident = base->resident;
	*mapped = base->mapped;
+	*n_thp = base->n_thp;
	malloc_mutex_unlock(tsdn, &base->mtx);
}

50	src/bin.c (new file)
@ -0,0 +1,50 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/witness.h"
+
+const bin_info_t bin_infos[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs)			\
+	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
+    lg_delta_lookup)							\
+	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta),		\
+	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) +	\
+	    (ndelta<<lg_delta)))
+	SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};
+
+bool
+bin_init(bin_t *bin) {
+	if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
+	    malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	bin->slabcur = NULL;
+	extent_heap_new(&bin->slabs_nonfull);
+	extent_list_init(&bin->slabs_full);
+	if (config_stats) {
+		memset(&bin->stats, 0, sizeof(bin_stats_t));
+	}
+	return false;
+}
+
+void
+bin_prefork(tsdn_t *tsdn, bin_t *bin) {
+	malloc_mutex_prefork(tsdn, &bin->lock);
+}
+
+void
+bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
+	malloc_mutex_postfork_parent(tsdn, &bin->lock);
+}
+
+void
+bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
+	malloc_mutex_postfork_child(tsdn, &bin->lock);
+}
291	src/ctl.c
@ -57,6 +57,7 @@ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
+CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
@ -75,16 +76,17 @@ CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
-CTL_PROTO(config_thp)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
@ -94,6 +96,8 @@ CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_thp)
+CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
@ -117,6 +121,7 @@ CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
+CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
@ -134,6 +139,7 @@ CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
+CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
@ -182,6 +188,7 @@ CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
@ -191,6 +198,7 @@ CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
@ -266,7 +274,6 @@ static const ctl_named_node_t config_node[] = {
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
	{NAME("stats"),	CTL(config_stats)},
-	{NAME("thp"),	CTL(config_thp)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};
@ -274,11 +281,13 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),	CTL(opt_abort)},
	{NAME("abort_conf"),	CTL(opt_abort_conf)},
+	{NAME("metadata_thp"),	CTL(opt_metadata_thp)},
	{NAME("retain"),	CTL(opt_retain)},
	{NAME("dss"),	CTL(opt_dss)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
	{NAME("background_thread"),	CTL(opt_background_thread)},
+	{NAME("max_background_threads"),	CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"),	CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"),	CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
@ -288,6 +297,8 @@ static const ctl_named_node_t opt_node[] = {
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
+	{NAME("thp"),	CTL(opt_thp)},
+	{NAME("lg_extent_max_active_fit"),	CTL(opt_lg_extent_max_active_fit)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),	CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
@ -316,7 +327,8 @@ static const ctl_named_node_t arena_i_node[] = {
	{NAME("dss"),	CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"),	CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"),	CTL(arena_i_muzzy_decay_ms)},
-	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)}
+	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)},
+	{NAME("retain_grow_limit"),	CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),	CHILD(named, arena_i)}
@ -362,7 +374,8 @@ static const ctl_named_node_t arenas_node[] = {
	{NAME("bin"),	CHILD(indexed, arenas_bin)},
	{NAME("nlextents"),	CTL(arenas_nlextents)},
	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
-	{NAME("create"),	CTL(arenas_create)}
+	{NAME("create"),	CTL(arenas_create)},
+	{NAME("lookup"),	CTL(arenas_lookup)}
};
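(Annotation: the new arenas.lookup node maps an allocation's address back to
its owning arena index, per the release notes. A usage sketch, assuming an
unprefixed build; the address is passed through newp and the index comes back
through oldp:)

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        void *ptr = malloc(64);
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        if (mallctl("arenas.lookup", &arena_ind, &sz, &ptr,
            sizeof(ptr)) == 0) {
            printf("%p belongs to arena %u\n", ptr, arena_ind);
        }
        free(ptr);
        return 0;
    }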

static const ctl_named_node_t prof_node[] = {
@ -474,6 +487,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"),	CTL(stats_arenas_i_base)},
	{NAME("internal"),	CTL(stats_arenas_i_internal)},
+	{NAME("metadata_thp"),	CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"),	CTL(stats_arenas_i_resident)},
	{NAME("small"),	CHILD(named, stats_arenas_i_small)},
@ -512,6 +526,7 @@ static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
+	{NAME("metadata_thp"),	CTL(stats_metadata_thp)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("retained"),	CTL(stats_retained)},
@ -525,6 +540,7 @@ static const ctl_named_node_t root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),	CTL(epoch)},
	{NAME("background_thread"),	CTL(background_thread)},
+	{NAME("max_background_threads"),	CTL(max_background_threads)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),	CHILD(named, opt)},
@ -550,7 +566,7 @@ static const ctl_named_node_t super_root_node[] = {
 * synchronized by the ctl mutex.
 */
static void
-accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
@ -562,7 +578,7 @@ accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {

/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
-arena_stats_read_u64(arena_stats_u64_t *p) {
+ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
@ -570,7 +586,8 @@ arena_stats_read_u64(arena_stats_u64_t *p) {
#endif
}

-static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+static void
+accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
@ -680,9 +697,9 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		memset(ctl_arena->astats->bstats, 0, NBINS *
-		    sizeof(malloc_bin_stats_t));
+		    sizeof(bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
-		    sizeof(malloc_large_stats_t));
+		    sizeof(arena_stats_large_t));
	}
}

@ -745,18 +762,18 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
		    &astats->astats.retained);
		}

-		accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
-		accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
-		accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

-		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
-		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
-		accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
+		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge(				\
@ -773,6 +790,8 @@ MUTEX_PROF_ARENA_MUTEXES
		    &astats->astats.internal);
		accum_atomic_zu(&sdstats->astats.resident,
		    &astats->astats.resident);
+		accum_atomic_zu(&sdstats->astats.metadata_thp,
+		    &astats->astats.metadata_thp);
	} else {
		assert(atomic_load_zu(
		    &astats->astats.internal, ATOMIC_RELAXED) == 0);
@ -794,11 +813,11 @@ MUTEX_PROF_ARENA_MUTEXES
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
-		accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
-		accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
-		accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
@ -835,11 +854,11 @@ MUTEX_PROF_ARENA_MUTEXES
		}

		for (i = 0; i < NSIZES - NBINS; i++) {
-			accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
-			accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
-			accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
@ -938,6 +957,8 @@ ctl_refresh(tsdn_t *tsdn) {
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
+		ctl_stats->metadata_thp = atomic_load_zu(
+		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
@ -1549,6 +1570,71 @@ label_return:
	return ret;
}

+static int
+max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+	int ret;
+	size_t oldval;
+
+	if (!have_background_thread) {
+		return ENOENT;
+	}
+	background_thread_ctl_init(tsd_tsdn(tsd));
+
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+	if (newp == NULL) {
+		oldval = max_background_threads;
+		READ(oldval, size_t);
+	} else {
+		if (newlen != sizeof(size_t)) {
+			ret = EINVAL;
+			goto label_return;
+		}
+		oldval = max_background_threads;
+		READ(oldval, size_t);
+
+		size_t newval = *(size_t *)newp;
+		if (newval == oldval) {
+			ret = 0;
+			goto label_return;
+		}
+		if (newval > opt_max_background_threads) {
+			ret = EINVAL;
+			goto label_return;
+		}
+
+		if (background_thread_enabled()) {
+			if (!can_enable_background_thread) {
+				malloc_printf("<jemalloc>: Error in dlsym("
+				    "RTLD_NEXT, \"pthread_create\"). Cannot "
+				    "enable background_thread\n");
+				ret = EFAULT;
+				goto label_return;
+			}
+			background_thread_enabled_set(tsd_tsdn(tsd), false);
+			if (background_threads_disable(tsd)) {
+				ret = EFAULT;
+				goto label_return;
+			}
+			max_background_threads = newval;
+			background_thread_enabled_set(tsd_tsdn(tsd), true);
+			if (background_threads_enable(tsd)) {
+				ret = EFAULT;
+				goto label_return;
+			}
+		} else {
+			max_background_threads = newval;
+		}
+	}
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
+	return ret;
+}
|
||||
/******************************************************************************/
|
||||
|
||||
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
|
||||
@ -1560,7 +1646,6 @@ CTL_RO_CONFIG_GEN(config_prof, bool)
|
||||
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
|
||||
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
|
||||
CTL_RO_CONFIG_GEN(config_stats, bool)
|
||||
CTL_RO_CONFIG_GEN(config_thp, bool)
|
||||
CTL_RO_CONFIG_GEN(config_utrace, bool)
|
||||
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
|
||||
|
||||
@ -1568,12 +1653,15 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
|
||||
|
||||
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
|
||||
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
|
||||
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
|
||||
const char *)
|
||||
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
|
||||
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
|
||||
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
|
||||
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
|
||||
const char *)
|
||||
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
|
||||
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
|
||||
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
|
||||
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
|
||||
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
|
||||
@ -1583,6 +1671,9 @@ CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
|
||||
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
|
||||
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
|
||||
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
|
||||
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
|
||||
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
|
||||
size_t)
|
||||
CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
|
||||
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
|
||||
@ -2162,10 +2253,31 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
|
||||
MIB_UNSIGNED(arena_ind, 1);
|
||||
if (arena_ind < narenas_total_get() && (arena =
|
||||
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
|
||||
if (newp != NULL) {
|
||||
if (arena_ind < narenas_total_get()) {
|
||||
extent_hooks_t *old_extent_hooks;
|
||||
arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
|
||||
if (arena == NULL) {
|
||||
if (arena_ind >= narenas_auto) {
|
||||
ret = EFAULT;
|
||||
goto label_return;
|
||||
}
|
||||
old_extent_hooks =
|
||||
(extent_hooks_t *)&extent_hooks_default;
|
||||
READ(old_extent_hooks, extent_hooks_t *);
|
||||
if (newp != NULL) {
|
||||
/* Initialize a new arena as a side effect. */
|
||||
extent_hooks_t *new_extent_hooks
|
||||
JEMALLOC_CC_SILENCE_INIT(NULL);
|
||||
WRITE(new_extent_hooks, extent_hooks_t *);
|
||||
arena = arena_init(tsd_tsdn(tsd), arena_ind,
|
||||
new_extent_hooks);
|
||||
if (arena == NULL) {
|
||||
ret = EFAULT;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (newp != NULL) {
|
||||
extent_hooks_t *new_extent_hooks
|
||||
JEMALLOC_CC_SILENCE_INIT(NULL);
|
||||
WRITE(new_extent_hooks, extent_hooks_t *);
|
||||
@ -2173,10 +2285,10 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
||||
new_extent_hooks);
|
||||
READ(old_extent_hooks, extent_hooks_t *);
|
||||
} else {
|
||||
extent_hooks_t *old_extent_hooks =
|
||||
extent_hooks_get(arena);
|
||||
old_extent_hooks = extent_hooks_get(arena);
|
||||
READ(old_extent_hooks, extent_hooks_t *);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ret = EFAULT;
|
||||
goto label_return;
|
||||
@ -2187,6 +2299,42 @@ label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
||||
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
|
||||
int ret;
|
||||
unsigned arena_ind;
|
||||
arena_t *arena;
|
||||
|
||||
if (!opt_retain) {
|
||||
/* Only relevant when retain is enabled. */
|
||||
return ENOENT;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
|
||||
MIB_UNSIGNED(arena_ind, 1);
|
||||
if (arena_ind < narenas_total_get() && (arena =
|
||||
arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
|
||||
size_t old_limit, new_limit;
|
||||
if (newp != NULL) {
|
||||
WRITE(new_limit, size_t);
|
||||
}
|
||||
bool err = arena_retain_grow_limit_get_set(tsd, arena,
|
||||
&old_limit, newp != NULL ? &new_limit : NULL);
|
||||
if (!err) {
|
||||
READ(old_limit, size_t);
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = EFAULT;
|
||||
}
|
||||
} else {
|
||||
ret = EFAULT;
|
||||
}
|
||||
label_return:
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const ctl_named_node_t *
|
||||
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
|
||||
const ctl_named_node_t *ret;
|
||||
@ -2279,9 +2427,9 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
|
||||
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
|
||||
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
|
||||
static const ctl_named_node_t *
|
||||
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
|
||||
if (i > NBINS) {
|
||||
@ -2325,6 +2473,36 @@ label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen) {
|
||||
int ret;
|
||||
unsigned arena_ind;
|
||||
void *ptr;
|
||||
extent_t *extent;
|
||||
arena_t *arena;
|
||||
|
||||
ptr = NULL;
|
||||
ret = EINVAL;
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
|
||||
WRITE(ptr, void *);
|
||||
extent = iealloc(tsd_tsdn(tsd), ptr);
|
||||
if (extent == NULL)
|
||||
goto label_return;
|
||||
|
||||
arena = extent_arena_get(extent);
|
||||
if (arena == NULL)
|
||||
goto label_return;
|
||||
|
||||
arena_ind = arena_ind_get(arena);
|
||||
READ(arena_ind, unsigned);
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static int
|
||||
@ -2460,6 +2638,7 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
|
||||
@ -2490,24 +2669,24 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
|
||||
size_t)
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
|
||||
arena_stats_read_u64(
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
|
||||
arena_stats_read_u64(
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
|
||||
atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
|
||||
@ -2515,6 +2694,9 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_base,
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
|
||||
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
|
||||
size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
|
||||
atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
|
||||
ATOMIC_RELAXED), size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
|
||||
atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
|
||||
ATOMIC_RELAXED), size_t)
|
||||
@ -2534,14 +2716,17 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
|
||||
atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
|
||||
ATOMIC_RELAXED), size_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
|
||||
/*
|
||||
* Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
|
||||
*/
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
|
||||
uint64_t) /* Intentional. */
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */
|
||||
|
||||
/* Lock profiling related APIs below. */
|
||||
#define RO_MUTEX_CTL_GEN(n, l) \
|
||||
@ -2622,7 +2807,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
||||
MUTEX_PROF_RESET(arena->base->mtx);
|
||||
|
||||
for (szind_t i = 0; i < NBINS; i++) {
|
||||
arena_bin_t *bin = &arena->bins[i];
|
||||
bin_t *bin = &arena->bins[i];
|
||||
MUTEX_PROF_RESET(bin->lock);
|
||||
}
|
||||
}
|
||||
@ -2659,14 +2844,14 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
|
||||
}
|
||||
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
|
||||
arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
|
||||
uint64_t)
|
||||
ctl_arena_stats_read_u64(
|
||||
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
|
||||
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
|
||||
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
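
Both new entry points above are reachable through the public mallctl() interface. A minimal caller sketch (not part of the diff; it assumes a build with background-thread support and uses the names listed in the ChangeLog):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Read, then lower, the background thread cap. */
	size_t max_bg, sz = sizeof(max_bg);
	if (mallctl("max_background_threads", &max_bg, &sz, NULL, 0) == 0) {
		printf("max_background_threads: %zu\n", max_bg);
	}
	size_t new_max = 2; /* Must not exceed opt.max_background_threads. */
	mallctl("max_background_threads", NULL, NULL, &new_max, sizeof(new_max));

	/* Map an allocation back to its owning arena via arenas.lookup. */
	void *p = malloc(64);
	unsigned arena_ind;
	sz = sizeof(arena_ind);
	if (mallctl("arenas.lookup", &arena_ind, &sz, &p, sizeof(p)) == 0) {
		printf("%p belongs to arena %u\n", p, arena_ind);
	}
	free(p);
	return 0;
}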

55
src/div.c
Normal file
@ -0,0 +1,55 @@
#include "jemalloc/internal/jemalloc_preamble.h"
|
||||
|
||||
#include "jemalloc/internal/div.h"
|
||||
|
||||
#include "jemalloc/internal/assert.h"
|
||||
|
||||
/*
|
||||
* Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
|
||||
*
|
||||
* For any k, we have (here, all division is exact; not C-style rounding):
|
||||
* floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
|
||||
* r = (-2^k) mod d.
|
||||
*
|
||||
* Expanding this out:
|
||||
* ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
|
||||
* = floor(n / d + (r / d) * (n / 2^k)).
|
||||
*
|
||||
* The fractional part of n / d is 0 (because of the assumption that d divides n
|
||||
* exactly), so we have:
|
||||
* ... = n / d + floor((r / d) * (n / 2^k))
|
||||
*
|
||||
* So that our initial expression is equal to the quantity we seek, so long as
|
||||
* (r / d) * (n / 2^k) < 1.
|
||||
*
|
||||
* r is a remainder mod d, so r < d and r / d < 1 always. We can make
|
||||
* n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
|
||||
*/
|
||||
|
||||
void
|
||||
div_init(div_info_t *div_info, size_t d) {
|
||||
/* Nonsensical. */
|
||||
assert(d != 0);
|
||||
/*
|
||||
* This would make the value of magic too high to fit into a uint32_t
|
||||
* (we would want magic = 2^32 exactly). This would mess with code gen
|
||||
* on 32-bit machines.
|
||||
*/
|
||||
assert(d != 1);
|
||||
|
||||
uint64_t two_to_k = ((uint64_t)1 << 32);
|
||||
uint32_t magic = (uint32_t)(two_to_k / d);
|
||||
|
||||
/*
|
||||
* We want magic = ceil(2^k / d), but C gives us floor. We have to
|
||||
* increment it unless the result was exact (i.e. unless d is a power of
|
||||
* two).
|
||||
*/
|
||||
if (two_to_k % d != 0) {
|
||||
magic++;
|
||||
}
|
||||
div_info->magic = magic;
|
||||
#ifdef JEMALLOC_DEBUG
|
||||
div_info->d = d;
|
||||
#endif
|
||||
}
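
The comment above proves why a single 32-bit magic multiply recovers exact quotients; the constraint n < 2^32 is what makes n / 2^k < 1 hold for k = 32. For context, the consuming half lives in div.h; a minimal sketch of that fast-divide (names and asserts modeled on the header, so treat the exact signature as an assumption rather than part of this diff):

/* Divide n by d via one multiply and shift; exact when d divides n, n < 2^32. */
static inline size_t
div_compute(div_info_t *div_info, size_t n) {
	assert(n <= (uint32_t)-1); /* The proof requires n / 2^32 < 1. */
	size_t q = (size_t)(((uint64_t)n * (uint64_t)div_info->magic) >> 32);
#ifdef JEMALLOC_DEBUG
	assert(q * div_info->d == n);
#endif
	return q;
}

Worked through with d = 24: magic = ceil(2^32 / 24) = 178956971, and for n = 72 the product 72 * 178956971 = 12884901912 shifts down to exactly 3.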

496
src/extent.c
@ -17,6 +17,8 @@ rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(NPSIZES+1);

@ -117,7 +119,7 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,

/******************************************************************************/

rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp)

typedef enum {
@ -304,8 +306,7 @@ extents_npages_get(extents_t *extents) {
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
bool preserve_lru) {
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);

@ -317,9 +318,7 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
(size_t)pind);
}
extent_heap_insert(&extents->heaps[pind], extent);
if (!preserve_lru) {
extent_list_append(&extents->lru, extent);
}
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
@ -333,8 +332,7 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
bool preserve_lru) {
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);
assert(extent_state_get(extent) == extents->state);

@ -346,9 +344,7 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
bitmap_set(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
}
if (!preserve_lru) {
extent_list_remove(&extents->lru, extent);
}
size_t npages = size >> LG_PAGE;
/*
* As in extents_insert_locked, we hold extents->mtx and so don't need
@ -361,6 +357,43 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
* Find an extent with size [min_size, max_size) to satisfy the alignment
* requirement. For each size, try only the first extent in the heap.
*/
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
size_t alignment) {
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(i < NPSIZES);
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
size_t candidate_size = extent_size_get(extent);
assert(candidate_size >= min_size);

uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
PAGE_CEILING(alignment));
if (base > next_align || base + candidate_size <= next_align) {
/* Overflow or not crossing the next alignment. */
continue;
}

size_t leadsize = next_align - base;
if (candidate_size - leadsize >= min_size) {
return extent;
}
}

return NULL;
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
@ -369,8 +402,15 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
*/
if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
return NULL;
}
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_any(&extents->heaps[i]);
extent_t *extent = extent_heap_first(&extents->heaps[i]);
assert(extent_size_get(extent) >= size);
return extent;
}
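
A quick sanity check on the new cap (assuming the documented default of lg_extent_max_active_fit = 6): a 2 MiB active extent only serves requests of at least 2 MiB >> 6 = 32 KiB; anything smaller skips it and is satisfied from a better-sized extent instead of splintering the large one.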

@ -415,12 +455,30 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
*/
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
size_t esize, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &extents->mtx);

return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
extents, size) : extents_first_fit_locked(tsdn, arena, extents,
size);
size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (max_size < esize) {
return NULL;
}

extent_t *extent = extents->delay_coalesce ?
extents_best_fit_locked(tsdn, arena, extents, max_size) :
extents_first_fit_locked(tsdn, arena, extents, max_size);

if (alignment > PAGE && extent == NULL) {
/*
* max_size guarantees the alignment requirement but is rather
* pessimistic. Next we try to satisfy the aligned allocation
* with sizes in [esize, max_size).
*/
extent = extents_fit_alignment(extents, esize, max_size,
alignment);
}

return extent;
}
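
To make the sizing above concrete: with 4 KiB pages, a request of esize = 16 KiB at alignment = 64 KiB yields max_size = 16 KiB + 64 KiB - 4 KiB = 76 KiB. Any page-aligned extent of at least 76 KiB necessarily contains a 64 KiB-aligned 16 KiB range, so the fit pass driven by max_size can never return an unusable extent; the extents_fit_alignment() retry then covers smaller candidates in [16 KiB, 76 KiB) whose base happens to sit near an alignment boundary.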

static bool
@ -436,7 +494,7 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
if (!coalesced) {
return true;
}
extents_insert_locked(tsdn, extents, extent, true);
extents_insert_locked(tsdn, extents, extent);
return false;
}

@ -449,8 +507,10 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);

return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
size, pad, alignment, slab, szind, zero, commit, false);
extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
new_addr, size, pad, alignment, slab, szind, zero, commit, false);
assert(extent == NULL || extent_dumpable_get(extent));
return extent;
}

void
@ -458,6 +518,7 @@ extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent) {
assert(extent_base_get(extent) != NULL);
assert(extent_size_get(extent) != 0);
assert(extent_dumpable_get(extent));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);

@ -487,14 +548,13 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
goto label_return;
}
/* Check the eviction limit. */
size_t npages = extent_size_get(extent) >> LG_PAGE;
size_t extents_npages = atomic_load_zu(&extents->npages,
ATOMIC_RELAXED);
if (extents_npages - npages < npages_min) {
if (extents_npages <= npages_min) {
extent = NULL;
goto label_return;
}
extents_remove_locked(tsdn, extents, extent, false);
extents_remove_locked(tsdn, extents, extent);
if (!extents->delay_coalesce) {
break;
}
@ -567,29 +627,29 @@ extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent, bool preserve_lru) {
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extent_state_active);

extent_state_set(extent, extents_state_get(extents));
extents_insert_locked(tsdn, extents, extent, preserve_lru);
extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent, bool preserve_lru) {
extent_t *extent) {
malloc_mutex_lock(tsdn, &extents->mtx);
extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
extent_deactivate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
extent_t *extent, bool preserve_lru) {
extent_t *extent) {
assert(extent_arena_get(extent) == arena);
assert(extent_state_get(extent) == extents_state_get(extents));

extents_remove_locked(tsdn, extents, extent, preserve_lru);
extents_remove_locked(tsdn, extents, extent);
extent_state_set(extent, extent_state_active);
}

@ -723,6 +783,13 @@ extent_reregister(tsdn_t *tsdn, extent_t *extent) {
assert(!err);
}

/*
* Removes all pointers to the given extent from the global rtree indices for
* its interior. This is relevant for slab extents, for which we need to do
* metadata lookups at places other than the head of the extent. We deregister
* on the interior, then, when an extent moves from being an active slab to an
* inactive state.
*/
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
extent_t *extent) {
@ -737,8 +804,11 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
}
}

/*
* Removes all pointers to the given extent from the global rtree.
*/
static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_leaf_elm_t *elm_a, *elm_b;
@ -755,16 +825,30 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) {

extent_unlock(tsdn, extent);

if (config_prof) {
if (config_prof && gdump) {
extent_gdump_sub(tsdn, extent);
}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
extent_deregister_impl(tsdn, extent, false);
}

/*
* Tries to find and remove an extent from extents that can be used for the
* given allocation request.
*/
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
bool *zero, bool *commit, bool growing_retained) {
bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(alignment > 0);
@ -786,11 +870,6 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
}

size_t esize = size + pad;
size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < esize) {
return NULL;
}
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_t *extent;
@ -812,86 +891,172 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
extent_unlock(tsdn, unlock_extent);
}
} else {
extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
extent = extents_fit_locked(tsdn, arena, extents, esize,
alignment);
}
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &extents->mtx);
return NULL;
}

extent_activate_locked(tsdn, arena, extents, extent, false);
extent_activate_locked(tsdn, arena, extents, extent);
malloc_mutex_unlock(tsdn, &extents->mtx);

if (extent_zeroed_get(extent)) {
*zero = true;
}
if (extent_committed_get(extent)) {
*commit = true;
}

return extent;
}

static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
* it, this splits off lead and trail extents, leaving extent pointing to an
* extent satisfying the allocation.
* This function doesn't put lead or trail into any extents_t; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
* Split successfully. lead, extent, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
* input extent_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
* None of lead, extent, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;

static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
/* The result of splitting, in case of success. */
extent_t **extent, extent_t **lead, extent_t **trail,
/* The mess to clean up, in case of error. */
extent_t **to_leak, extent_t **to_salvage,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, extent_t *extent, bool growing_retained) {
szind_t szind, bool growing_retained) {
size_t esize = size + pad;
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_size_get(extent) >= leadsize + esize);
size_t trailsize = extent_size_get(extent) - leadsize - esize;
if (extent_size_get(*extent) < leadsize + esize) {
return extent_split_interior_cant_alloc;
}
size_t trailsize = extent_size_get(*extent) - leadsize - esize;

*lead = NULL;
*trail = NULL;
*to_leak = NULL;
*to_salvage = NULL;

/* Split the lead. */
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_impl(tsdn, arena, r_extent_hooks,
lead, leadsize, NSIZES, false, esize + trailsize, szind,
*lead = *extent;
*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
*lead, leadsize, NSIZES, false, esize + trailsize, szind,
slab, growing_retained);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks, extents,
lead, growing_retained);
return NULL;
if (*extent == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
}
extent_deactivate(tsdn, arena, extents, lead, false);
}

/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_impl(tsdn, arena,
r_extent_hooks, extent, esize, szind, slab, trailsize,
NSIZES, false, growing_retained);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks, extents,
extent, growing_retained);
return NULL;
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
esize, szind, slab, trailsize, NSIZES, false,
growing_retained);
if (*trail == NULL) {
*to_leak = *extent;
*to_salvage = *lead;
*lead = NULL;
*extent = NULL;
return extent_split_interior_error;
}
extent_deactivate(tsdn, arena, extents, trail, false);
} else if (leadsize == 0) {
}

if (leadsize == 0 && trailsize == 0) {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
*/
extent_szind_set(extent, szind);
extent_szind_set(*extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, slab);
if (slab && extent_size_get(extent) > PAGE) {
(uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(extent) -
(uintptr_t)extent_past_get(*extent) -
(uintptr_t)PAGE, szind, slab);
}
}
}

return extent;
return extent_split_interior_ok;
}

/*
* This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent
* and put back into extents.
*/
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, extent_t *extent, bool growing_retained) {
extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;

extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
growing_retained);

if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_deactivate(tsdn, arena, extents, lead);
}
if (trail != NULL) {
extent_deactivate(tsdn, arena, extents, trail);
}
return extent;
} else {
/*
* We should have picked an extent that was large enough to
* fulfill our allocation request.
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
extent_deregister(tsdn, to_salvage);
}
if (to_leak != NULL) {
void *leak = extent_base_get(to_leak);
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks, extents,
to_leak, growing_retained);
assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
== NULL);
}
return NULL;
}
unreachable();
}

/*
* Tries to satisfy the given allocation request by reusing one of the extents
* in the given extents_t.
*/
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, void *new_addr, size_t size, size_t pad,
@ -906,16 +1071,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

bool committed = false;
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
&committed, growing_retained);
rtree_ctx, extents, new_addr, size, pad, alignment, slab,
growing_retained);
if (extent == NULL) {
return NULL;
}
if (committed) {
*commit = true;
}

extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
extents, new_addr, size, pad, alignment, slab, szind, extent,
@ -934,6 +1095,13 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_zeroed_set(extent, true);
}

if (extent_committed_get(extent)) {
*commit = true;
}
if (extent_zeroed_get(extent)) {
*zero = true;
}

if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
@ -999,11 +1167,12 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit) {
void *ret;

ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
ATOMIC_RELAXED));
if (have_madvise_huge && ret) {
pages_set_thp_state(ret, size);
}
return ret;
}

@ -1028,8 +1197,19 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
/*
* The only legitimate case of customized extent hooks for a0 is
* hooks with no allocation activities. One such example is to
* place metadata on pre-allocated resources such as huge pages.
* In that case, rely on reentrancy_level checks to catch
* infinite recursions.
*/
pre_reentrancy(tsd, NULL);
} else {
pre_reentrancy(tsd, arena);
}
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
@ -1081,9 +1261,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,

void *ptr;
if (*r_extent_hooks == &extent_hooks_default) {
ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
&zeroed, &committed, (dss_prec_t)atomic_load_u(
&arena->dss_prec, ATOMIC_RELAXED));
ptr = extent_alloc_default_impl(tsdn, arena, NULL,
alloc_size, PAGE, &zeroed, &committed);
} else {
extent_hook_pre_reentrancy(tsdn, arena);
ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
@ -1094,21 +1273,18 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,

extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed);
committed, true);
if (ptr == NULL) {
extent_dalloc(tsdn, arena, extent);
goto label_err;
}

if (extent_register_no_gdump_add(tsdn, extent)) {
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
goto label_err;
}

size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
PAGE_CEILING(alignment)) - (uintptr_t)ptr;
assert(alloc_size >= leadsize + esize);
size_t trailsize = alloc_size - leadsize - esize;
if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
*zero = true;
}
@ -1116,54 +1292,46 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
*commit = true;
}

/* Split the lead. */
if (leadsize != 0) {
extent_t *lead = extent;
extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
leadsize, NSIZES, false, esize + trailsize, szind, slab,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

extent_t *lead;
extent_t *trail;
extent_t *to_leak;
extent_t *to_salvage;
extent_split_interior_result_t result = extent_split_interior(
tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
&to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
true);
if (extent == NULL) {
extent_deregister(tsdn, lead);
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, lead, true);
goto label_err;
}

if (result == extent_split_interior_ok) {
if (lead != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, lead, true);
}

/* Split the trail. */
if (trailsize != 0) {
extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
extent, esize, szind, slab, trailsize, NSIZES, false, true);
if (trail == NULL) {
extent_deregister(tsdn, extent);
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, extent, true);
goto label_err;
}
if (trail != NULL) {
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, trail, true);
} else if (leadsize == 0) {
}
} else {
/*
* Splitting causes szind to be set as a side effect, but no
* splitting occurred.
* We should have allocated a sufficiently large extent; the
* cant_alloc case should not occur.
*/
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);

extent_szind_set(extent, szind);
if (szind != NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, slab);
if (slab && extent_size_get(extent) > PAGE) {
rtree_szind_slab_update(tsdn, &extents_rtree,
rtree_ctx,
(uintptr_t)extent_past_get(extent) -
(uintptr_t)PAGE, szind, slab);
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
if (config_prof) {
extent_gdump_add(tsdn, to_salvage);
}
extent_record(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_salvage, true);
}
if (to_leak != NULL) {
extent_deregister_no_gdump_sub(tsdn, to_leak);
extents_leak(tsdn, arena, r_extent_hooks,
&arena->extents_retained, to_leak, true);
}
goto label_err;
}

if (*commit && !extent_committed_get(extent)) {
@ -1177,13 +1345,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
}

/*
* Increment extent_grow_next if doing so wouldn't exceed the legal
* Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
if (arena->extent_grow_next + egn_skip + 1 <=
arena->retain_grow_limit) {
arena->extent_grow_next += egn_skip + 1;
} else {
arena->extent_grow_next = NPSIZES - 1;
arena->extent_grow_next = arena->retain_grow_limit;
}
/* All opportunities for failure are past. */
malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

@ -1271,7 +1440,8 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
return NULL;
}
extent_init(extent, arena, addr, esize, slab, szind,
arena_extent_sn_next(arena), extent_state_active, zero, commit);
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true);
if (pad != 0) {
extent_addr_randomize(tsdn, extent, alignment);
}
@ -1296,10 +1466,20 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
if (extent == NULL) {
if (opt_retain && new_addr != NULL) {
/*
* When retain is enabled and new_addr is set, we do not
* attempt extent_alloc_wrapper_hard which does mmap
* that is very unlikely to succeed (unless it happens
* to be at the end).
*/
return NULL;
}
extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
new_addr, size, pad, alignment, slab, szind, zero, commit);
}

assert(extent == NULL || extent_dumpable_get(extent));
return extent;
}

@ -1329,16 +1509,7 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
bool growing_retained) {
assert(extent_can_coalesce(arena, extents, inner, outer));

if (forward && extents->delay_coalesce) {
/*
* The extent that remains after coalescing must occupy the
* outer extent's position in the LRU. For forward coalescing,
* swap the inner extent into the LRU.
*/
extent_list_replace(&extents->lru, outer, inner);
}
extent_activate_locked(tsdn, arena, extents, outer,
extents->delay_coalesce);
extent_activate_locked(tsdn, arena, extents, outer);

malloc_mutex_unlock(tsdn, &extents->mtx);
bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
@ -1346,11 +1517,7 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
malloc_mutex_lock(tsdn, &extents->mtx);

if (err) {
if (forward && extents->delay_coalesce) {
extent_list_replace(&extents->lru, inner, outer);
}
extent_deactivate_locked(tsdn, arena, extents, outer,
extents->delay_coalesce);
extent_deactivate_locked(tsdn, arena, extents, outer);
}

return err;
@ -1422,6 +1589,10 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
return extent;
}

/*
* Does the metadata management portions of putting an unused extent into the
* given extents_t (coalesces, deregisters slab interiors, the heap operations).
*/
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extents_t *extents, extent_t *extent, bool growing_retained) {
@ -1447,9 +1618,20 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained);
} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
/* Always coalesce large extents eagerly. */
bool coalesced;
size_t prev_size;
do {
prev_size = extent_size_get(extent);
assert(extent_state_get(extent) == extent_state_active);
extent = extent_try_coalesce(tsdn, arena,
r_extent_hooks, rtree_ctx, extents, extent,
&coalesced, growing_retained);
} while (coalesced &&
extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
}

extent_deactivate_locked(tsdn, arena, extents, extent, false);
extent_deactivate_locked(tsdn, arena, extents, extent);

malloc_mutex_unlock(tsdn, &extents->mtx);
}
@ -1520,6 +1702,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
assert(extent_dumpable_get(extent));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);

@ -1780,6 +1963,13 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
}
#endif

/*
* Accepts the extent to split, and the characteristics of each side of the
* split. The 'a' parameters go with the 'lead' of the resulting pair of
* extents (the lower addressed portion of the split), and the 'b' parameters go
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
@ -1803,7 +1993,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent));
extent_committed_get(extent), extent_dumpable_get(extent));

rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@ -1814,7 +2004,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
extent_init(&lead, arena, extent_addr_get(extent), size_a,
slab_a, szind_a, extent_sn_get(extent),
extent_state_get(extent), extent_zeroed_get(extent),
extent_committed_get(extent));
extent_committed_get(extent), extent_dumpable_get(extent));

extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
true, &lead_elm_a, &lead_elm_b);

src/extent_dss.c
@ -156,7 +156,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_init(gap, arena, gap_addr_page,
gap_size_page, false, NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true);
extent_state_active, false, true, true);
}
/*
* Compute the address just past the end of the desired
@ -199,7 +199,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,

extent_init(&extent, arena, ret, size,
size, false, NSIZES,
extent_state_active, false, true);
extent_state_active, false, true,
true);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {

198
src/jemalloc.c
@ -8,6 +8,7 @@
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
@ -848,10 +849,8 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
size_t vlen) {
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
/* If abort_conf is set, error out after processing all options. */
had_conf_error = true;
if (opt_abort_conf) {
malloc_abort_invalid_conf();
}
}

static void
@ -1051,8 +1050,22 @@ malloc_conf_init(void) {

CONF_HANDLE_BOOL(opt_abort, "abort")
CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
if (opt_abort_conf && had_conf_error) {
malloc_abort_invalid_conf();
if (strncmp("metadata_thp", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < metadata_thp_mode_limit; i++) {
if (strncmp(metadata_thp_mode_names[i],
v, vlen) == 0) {
opt_metadata_thp = i;
match = true;
break;
}
}
if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
@ -1128,12 +1141,14 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
CONF_HANDLE_BOOL(opt_tcache, "tcache")
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
"lg_extent_max_active_fit", 0,
(sizeof(size_t) << 3), yes, yes, false)
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
-1, (sizeof(size_t) << 3) - 1)
if (strncmp("percpu_arena", k, klen) == 0) {
int i;
bool match = false;
for (i = percpu_arena_mode_names_base; i <
for (int i = percpu_arena_mode_names_base; i <
percpu_arena_mode_names_limit; i++) {
if (strncmp(percpu_arena_mode_names[i],
v, vlen) == 0) {
@ -1155,6 +1170,10 @@ malloc_conf_init(void) {
}
CONF_HANDLE_BOOL(opt_background_thread,
"background_thread");
CONF_HANDLE_SIZE_T(opt_max_background_threads,
"max_background_threads", 1,
opt_max_background_threads, yes, yes,
true);
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
@ -1173,6 +1192,37 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
}
if (config_log) {
if (CONF_MATCH("log")) {
size_t cpylen = (
vlen <= sizeof(log_var_names) ?
vlen : sizeof(log_var_names) - 1);
strncpy(log_var_names, v, cpylen);
log_var_names[cpylen] = '\0';
continue;
}
}
if (CONF_MATCH("thp")) {
bool match = false;
for (int i = 0; i < thp_mode_names_limit; i++) {
if (strncmp(thp_mode_names[i],v, vlen)
== 0) {
if (!have_madvise_huge) {
malloc_conf_error(
"No THP support",
k, klen, v, vlen);
}
opt_thp = i;
match = true;
break;
}
}
if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
@ -1188,8 +1238,12 @@ malloc_conf_init(void) {
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
}
if (opt_abort_conf && had_conf_error) {
malloc_abort_invalid_conf();
}
}
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
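
All of the options wired up in this function can be set without recompiling callers; one channel is the application-provided malloc_conf symbol (option names as parsed above, values purely illustrative):

/* Read at startup; MALLOC_CONF in the environment overrides these. */
const char *malloc_conf = "metadata_thp:auto,thp:default,"
    "max_background_threads:4,lg_extent_max_active_fit:6";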
|
||||
|
||||
static bool
|
||||
malloc_init_hard_needed(void) {
|
||||
@ -1493,6 +1547,8 @@ malloc_init_hard(void) {
|
||||
post_reentrancy(tsd);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
|
||||
|
||||
witness_assert_lockless(witness_tsd_tsdn(
|
||||
tsd_witness_tsdp_get_unsafe(tsd)));
|
||||
malloc_tsd_boot1();
|
||||
/* Update TSD after tsd_boot1. */
|
||||
tsd = tsd_fetch();
|
||||
@ -1500,8 +1556,11 @@ malloc_init_hard(void) {
|
||||
assert(have_background_thread);
|
||||
/*
|
||||
* Need to finish init & unlock first before creating background
|
||||
* threads (pthread_create depends on malloc).
|
||||
* threads (pthread_create depends on malloc). ctl_init (which
|
||||
* sets isthreaded) needs to be called without holding any lock.
|
||||
*/
|
||||
background_thread_ctl_init(tsd_tsdn(tsd));
|
||||
|
||||
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
|
||||
bool err = background_thread_create(tsd, 0);
|
||||
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
|
||||
@ -1701,7 +1760,7 @@ compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
|
||||
}
|
||||
|
||||
/* A size_t with its high-half bits all set to 1. */
|
||||
const static size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
|
||||
static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
|
||||
|
||||
*size = dopts->item_size * dopts->num_items;
|
||||
|
||||
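Editor's note: the high_bits constant above feeds compute_size_with_overflow's fast path: if neither num_items nor item_size has a bit set in the upper half of a size_t, their product cannot overflow, so the divide-based verification is skipped. A minimal standalone sketch of that check (illustrative names only; the standard SIZE_MAX stands in for jemalloc's SIZE_T_MAX):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of the half-width multiplication overflow check used above. */
static bool
mul_overflows(size_t num_items, size_t item_size, size_t *size) {
	/* A size_t with its high-half bits all set to 1. */
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*size = item_size * num_items;
	if (((num_items | item_size) & high_bits) == 0) {
		return false;	/* Both factors fit in half a word. */
	}
	/* Slow path: confirm with a division. */
	return item_size != 0 && *size / item_size != num_items;
}
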
@@ -1962,6 +2021,8 @@ je_malloc(size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.malloc.entry", "size: %zu", size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -1976,6 +2037,8 @@ je_malloc(size_t size) {

imalloc(&sopts, &dopts);

LOG("core.malloc.exit", "result: %p", ret);

return ret;
}

@@ -1986,6 +2049,9 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
"size: %zu", memptr, alignment, size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2002,6 +2068,10 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) {
dopts.alignment = alignment;

ret = imalloc(&sopts, &dopts);

LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
*memptr);

return ret;
}

@@ -2014,6 +2084,9 @@ je_aligned_alloc(size_t alignment, size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
alignment, size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2032,6 +2105,9 @@ je_aligned_alloc(size_t alignment, size_t size) {
dopts.alignment = alignment;

imalloc(&sopts, &dopts);

LOG("core.aligned_alloc.exit", "result: %p", ret);

return ret;
}

@@ -2043,6 +2119,8 @@ je_calloc(size_t num, size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2059,6 +2137,8 @@ je_calloc(size_t num, size_t size) {

imalloc(&sopts, &dopts);

LOG("core.calloc.exit", "result: %p", ret);

return ret;
}

@@ -2161,17 +2241,37 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
assert(malloc_initialized() || IS_INITIALIZER);

alloc_ctx_t alloc_ctx, *ctx;
if (config_prof && opt_prof) {
if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
/*
* When cache_oblivious is disabled and ptr is not page aligned,
* the allocation was not sampled -- usize can be used to
* determine szind directly.
*/
alloc_ctx.szind = sz_size2index(usize);
alloc_ctx.slab = true;
ctx = &alloc_ctx;
if (config_debug) {
alloc_ctx_t dbg_ctx;
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
&dbg_ctx.slab);
assert(dbg_ctx.szind == alloc_ctx.szind);
assert(dbg_ctx.slab == alloc_ctx.slab);
}
} else if (config_prof && opt_prof) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind == sz_size2index(usize));
ctx = &alloc_ctx;
prof_free(tsd, ptr, usize, ctx);
} else {
ctx = NULL;
}

if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, ctx);
}
if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
}
@@ -2192,6 +2292,8 @@ je_realloc(void *ptr, size_t size) {
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;

LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);

if (unlikely(size == 0)) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
@@ -2204,6 +2306,8 @@ je_realloc(void *ptr, size_t size) {
tcache = NULL;
}
ifree(tsd, ptr, tcache, true);

LOG("core.realloc.exit", "result: %p", NULL);
return NULL;
}
size = 1;
@@ -2236,7 +2340,9 @@ je_realloc(void *ptr, size_t size) {
tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
return je_malloc(size);
void *ret = je_malloc(size);
LOG("core.realloc.exit", "result: %p", ret);
return ret;
}

if (unlikely(ret == NULL)) {
@@ -2257,11 +2363,15 @@ je_realloc(void *ptr, size_t size) {
}
UTRACE(ptr, size, ret);
check_entry_exit_locking(tsdn);

LOG("core.realloc.exit", "result: %p", ret);
return ret;
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr) {
LOG("core.free.entry", "ptr: %p", ptr);

UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
/*
@@ -2291,6 +2401,7 @@ je_free(void *ptr) {
}
check_entry_exit_locking(tsd_tsdn(tsd));
}
LOG("core.free.exit", "");
}

/*
@@ -2310,6 +2421,9 @@ je_memalign(size_t alignment, size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2327,6 +2441,8 @@ je_memalign(size_t alignment, size_t size) {
dopts.alignment = alignment;

imalloc(&sopts, &dopts);

LOG("core.memalign.exit", "result: %p", ret);
return ret;
}
#endif
@@ -2341,6 +2457,8 @@ je_valloc(size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.valloc.entry", "size: %zu\n", size);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2359,6 +2477,7 @@ je_valloc(size_t size) {

imalloc(&sopts, &dopts);

LOG("core.valloc.exit", "result: %p\n", ret);
return ret;
}
#endif
@@ -2432,6 +2551,8 @@ je_mallocx(size_t size, int flags) {
static_opts_t sopts;
dynamic_opts_t dopts;

LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);

static_opts_init(&sopts);
dynamic_opts_init(&dopts);

@@ -2465,6 +2586,8 @@ je_mallocx(size_t size, int flags) {
}

imalloc(&sopts, &dopts);

LOG("core.mallocx.exit", "result: %p", ret);
return ret;
}

@@ -2545,6 +2668,10 @@ je_rallocx(void *ptr, size_t size, int flags) {
arena_t *arena;
tcache_t *tcache;

LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);


assert(ptr != NULL);
assert(size != 0);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -2607,6 +2734,8 @@ je_rallocx(void *ptr, size_t size, int flags) {
}
UTRACE(ptr, size, p);
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.rallocx.exit", "result: %p", p);
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2615,6 +2744,8 @@ label_oom:
}
UTRACE(ptr, size, 0);
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.rallocx.exit", "result: %p", NULL);
return NULL;
}

@@ -2701,6 +2832,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;

LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
"flags: %d", ptr, size, extra, flags);

assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
@@ -2750,15 +2884,19 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
label_not_resized:
UTRACE(ptr, size, ptr);
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.xallocx.exit", "result: %zu", usize);
return usize;
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags) {
je_sallocx(const void *ptr, UNUSED int flags) {
size_t usize;
tsdn_t *tsdn;

LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);

assert(malloc_initialized() || IS_INITIALIZER);
assert(ptr != NULL);

@@ -2773,11 +2911,15 @@ je_sallocx(const void *ptr, int flags) {
}

check_entry_exit_locking(tsdn);

LOG("core.sallocx.exit", "result: %zu", usize);
return usize;
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags) {
LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);

assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);

@@ -2815,6 +2957,8 @@ je_dallocx(void *ptr, int flags) {
ifree(tsd, ptr, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.dallocx.exit", "");
}

JEMALLOC_ALWAYS_INLINE size_t
@@ -2836,6 +2980,9 @@ je_sdallocx(void *ptr, size_t size, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);

LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);

tsd_t *tsd = tsd_fetch();
bool fast = tsd_fast(tsd);
size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
@@ -2872,6 +3019,8 @@ je_sdallocx(void *ptr, size_t size, int flags) {
isfree(tsd, ptr, usize, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.sdallocx.exit", "");
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2883,6 +3032,7 @@ je_nallocx(size_t size, int flags) {
assert(size != 0);

if (unlikely(malloc_init())) {
LOG("core.nallocx.exit", "result: %zu", ZU(0));
return 0;
}

@@ -2891,10 +3041,12 @@ je_nallocx(size_t size, int flags) {

usize = inallocx(tsdn, size, flags);
if (unlikely(usize > LARGE_MAXCLASS)) {
LOG("core.nallocx.exit", "result: %zu", ZU(0));
return 0;
}

check_entry_exit_locking(tsdn);
LOG("core.nallocx.exit", "result: %zu", usize);
return usize;
}

@@ -2904,7 +3056,10 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
int ret;
tsd_t *tsd;

LOG("core.mallctl.entry", "name: %s", name);

if (unlikely(malloc_init())) {
LOG("core.mallctl.exit", "result: %d", EAGAIN);
return EAGAIN;
}

@@ -2912,6 +3067,8 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.mallctl.exit", "result: %d", ret);
return ret;
}

@@ -2919,7 +3076,10 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
int ret;

LOG("core.mallctlnametomib.entry", "name: %s", name);

if (unlikely(malloc_init())) {
LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
return EAGAIN;
}

@@ -2927,6 +3087,8 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_nametomib(tsd, name, mibp, miblenp);
check_entry_exit_locking(tsd_tsdn(tsd));

LOG("core.mallctlnametomib.exit", "result: %d", ret);
return ret;
}

@@ -2936,7 +3098,10 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
tsd_t *tsd;

LOG("core.mallctlbymib.entry", "");

if (unlikely(malloc_init())) {
LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
return EAGAIN;
}

@@ -2944,6 +3109,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
check_entry_exit_locking(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
check_entry_exit_locking(tsd_tsdn(tsd));
LOG("core.mallctlbymib.exit", "result: %d", ret);
return ret;
}

@@ -2952,10 +3118,13 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts) {
tsdn_t *tsdn;

LOG("core.malloc_stats_print.entry", "");

tsdn = tsdn_fetch();
check_entry_exit_locking(tsdn);
stats_print(write_cb, cbopaque, opts);
check_entry_exit_locking(tsdn);
LOG("core.malloc_stats_print.exit", "");
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2963,6 +3132,8 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
size_t ret;
tsdn_t *tsdn;

LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);

assert(malloc_initialized() || IS_INITIALIZER);

tsdn = tsdn_fetch();
@@ -2980,6 +3151,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
}

check_entry_exit_locking(tsdn);
LOG("core.malloc_usable_size.exit", "result: %zu", ret);
return ret;
}

@@ -39,12 +39,10 @@ void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif

template <bool IsNoExcept>
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = je_malloc(size);
if (likely(ptr != nullptr))
return ptr;
JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
void *ptr = nullptr;

while (ptr == nullptr) {
std::new_handler handler;
@@ -68,11 +66,22 @@ newImpl(std::size_t size) noexcept(IsNoExcept) {
ptr = je_malloc(size);
}

if (ptr == nullptr && !IsNoExcept)
if (ptr == nullptr && !nothrow)
std::__throw_bad_alloc();
return ptr;
}

template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = je_malloc(size);
if (likely(ptr != nullptr))
return ptr;

return handleOOM(size, IsNoExcept);
}

void *
operator new(std::size_t size) {
return newImpl<false>(size);

78 src/log.c (new file)
@@ -0,0 +1,78 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/log.h"

char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
atomic_b_t log_init_done = ATOMIC_INIT(false);

/*
* Returns a pointer to the first character after the end of the current
* segment, i.e. the next '|' delimiter or the terminating '\0'.
*/
static const char *
log_var_extract_segment(const char* segment_begin) {
const char *end;
for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
}
return end;
}

static bool
log_var_matches_segment(const char *segment_begin, const char *segment_end,
const char *log_var_begin, const char *log_var_end) {
assert(segment_begin <= segment_end);
assert(log_var_begin < log_var_end);

ptrdiff_t segment_len = segment_end - segment_begin;
ptrdiff_t log_var_len = log_var_end - log_var_begin;
/* The special '.' segment matches everything. */
if (segment_len == 1 && *segment_begin == '.') {
return true;
}
if (segment_len == log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0;
} else if (segment_len < log_var_len) {
return strncmp(segment_begin, log_var_begin, segment_len) == 0
&& log_var_begin[segment_len] == '.';
} else {
return false;
}
}

unsigned
log_var_update_state(log_var_t *log_var) {
const char *log_var_begin = log_var->name;
const char *log_var_end = log_var->name + strlen(log_var->name);

/* Pointer to the beginning of the current segment. */
const char *segment_begin = log_var_names;

/*
* If log_init_done is false, we haven't parsed the malloc conf yet. To
* avoid log-spew, we default to not displaying anything.
*/
if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
return LOG_INITIALIZED_NOT_ENABLED;
}

while (true) {
const char *segment_end = log_var_extract_segment(
segment_begin);
assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
if (log_var_matches_segment(segment_begin, segment_end,
log_var_begin, log_var_end)) {
atomic_store_u(&log_var->state, LOG_ENABLED,
ATOMIC_RELAXED);
return LOG_ENABLED;
}
if (*segment_end == '\0') {
/* Hit the end of the segment string with no match. */
atomic_store_u(&log_var->state,
LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
return LOG_INITIALIZED_NOT_ENABLED;
}
/* Otherwise, skip the delimiter and continue. */
segment_begin = segment_end + 1;
}
}
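
Editor's note: the matching rules above drive the new opt.log filter: log_var_names holds a '|'-separated list of specs copied from the malloc conf, a spec enables any log variable it equals or prefixes at a '.' boundary, and a lone "." enables everything. A small self-contained sketch of the same semantics (hypothetical helper, not part of the commit):

#include <stdbool.h>
#include <string.h>

/* Does one spec segment enable the log variable `name`? */
static bool
spec_matches(const char *spec, size_t spec_len, const char *name) {
	size_t name_len = strlen(name);
	if (spec_len == 1 && spec[0] == '.') {
		return true;	/* Wildcard segment. */
	}
	if (spec_len == name_len) {
		return strncmp(spec, name, spec_len) == 0;
	}
	return spec_len < name_len && strncmp(spec, name, spec_len) == 0 &&
	    name[spec_len] == '.';	/* Prefix at a '.' boundary. */
}

For example, with malloc_conf="log:core.malloc.entry|core.free", this enables "core.malloc.entry" and "core.free.entry" but not "core.mallocx.entry".
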
@@ -70,20 +70,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
/* malloc_message() setup. */
static void
wrtmessage(void *cbopaque, const char *s) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
*
* syscall() returns long or int, depending on platform, so capture the
* unused result in the widest plausible type to avoid compiler
* warnings.
*/
UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
#endif
malloc_write_fd(STDERR_FILENO, s, strlen(s));
}

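Editor's note: wrtmessage now delegates to a shared malloc_write_fd helper, so the syscall-vs-write(2) choice lives in one place. The helper's definition is not shown in this diff; based on the code it replaces, it plausibly reduces to the following (a sketch, assuming the same JEMALLOC_USE_SYSCALL conditionals):

/* Write that never allocates through libc (see the comment removed above). */
static inline ssize_t
malloc_write_fd(int fd, const void *buf, size_t count) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
	return (ssize_t)syscall(SYS_write, fd, buf, count);
#else
	return write(fd, buf, count);
#endif
}
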
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
@@ -111,7 +98,7 @@ buferror(int err, char *buf, size_t buflen) {
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
return 0;
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
@@ -4,6 +4,7 @@

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/spin.h"

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
@@ -53,7 +54,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {

int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
do {
CPU_SPINWAIT;
spin_cpu_spinwait();
if (!malloc_mutex_trylock_final(mutex)) {
data->n_spin_acquired++;
return;
@@ -173,7 +174,7 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
mutex->lock_order = lock_order;
if (lock_order == malloc_mutex_address_ordered) {
witness_init(&mutex->witness, name, rank,
mutex_addr_comp, &mutex);
mutex_addr_comp, mutex);
} else {
witness_init(&mutex->witness, name, rank, NULL, NULL);
}

214 src/pages.c
@@ -10,6 +10,9 @@

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#ifdef __FreeBSD__
#include <vm/vm_param.h>
#endif
#endif

/******************************************************************************/
@@ -25,6 +28,18 @@ static int mmap_flags;
#endif
static bool os_overcommits;

const char *thp_mode_names[] = {
"default",
"always",
"never",
"not supported"
};
thp_mode_t opt_thp = THP_MODE_DEFAULT;
thp_mode_t init_system_thp_mode;

/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;

/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -252,12 +267,25 @@ pages_purge_lazy(void *addr, size_t size) {
if (!pages_can_purge_lazy) {
return true;
}
if (!pages_can_purge_lazy_runtime) {
/*
* Built with lazy purge enabled, but detected it was not
* supported on the current system.
*/
return true;
}

#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
return (madvise(addr, size, MADV_FREE) != 0);
return (madvise(addr, size,
# ifdef MADV_FREE
MADV_FREE
# else
JEMALLOC_MADV_FREE
# endif
) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
@@ -286,12 +314,13 @@ pages_purge_forced(void *addr, size_t size) {
#endif
}

bool
pages_huge(void *addr, size_t size) {
static bool
pages_huge_impl(void *addr, size_t size, bool aligned) {
if (aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);

#ifdef JEMALLOC_THP
}
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
return true;
@@ -299,23 +328,70 @@
}

bool
pages_nohuge(void *addr, size_t size) {
pages_huge(void *addr, size_t size) {
return pages_huge_impl(addr, size, true);
}

static bool
pages_huge_unaligned(void *addr, size_t size) {
return pages_huge_impl(addr, size, false);
}

static bool
pages_nohuge_impl(void *addr, size_t size, bool aligned) {
if (aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}

#ifdef JEMALLOC_THP
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return false;
#endif
}

bool
pages_nohuge(void *addr, size_t size) {
return pages_nohuge_impl(addr, size, true);
}

static bool
pages_nohuge_unaligned(void *addr, size_t size) {
return pages_nohuge_impl(addr, size, false);
}

bool
pages_dontdump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
return madvise(addr, size, MADV_DONTDUMP) != 0;
#else
return false;
#endif
}

bool
pages_dodump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
return madvise(addr, size, MADV_DODUMP) != 0;
#else
return false;
#endif
}


static size_t
os_page_detect(void) {
#ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
return si.dwPageSize;
#elif defined(__FreeBSD__)
return getpagesize();
#else
long result = sysconf(_SC_PAGESIZE);
if (result == -1) {
@@ -332,9 +408,19 @@ os_overcommits_sysctl(void) {
size_t sz;

sz = sizeof(vm_overcommit);
#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
int mib[2];

mib[0] = CTL_VM;
mib[1] = VM_OVERCOMMIT;
if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#else
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
return false; /* Error. */
}
#endif

return ((vm_overcommit & 0x3) == 0);
}
@@ -350,27 +436,44 @@ static bool
os_overcommits_proc(void) {
int fd;
char buf[1];
ssize_t nread;

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
#if defined(O_CLOEXEC)
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
O_CLOEXEC);
#else
fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
#if defined(O_CLOEXEC)
fd = (int)syscall(SYS_openat,
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
fd = (int)syscall(SYS_openat,
AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#else
#if defined(O_CLOEXEC)
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
if (fd != -1) {
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
}
#endif
#endif

if (fd == -1) {
return false; /* Error. */
}

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
nread = read(fd, &buf, sizeof(buf));
#endif

ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
@@ -390,6 +493,71 @@ os_overcommits_proc(void) {
}
#endif

void
pages_set_thp_state(void *ptr, size_t size) {
if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
return;
}
assert(opt_thp != thp_mode_not_supported &&
init_system_thp_mode != thp_mode_not_supported);

if (opt_thp == thp_mode_always
&& init_system_thp_mode != thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default);
pages_huge_unaligned(ptr, size);
} else if (opt_thp == thp_mode_never) {
assert(init_system_thp_mode == thp_mode_default ||
init_system_thp_mode == thp_mode_always);
pages_nohuge_unaligned(ptr, size);
}
}

static void
init_thp_state(void) {
if (!have_madvise_huge) {
if (metadata_thp_enabled() && opt_abort) {
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
abort();
}
goto label_error;
}

static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
char buf[sizeof(sys_state_madvise)];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
int fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
if (fd == -1) {
goto label_error;
}

ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif

if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_default;
} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_always;
} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
init_system_thp_mode = thp_mode_never;
} else {
goto label_error;
}
return;
label_error:
opt_thp = init_system_thp_mode = thp_mode_not_supported;
}

bool
pages_boot(void) {
os_page = os_page_detect();
@@ -418,5 +586,21 @@ pages_boot(void) {
os_overcommits = false;
#endif

init_thp_state();

/* Detect lazy purge runtime support. */
if (pages_can_purge_lazy) {
bool committed = false;
void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
if (madv_free_page == NULL) {
return true;
}
assert(pages_can_purge_lazy_runtime);
if (pages_purge_lazy(madv_free_page, PAGE)) {
pages_can_purge_lazy_runtime = false;
}
os_pages_unmap(madv_free_page, PAGE);
}

return false;
}

19 src/prof.c
@@ -978,7 +978,7 @@ prof_dump_flush(bool propagate_err) {

cassert(config_prof);

err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
@@ -1409,7 +1409,15 @@ prof_open_maps(const char *format, ...) {
va_start(ap, format);
malloc_vsnprintf(filename, sizeof(filename), format, ap);
va_end(ap);

#if defined(O_CLOEXEC)
mfd = open(filename, O_RDONLY | O_CLOEXEC);
#else
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
}
#endif

return mfd;
}
@@ -1463,8 +1471,9 @@ prof_dump_maps(bool propagate_err) {
goto label_return;
}
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
PROF_DUMP_BUFSIZE - prof_dump_buf_end);
nread = malloc_read_fd(mfd,
&prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
- prof_dump_buf_end);
} while (nread > 0);
} else {
ret = true;
@@ -1772,7 +1781,7 @@ prof_idump(tsdn_t *tsdn) {

cassert(config_prof);

if (!prof_booted || tsdn_null(tsdn)) {
if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
}
tsd = tsdn_tsd(tsdn);
@@ -1829,7 +1838,7 @@ prof_gdump(tsdn_t *tsdn) {

cassert(config_prof);

if (!prof_booted || tsdn_null(tsdn)) {
if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
return;
}
tsd = tsdn_tsd(tsdn);

4 src/spin.c (deleted)
@@ -1,4 +0,0 @@
#define JEMALLOC_SPIN_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/spin.h"

1707 src/stats.c
File diff suppressed because it is too large.

3 src/sz.c
@@ -26,7 +26,8 @@ const size_t sz_index2size_tab[NSIZES] = {
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t sz_size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
/* The div module doesn't support division by 1. */
#error "Unsupported LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"

59 src/tcache.c
@@ -12,7 +12,7 @@
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t *tcache_bin_info;
cache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

unsigned nhbins;
@@ -40,7 +40,7 @@ void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
szind_t binind = tcache->next_gc_bin;

tcache_bin_t *tbin;
cache_bin_t *tbin;
if (binind < NBINS) {
tbin = tcache_small_bin_get(tcache, binind);
} else {
@@ -58,7 +58,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
* Reduce fill count by 2X. Limit lg_fill_div such that
* the fill count is always at least 1.
*/
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
if ((tbin_info->ncached_max >>
(tcache->lg_fill_div[binind] + 1)) >= 1) {
tcache->lg_fill_div[binind]++;
@@ -86,7 +86,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;

assert(tcache->arena != NULL);
@@ -95,18 +95,18 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
if (config_prof) {
tcache->prof_accumbytes = 0;
}
ret = tcache_alloc_easy(tbin, tcache_success);
ret = cache_bin_alloc_easy(tbin, tcache_success);

return ret;
}

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem) {
bool merged_stats = false;

assert(binind < NBINS);
assert(rem <= tbin->ncached);
assert((cache_bin_sz_t)rem <= tbin->ncached);

arena_t *arena = tcache->arena;
assert(arena != NULL);
@@ -121,7 +121,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
/* Lock the arena bin associated with the first object. */
extent_t *extent = item_extent[0];
arena_t *bin_arena = extent_arena_get(extent);
arena_bin_t *bin = &bin_arena->bins[binind];
bin_t *bin = &bin_arena->bins[binind];

if (config_prof && bin_arena == arena) {
if (arena_prof_accum(tsd_tsdn(tsd), arena,
@@ -169,7 +169,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -180,18 +180,18 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
if ((low_water_t)tbin->ncached < tbin->low_water) {
if (tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
}
}

void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache) {
bool merged_stats = false;

assert(binind < nhbins);
assert(rem <= tbin->ncached);
assert((cache_bin_sz_t)rem <= tbin->ncached);

arena_t *arena = tcache->arena;
assert(arena != NULL);
@@ -278,7 +278,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
sizeof(void *));
tbin->ncached = rem;
if ((low_water_t)tbin->ncached < tbin->low_water) {
if (tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
}
}
@@ -291,8 +291,15 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
cache_bin_array_descriptor_init(
&tcache->cache_bin_array_descriptor, tcache->bins_small,
tcache->bins_large);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);

malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
@@ -316,6 +323,8 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
ql_remove(&arena->cache_bin_array_descriptor_ql,
&tcache->cache_bin_array_descriptor, link);
tcache_stats_merge(tsdn, tcache, arena);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
@@ -354,8 +363,8 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {

size_t stack_offset = 0;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS);
memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS));
unsigned i = 0;
for (; i < NBINS; i++) {
tcache->lg_fill_div[i] = 1;
@@ -450,7 +459,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
assert(tcache->arena != NULL);

for (unsigned i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

if (config_stats) {
@@ -458,7 +467,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
}
}
for (unsigned i = NBINS; i < nhbins; i++) {
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

if (config_stats) {
@@ -524,8 +533,8 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {

/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
bin_t *bin = &arena->bins[i];
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(tsdn, &bin->lock);
@@ -533,7 +542,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
}

for (; i < nhbins; i++) {
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
@@ -657,21 +666,21 @@ tcache_boot(tsdn_t *tsdn) {
nhbins = sz_size2index(tcache_maxclass) + 1;

/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
* sizeof(tcache_bin_info_t), CACHELINE);
tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
* sizeof(cache_bin_info_t), CACHELINE);
if (tcache_bin_info == NULL) {
return true;
}
stack_nelms = 0;
unsigned i;
for (i = 0; i < NBINS; i++) {
if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MIN;
} else if ((arena_bin_info[i].nregs << 1) <=
} else if ((bin_infos[i].nregs << 1) <=
TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
(bin_infos[i].nregs << 1);
} else {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MAX;

10 src/tsd.c
@@ -71,6 +71,16 @@ tsd_data_init(tsd_t *tsd) {
*/
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));

/*
* A nondeterministic seed based on the address of tsd reduces
* the likelihood of lockstep non-uniform cache index
* utilization among identical concurrent processes, but at the
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
*tsd_offset_statep_get(tsd) = config_debug ? 0 :
(uint64_t)(uintptr_t)tsd;

return tsd_tcache_enabled_data_init(tsd);
}

28 src/zone.c
@@ -89,6 +89,7 @@ JEMALLOC_ATTR(weak_import);
static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;
static pid_t zone_force_lock_pid = -1;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -270,6 +271,12 @@ zone_log(malloc_zone_t *zone, void *address) {
static void
zone_force_lock(malloc_zone_t *zone) {
if (isthreaded) {
/*
* See the note in zone_force_unlock, below, to see why we need
* this.
*/
assert(zone_force_lock_pid == -1);
zone_force_lock_pid = getpid();
jemalloc_prefork();
}
}
@@ -277,15 +284,26 @@ zone_force_lock(malloc_zone_t *zone) {
static void
zone_force_unlock(malloc_zone_t *zone) {
/*
* Call jemalloc_postfork_child() rather than
* jemalloc_postfork_parent(), because this function is executed by both
* parent and child. The parent can tolerate having state
* reinitialized, but the child cannot unlock mutexes that were locked
* by the parent.
* zone_force_lock and zone_force_unlock are the entry points to the
* forking machinery on OS X. The tricky thing is, the child is not
* allowed to unlock mutexes locked in the parent, even if owned by the
* forking thread (and the mutex type we use in OS X will fail an assert
* if we try). In the child, we can get away with reinitializing all
* the mutexes, which has the effect of unlocking them. In the parent,
* doing this would mean we wouldn't wake any waiters blocked on the
* mutexes we unlock. So, we record the pid of the current thread in
* zone_force_lock, and use that to detect if we're in the parent or
* child here, to decide which unlock logic we need.
*/
if (isthreaded) {
assert(zone_force_lock_pid != -1);
if (getpid() == zone_force_lock_pid) {
jemalloc_postfork_parent();
} else {
jemalloc_postfork_child();
}
zone_force_lock_pid = -1;
}
}

static void

@@ -266,6 +266,8 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
"extent_hooks should be same as pointer used to set hooks");
assert_ptr_eq(extent_hooks->merge, extent_merge_hook,
"Wrong hook function");
assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
"Extents not mergeable");
called_merge = true;
if (!try_merge) {
return true;

@@ -98,7 +98,45 @@ test_extent_body(unsigned arena_ind) {
dallocx(p, flags);
}

TEST_BEGIN(test_extent_manual_hook) {
static void
test_manual_hook_auto_arena(void) {
unsigned narenas;
size_t old_size, new_size, sz;
size_t hooks_mib[3];
size_t hooks_miblen;
extent_hooks_t *new_hooks, *old_hooks;

extent_hooks_prep();

sz = sizeof(unsigned);
/* Get number of auto arenas. */
assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
if (narenas == 1) {
return;
}

/* Install custom extent hooks on arena 1 (might not be initialized). */
hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
&hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = 1;
old_size = sizeof(extent_hooks_t *);
new_hooks = &hooks;
new_size = sizeof(extent_hooks_t *);
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
&old_size, (void *)&new_hooks, new_size), 0,
"Unexpected extent_hooks error");
static bool auto_arena_created = false;
if (old_hooks != &hooks) {
assert_b_eq(auto_arena_created, false,
"Expected auto arena 1 created only once.");
auto_arena_created = true;
}
}

static void
test_manual_hook_body(void) {
unsigned arena_ind;
size_t old_size, new_size, sz;
size_t hooks_mib[3];
@@ -139,8 +177,9 @@ TEST_BEGIN(test_extent_manual_hook) {
assert_ptr_ne(old_hooks->merge, extent_merge_hook,
"Unexpected extent_hooks error");

test_skip_if(check_background_thread_enabled());
if (!check_background_thread_enabled()) {
test_extent_body(arena_ind);
}

/* Restore extent hooks. */
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
@@ -165,6 +204,22 @@ TEST_BEGIN(test_extent_manual_hook) {
assert_ptr_eq(old_hooks->merge, default_hooks->merge,
"Unexpected extent_hooks error");
}

TEST_BEGIN(test_extent_manual_hook) {
test_manual_hook_auto_arena();
test_manual_hook_body();

/* Test failure paths. */
try_split = false;
test_manual_hook_body();
try_merge = false;
test_manual_hook_body();
try_purge_lazy = false;
try_purge_forced = false;
test_manual_hook_body();

try_split = try_merge = try_purge_lazy = try_purge_forced = true;
}
TEST_END

TEST_BEGIN(test_extent_auto_hook) {

@@ -151,9 +151,17 @@ TEST_BEGIN(test_basic) {
TEST_END

TEST_BEGIN(test_alignment_and_size) {
const char *percpu_arena;
size_t sz = sizeof(percpu_arena);

if (mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
strcmp(percpu_arena, "disabled") != 0) {
test_skip("test_alignment_and_size skipped: "
"not working with percpu arena.");
}
#define MAXALIGN (((size_t)1) << 23)
#define NITER 4
size_t nsz, rsz, sz, alignment, total;
size_t nsz, rsz, alignment, total;
unsigned i;
void *ps[NITER];

Some files were not shown because too many files have changed in this diff.