commit 5018fe3f09
Merge branch 'dev'
@@ -36,7 +36,7 @@ install:
 build_script:
   - bash -c "autoconf"
   - bash -c "./configure $CONFIG_FLAGS"
-  - mingw32-make -j3
+  - mingw32-make
   - file lib/jemalloc.dll
-  - mingw32-make -j3 tests
+  - mingw32-make tests
   - mingw32-make -k check

.gitignore (28 changes)

@@ -1,5 +1,3 @@
-/*.gcov.*
-
 /bin/jemalloc-config
 /bin/jemalloc.sh
 /bin/jeprof
@@ -21,10 +19,14 @@

 /Makefile

-/include/jemalloc/internal/jemalloc_internal.h
+/include/jemalloc/internal/jemalloc_preamble.h
 /include/jemalloc/internal/jemalloc_internal_defs.h
+/include/jemalloc/internal/private_namespace.gen.h
 /include/jemalloc/internal/private_namespace.h
-/include/jemalloc/internal/private_unnamespace.h
+/include/jemalloc/internal/private_namespace_jet.gen.h
+/include/jemalloc/internal/private_namespace_jet.h
+/include/jemalloc/internal/private_symbols.awk
+/include/jemalloc/internal/private_symbols_jet.awk
 /include/jemalloc/internal/public_namespace.h
 /include/jemalloc/internal/public_symbols.txt
 /include/jemalloc/internal/public_unnamespace.h
@@ -40,8 +42,9 @@
 /include/jemalloc/jemalloc_typedefs.h

 /src/*.[od]
-/src/*.gcda
-/src/*.gcno
+/src/*.sym
+
+/run_tests.out/

 /test/test.sh
 test/include/test/jemalloc_test.h
@@ -50,26 +53,23 @@ test/include/test/jemalloc_test_defs.h
 /test/integration/[A-Za-z]*
 !/test/integration/[A-Za-z]*.*
 /test/integration/*.[od]
-/test/integration/*.gcda
-/test/integration/*.gcno
 /test/integration/*.out

+/test/integration/cpp/[A-Za-z]*
+!/test/integration/cpp/[A-Za-z]*.*
+/test/integration/cpp/*.[od]
+/test/integration/cpp/*.out
+
 /test/src/*.[od]
-/test/src/*.gcda
-/test/src/*.gcno

 /test/stress/[A-Za-z]*
 !/test/stress/[A-Za-z]*.*
 /test/stress/*.[od]
-/test/stress/*.gcda
-/test/stress/*.gcno
 /test/stress/*.out

 /test/unit/[A-Za-z]*
 !/test/unit/[A-Za-z]*.*
 /test/unit/*.[od]
-/test/unit/*.gcda
-/test/unit/*.gcno
 /test/unit/*.out

 /VERSION

.travis.yml (128 changes)

@@ -3,90 +3,150 @@ language: generic
 matrix:
   include:
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS=""
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: osx
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS=""
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS=""
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
-    - os: osx
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS=""
-    - os: osx
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
-    - os: osx
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
-    - os: osx
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
-    - os: osx
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: osx
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: osx
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: osx
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: osx
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: osx
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-tcache"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
           - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      addons:
+        apt:
+          packages:
+          - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      addons:
+        apt:
+          packages:
+          - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-tcache"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      addons:
+        apt:
+          packages:
+          - gcc-multilib
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-tcache"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-tcache"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    - os: linux
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"


 before_script:
   - autoconf
-  - ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" } $CONFIGURE_FLAGS
+  - ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS
   - make -j3
   - make -j3 tests

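For reference, any single row of this matrix can be replayed outside CI by following the before_script steps; a minimal sketch, assuming a jemalloc checkout with GNU autoconf installed (the row shown is the gcc/background_thread configuration above):

    # Sketch only: mirrors one Travis job. EXTRA_CFLAGS is read by configure
    # from the environment, exactly as the matrix exports it.
    export CC=gcc CXX=g++
    export EXTRA_CFLAGS="-Werror -Wno-array-bounds"
    autoconf
    ./configure CC="$CC" CXX="$CXX" --with-malloc-conf=background_thread:true
    make -j3
    make -j3 tests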

COPYING (4 changes)

@@ -1,10 +1,10 @@
 Unless otherwise specified, files in the jemalloc source distribution are
 subject to the following license:
 --------------------------------------------------------------------------------
-Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
 All rights reserved.
 Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2016 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

ChangeLog (191 changes)

@@ -4,6 +4,193 @@ brevity. Much more detail can be found in the git revision history:

     https://github.com/jemalloc/jemalloc

+* 5.0.0 (June 13, 2017)
+
+  Unlike all previous jemalloc releases, this release does not use naturally
+  aligned "chunks" for virtual memory management, and instead uses page-aligned
+  "extents". This change has few externally visible effects, but the internal
+  impacts are... extensive. Many other internal changes combine to make this
+  the most cohesively designed version of jemalloc so far, with ample
+  opportunity for further enhancements.
+
+  Continuous integration is now an integral aspect of development thanks to the
+  efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
+  stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a
+  side effect the official release frequency may decrease over time.
+
+  New features:
+  - Implement optional per-CPU arena support; threads choose which arena to use
+    based on current CPU rather than on fixed thread-->arena associations.
+    (@interwq)
+  - Implement two-phase decay of unused dirty pages. Pages transition from
+    dirty-->muzzy-->clean, where the first phase transition relies on
+    madvise(... MADV_FREE) semantics, and the second phase transition discards
+    pages such that they are replaced with demand-zeroed pages on next access.
+    (@jasone)
+  - Increase decay time resolution from seconds to milliseconds. (@jasone)
+  - Implement opt-in per CPU background threads, and use them for asynchronous
+    decay-driven unused dirty page purging. (@interwq)
+  - Add mutex profiling, which collects a variety of statistics useful for
+    diagnosing overhead/contention issues. (@interwq)
+  - Add C++ new/delete operator bindings. (@djwatson)
+  - Support manually created arena destruction, such that all data and metadata
+    are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
+    associated with destroyed arenas. (@jasone)
+  - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
+    merged/destroyed arena statistics via mallctl. (@jasone)
+  - Add opt.abort_conf to optionally abort if invalid configuration options are
+    detected during initialization. (@interwq)
+  - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
+    stats dumped during exit if opt.stats_print is true. (@jasone)
+  - Add --with-version=VERSION for use when embedding jemalloc into another
+    project's git repository. (@jasone)
+  - Add --disable-thp to support cross compiling. (@jasone)
+  - Add --with-lg-hugepage to support cross compiling. (@jasone)
+  - Add mallctl interfaces (various authors):
+    + background_thread
+    + opt.abort_conf
+    + opt.retain
+    + opt.percpu_arena
+    + opt.background_thread
+    + opt.{dirty,muzzy}_decay_ms
+    + opt.stats_print_opts
+    + arena.<i>.initialized
+    + arena.<i>.destroy
+    + arena.<i>.{dirty,muzzy}_decay_ms
+    + arena.<i>.extent_hooks
+    + arenas.{dirty,muzzy}_decay_ms
+    + arenas.bin.<i>.slab_size
+    + arenas.nlextents
+    + arenas.lextent.<i>.size
+    + arenas.create
+    + stats.background_thread.{num_threads,num_runs,run_interval}
+    + stats.mutexes.{ctl,background_thread,prof,reset}.
+      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+      num_owner_switch}
+    + stats.arenas.<i>.{dirty,muzzy}_decay_ms
+    + stats.arenas.<i>.uptime
+    + stats.arenas.<i>.{pmuzzy,base,internal,resident}
+    + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
+    + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
+    + stats.arenas.<i>.bins.<j>.mutex.
+      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+      num_owner_switch}
+    + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
+    + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
+      extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
+      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
+      num_owner_switch}
+
+  Portability improvements:
+  - Improve reentrant allocation support, such that deadlock is less likely if
+    e.g. a system library call in turn allocates memory. (@davidtgoldblatt,
+    @interwq)
+  - Support static linking of jemalloc with glibc. (@djwatson)
+
+  Optimizations and refactors:
+  - Organize virtual memory as "extents" of virtual memory pages, rather than as
+    naturally aligned "chunks", and store all metadata in arbitrarily distant
+    locations. This reduces virtual memory external fragmentation, and will
+    interact better with huge pages (not yet explicitly supported). (@jasone)
+  - Fold large and huge size classes together; only small and large size classes
+    remain. (@jasone)
+  - Unify the allocation paths, and merge most fast-path branching decisions.
+    (@davidtgoldblatt, @interwq)
+  - Embed per thread automatic tcache into thread-specific data, which reduces
+    conditional branches and dereferences. Also reorganize tcache to increase
+    fast-path data locality. (@interwq)
+  - Rewrite atomics to closely model the C11 API, convert various
+    synchronization from mutex-based to atomic, and use the explicit memory
+    ordering control to resolve various hypothetical races without increasing
+    synchronization overhead. (@davidtgoldblatt)
+  - Extensively optimize rtree via various methods:
+    + Add multiple layers of rtree lookup caching, since rtree lookups are now
+      part of fast-path deallocation. (@interwq)
+    + Determine rtree layout at compile time. (@jasone)
+    + Make the tree shallower for common configurations. (@jasone)
+    + Embed the root node in the top-level rtree data structure, thus avoiding
+      one level of indirection. (@jasone)
+    + Further specialize leaf elements as compared to internal node elements,
+      and directly embed extent metadata needed for fast-path deallocation.
+      (@jasone)
+    + Ignore leading always-zero address bits (architecture-specific).
+      (@jasone)
+  - Reorganize headers (ongoing work) to make them hermetic, and disentangle
+    various module dependencies. (@davidtgoldblatt)
+  - Convert various internal data structures such as size class metadata from
+    boot-time-initialized to compile-time-initialized. Propagate resulting data
+    structure simplifications, such as making arena metadata fixed-size.
+    (@jasone)
+  - Simplify size class lookups when constrained to size classes that are
+    multiples of the page size. This speeds lookups, but the primary benefit is
+    complexity reduction in code that was the source of numerous regressions.
+    (@jasone)
+  - Lock individual extents when possible for localized extent operations,
+    rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone)
+  - Use first fit layout policy instead of best fit, in order to improve
+    packing. (@jasone)
+  - If munmap(2) is not in use, use an exponential series to grow each arena's
+    virtual memory, so that the number of disjoint virtual memory mappings
+    remains low. (@jasone)
+  - Implement per arena base allocators, so that arenas never share any virtual
+    memory pages. (@jasone)
+  - Automatically generate private symbol name mangling macros. (@jasone)
+
+  Incompatible changes:
+  - Replace chunk hooks with an expanded/normalized set of extent hooks.
+    (@jasone)
+  - Remove ratio-based purging. (@jasone)
+  - Remove --disable-tcache. (@jasone)
+  - Remove --disable-tls. (@jasone)
+  - Remove --enable-ivsalloc. (@jasone)
+  - Remove --with-lg-size-class-group. (@jasone)
+  - Remove --with-lg-tiny-min. (@jasone)
+  - Remove --disable-cc-silence. (@jasone)
+  - Remove --enable-code-coverage. (@jasone)
+  - Remove --disable-munmap (replaced by opt.retain). (@jasone)
+  - Remove Valgrind support. (@jasone)
+  - Remove quarantine support. (@jasone)
+  - Remove redzone support. (@jasone)
+  - Remove mallctl interfaces (various authors):
+    + config.munmap
+    + config.tcache
+    + config.tls
+    + config.valgrind
+    + opt.lg_chunk
+    + opt.purge
+    + opt.lg_dirty_mult
+    + opt.decay_time
+    + opt.quarantine
+    + opt.redzone
+    + opt.thp
+    + arena.<i>.lg_dirty_mult
+    + arena.<i>.decay_time
+    + arena.<i>.chunk_hooks
+    + arenas.initialized
+    + arenas.lg_dirty_mult
+    + arenas.decay_time
+    + arenas.bin.<i>.run_size
+    + arenas.nlruns
+    + arenas.lrun.<i>.size
+    + arenas.nhchunks
+    + arenas.hchunk.<i>.size
+    + arenas.extend
+    + stats.cactive
+    + stats.arenas.<i>.lg_dirty_mult
+    + stats.arenas.<i>.decay_time
+    + stats.arenas.<i>.metadata.{mapped,allocated}
+    + stats.arenas.<i>.{npurge,nmadvise,purged}
+    + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
+    + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
+    + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
+    + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
+
+  Bug fixes:
+  - Improve interval-based profile dump triggering to dump only one profile when
+    a single allocation's size exceeds the interval. (@jasone)
+  - Use prefixed function names (as controlled by --with-jemalloc-prefix) when
+    pruning backtrace frames in jeprof. (@jasone)
+
 * 4.5.0 (February 28, 2017)

   This is the first release to benefit from much broader continuous integration
@@ -12,7 +199,7 @@ brevity. Much more detail can be found in the git revision history:
   regressions fixed by this release.

   New features:
-  - Add --disable-thp and the opt.thp to provide opt-out mechanisms for
+  - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
     transparent huge page integration. (@jasone)
   - Update zone allocator integration to work with macOS 10.12. (@glandium)
   - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
@@ -25,7 +212,7 @@ brevity. Much more detail can be found in the git revision history:
   - Handle race in per size class utilization computation. This functionality
     was first released in 4.0.0. (@interwq)
   - Fix lock order reversal during gdump. (@jasone)
-  - Fix-refactor tcache synchronization. This regression was first released in
+  - Fix/refactor tcache synchronization. This regression was first released in
     4.0.0. (@jasone)
   - Fix various JSON-formatted malloc_stats_print() bugs. This functionality
     was first released in 4.3.0. (@jasone)
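The run-time additions above (background threads, per-CPU arenas, statistics printing) are all reachable through the MALLOC_CONF environment variable without rebuilding; a hedged sketch, where your_app is a placeholder binary and option availability depends on how jemalloc was configured:

    # Enable 5.0.0 features at run time (option names from the ChangeLog above).
    MALLOC_CONF="background_thread:true,percpu_arena:percpu" ./your_app
    # Dump JSON-formatted statistics at exit (opt.stats_print / opt.stats_print_opts).
    MALLOC_CONF="stats_print:true,stats_print_opts:J" ./your_app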

INSTALL

@@ -18,16 +18,19 @@ would create a dependency on xsltproc in packaged releases, hence the
 requirement to either run 'make dist' or avoid installing docs via the various
 install_* targets documented below.

-=== Advanced configuration =====================================================
+## Advanced configuration

 The 'configure' script supports numerous options that allow control of which
 functionality is enabled, where jemalloc is installed, etc. Optionally, pass
 any of the following arguments (not a definitive list) to 'configure':

---help
+* `--help`
+
     Print a definitive list of options.

---prefix=<install-root-dir>
+* `--prefix=<install-root-dir>`
+
     Set the base directory in which to install. For example:

         ./configure --prefix=/usr/local
@@ -35,15 +38,29 @@ any of the following arguments (not a definitive list) to 'configure':
 will cause files to be installed into /usr/local/include, /usr/local/lib,
 and /usr/local/man.

---with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
-    Use the specified version string rather than trying to generate one (if in
-    a git repository) or use existing the VERSION file (if present).
+* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
+
+    The VERSION file is mandatory for successful configuration, and the
+    following steps are taken to assure its presence:
+    1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
+       generate VERSION using the specified value.
+    2) If --with-version is not specified in either form and the source
+       directory is inside a git repository, try to generate VERSION via 'git
+       describe' invocations that pattern-match release tags.
+    3) If VERSION is missing, generate it with a bogus version:
+       0.0.0-0-g0000000000000000000000000000000000000000
+
+    Note that --with-version=VERSION bypasses (1) and (2), which simplifies
+    VERSION configuration when embedding a jemalloc release into another
+    project's git repository.
+
+* `--with-rpath=<colon-separated-rpath>`

---with-rpath=<colon-separated-rpath>
     Embed one or more library paths, so that libjemalloc can find the libraries
     it is linked to. This works only on ELF-based systems.

---with-mangling=<map>
+* `--with-mangling=<map>`
+
     Mangle public symbols specified in <map> which is a comma-separated list of
     name:mangled pairs.

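As a sketch of the embedding workflow the new text describes (the path and version string here are illustrative only):

    # Inside a project that vendors a jemalloc release:
    cd deps/jemalloc
    autoconf
    # Step (1): bake in an explicit version string...
    ./configure --with-version=5.0.0-0-g0000000000000000000000000000000000000000
    # ...or bypass generation entirely when a VERSION file is already present:
    ./configure --with-version=VERSION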
@@ -56,7 +73,8 @@ any of the following arguments (not a definitive list) to 'configure':
     --with-jemalloc-prefix, and mangled symbols are then ignored when applying
     the prefix.

---with-jemalloc-prefix=<prefix>
+* `--with-jemalloc-prefix=<prefix>`
+
     Prefix all public APIs with <prefix>. For example, if <prefix> is
     "prefix_", API changes like the following occur:

@@ -72,63 +90,46 @@ any of the following arguments (not a definitive list) to 'configure':
     jemalloc overlays the default malloc zone, but makes no attempt to actually
     replace the "malloc", "calloc", etc. symbols.

---without-export
+* `--without-export`
+
     Don't export public APIs. This can be useful when building jemalloc as a
     static library, or to avoid exporting public APIs when using the zone
     allocator on OSX.

---with-private-namespace=<prefix>
+* `--with-private-namespace=<prefix>`
+
     Prefix all library-private APIs with <prefix>je_. For shared libraries,
     symbol visibility mechanisms prevent these symbols from being exported, but
     for static libraries, naming collisions are a real possibility. By
     default, <prefix> is empty, which results in a symbol prefix of je_ .

---with-install-suffix=<suffix>
+* `--with-install-suffix=<suffix>`
+
     Append <suffix> to the base name of all installed files, such that multiple
     versions of jemalloc can coexist in the same installation directory. For
     example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.

---with-malloc-conf=<malloc_conf>
-    Embed <malloc_conf> as a run-time options string that is processed prior to
+* `--with-malloc-conf=<malloc_conf>`
+
+    Embed `<malloc_conf>` as a run-time options string that is processed prior to
     the malloc_conf global variable, the /etc/malloc.conf symlink, and the
-    MALLOC_CONF environment variable. For example, to change the default chunk
-    size to 256 KiB:
+    MALLOC_CONF environment variable. For example, to change the default decay
+    time to 30 seconds:

-        --with-malloc-conf=lg_chunk:18
+        --with-malloc-conf=decay_ms:30000

---disable-cc-silence
-    Disable code that silences non-useful compiler warnings. This is mainly
-    useful during development when auditing the set of warnings that are being
-    silenced.
-
---enable-debug
+* `--enable-debug`
+
     Enable assertions and validation code. This incurs a substantial
     performance hit, but is very useful during application development.
-    Implies --enable-ivsalloc.

---enable-code-coverage
-    Enable code coverage support, for use during jemalloc test development.
-    Additional testing targets are available if this option is enabled:
-
-        coverage
-        coverage_unit
-        coverage_integration
-        coverage_stress
-
-    These targets do not clear code coverage results from previous runs, and
-    there are interactions between the various coverage targets, so it is
-    usually advisable to run 'make clean' between repeated code coverage runs.
-
---disable-stats
+* `--disable-stats`
+
     Disable statistics gathering functionality. See the "opt.stats_print"
     option documentation for usage details.

---enable-ivsalloc
-    Enable validation code, which verifies that pointers reside within
-    jemalloc-owned chunks before dereferencing them. This incurs a minor
-    performance hit.
-
---enable-prof
+* `--enable-prof`
+
     Enable heap profiling and leak detection functionality. See the "opt.prof"
     option documentation for usage details. When enabled, there are several
     approaches to backtracing, and the configure script chooses the first one
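A minimal sketch combining the options documented in this hunk (flags exactly as named above; the decay value is the example's):

    ./configure --enable-debug --with-malloc-conf=decay_ms:30000
    make -j3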
@@ -138,73 +139,60 @@ any of the following arguments (not a definitive list) to 'configure':
     + libgcc (unless --disable-prof-libgcc)
     + gcc intrinsics (unless --disable-prof-gcc)

---enable-prof-libunwind
+* `--enable-prof-libunwind`
+
     Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
     backtracing.

---disable-prof-libgcc
+* `--disable-prof-libgcc`
+
     Disable the use of libgcc's backtracing functionality.

---disable-prof-gcc
+* `--disable-prof-gcc`
+
     Disable the use of gcc intrinsics for backtracing.

---with-static-libunwind=<libunwind.a>
+* `--with-static-libunwind=<libunwind.a>`
+
     Statically link against the specified libunwind.a rather than dynamically
     linking with -lunwind.

---disable-tcache
-    Disable thread-specific caches for small objects. Objects are cached and
-    released in bulk, thus reducing the total number of mutex operations. See
-    the "opt.tcache" option for usage details.
-
---disable-thp
-    Disable transparent huge page (THP) integration. On systems with THP
-    support, THPs are explicitly disabled as a side effect of unused dirty page
-    purging for chunks that back small and/or large allocations, because such
-    chunks typically comprise active, unused dirty, and untouched clean
-    pages.
+* `--disable-thp`
+
+    Disable transparent huge page (THP) integration. This option can be useful
+    when cross compiling.

---disable-munmap
-    Disable virtual memory deallocation via munmap(2); instead keep track of
-    the virtual memory for later use. munmap() is disabled by default (i.e.
-    --disable-munmap is implied) on Linux, which has a quirk in its virtual
-    memory allocation algorithm that causes semi-permanent VM map holes under
-    normal jemalloc operation.
-
---disable-fill
-    Disable support for junk/zero filling of memory, quarantine, and redzones.
-    See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
-    documentation for usage details.
+* `--disable-fill`
+
+    Disable support for junk/zero filling of memory. See the "opt.junk" and
+    "opt.zero" option documentation for usage details.

---disable-valgrind
-    Disable support for Valgrind.
-
---disable-zone-allocator
+* `--disable-zone-allocator`
+
     Disable zone allocator for Darwin. This means jemalloc won't be hooked as
     the default allocator on OSX/iOS.

---enable-utrace
+* `--enable-utrace`
+
     Enable utrace(2)-based allocation tracing. This feature is not broadly
     portable (FreeBSD has it, but Linux and OS X do not).

---enable-xmalloc
+* `--enable-xmalloc`
+
     Enable support for optional immediate termination due to out-of-memory
     errors, as is commonly implemented by "xmalloc" wrapper function for malloc.
     See the "opt.xmalloc" option documentation for usage details.

---enable-lazy-lock
+* `--enable-lazy-lock`
+
     Enable code that wraps pthread_create() to detect when an application
     switches from single-threaded to multi-threaded mode, so that it can avoid
     mutex locking/unlocking operations while in single-threaded mode. In
     practice, this feature usually has little impact on performance unless
     thread-specific caching is disabled.

---disable-tls
-    Disable thread-local storage (TLS), which allows for fast access to
-    thread-local variables via the __thread keyword. If TLS is available,
-    jemalloc uses it for several purposes.
-
---disable-cache-oblivious
+* `--disable-cache-oblivious`
+
     Disable cache-oblivious large allocation alignment for large allocation
     requests with no alignment constraints. If this feature is disabled, all
     large allocations are page-aligned as an implementation artifact, which can
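For example, a profiling-enabled build per the --enable-prof* options above might look like the following sketch (your_app is a placeholder; prof_prefix is the pre-existing opt.prof_prefix option, and the in-tree library path is an assumption to adjust for your build):

    ./configure --enable-prof --enable-prof-libunwind
    make -j3
    # Load the freshly built library and activate profiling at run time.
    LD_PRELOAD=lib/libjemalloc.so.2 \
        MALLOC_CONF="prof:true,prof_prefix:jeprof.out" ./your_app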
@@ -213,61 +201,51 @@ any of the following arguments (not a definitive list) to 'configure':
     most extreme case increases physical memory usage for the 16 KiB size class
     to 20 KiB.

---disable-syscall
+* `--disable-syscall`
+
     Disable use of syscall(2) rather than {open,read,write,close}(2). This is
     intended as a workaround for systems that place security limitations on
     syscall(2).

---with-xslroot=<path>
+* `--disable-cxx`
+
+    Disable C++ integration. This will cause new and delete operator
+    implementations to be omitted.
+
+* `--with-xslroot=<path>`
+
     Specify where to find DocBook XSL stylesheets when building the
     documentation.

---with-lg-page=<lg-page>
-    Specify the base 2 log of the system page size. This option is only useful
-    when cross compiling, since the configure script automatically determines
-    the host's page size by default.
+* `--with-lg-page=<lg-page>`
+
+    Specify the base 2 log of the allocator page size, which must in turn be at
+    least as large as the system page size. By default the configure script
+    determines the host's page size and sets the allocator page size equal to
+    the system page size, so this option need not be specified unless the
+    system page size may change between configuration and execution, e.g. when
+    cross compiling.
+
+* `--with-lg-page-sizes=<lg-page-sizes>`

---with-lg-page-sizes=<lg-page-sizes>
     Specify the comma-separated base 2 logs of the page sizes to support. This
-    option may be useful when cross-compiling in combination with
-    --with-lg-page, but its primary use case is for integration with FreeBSD's
+    option may be useful when cross compiling in combination with
+    `--with-lg-page`, but its primary use case is for integration with FreeBSD's
     libc, wherein jemalloc is embedded.

---with-lg-size-class-group=<lg-size-class-group>
-    Specify the base 2 log of how many size classes to use for each doubling in
-    size. By default jemalloc uses <lg-size-class-group>=2, which results in
-    e.g. the following size classes:
-
-        [...], 64,
-        80, 96, 112, 128,
-        160, [...]
+* `--with-lg-hugepage=<lg-hugepage>`
+
+    Specify the base 2 log of the system huge page size. This option is useful
+    when cross compiling, or when overriding the default for systems that do
+    not explicitly support huge pages.

-    <lg-size-class-group>=3 results in e.g. the following size classes:
-
-        [...], 64,
-        72, 80, 88, 96, 104, 112, 120, 128,
-        144, [...]
-
-    The minimal <lg-size-class-group>=0 causes jemalloc to only provide size
-    classes that are powers of 2:
-
-        [...],
-        64,
-        128,
-        256,
-        [...]
-
-    An implementation detail currently limits the total number of small size
-    classes to 255, and a compilation error will result if the
-    <lg-size-class-group> you specify cannot be supported. The limit is
-    roughly <lg-size-class-group>=4, depending on page size.
-
---with-lg-quantum=<lg-quantum>
+* `--with-lg-quantum=<lg-quantum>`
+
     Specify the base 2 log of the minimum allocation alignment. jemalloc needs
     to know the minimum alignment that meets the following C standard
     requirement (quoted from the April 12, 2011 draft of the C11 standard):

-      The pointer returned if the allocation succeeds is suitably aligned so
+    > The pointer returned if the allocation succeeds is suitably aligned so
       that it may be assigned to a pointer to any type of object with a
       fundamental alignment requirement and then used to access such an object
       or an array of such objects in the space allocated [...]
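A hedged sketch of the cross-compiling case these page-size options describe (the target triple and sizes are illustrative: 64 KiB pages and 2 MiB huge pages on an ARM target):

    ./configure --host=aarch64-linux-gnu \
        --with-lg-page=16 --with-lg-hugepage=21 --disable-thp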
@@ -275,67 +253,53 @@ any of the following arguments (not a definitive list) to 'configure':
     This setting is architecture-specific, and although jemalloc includes known
     safe values for the most commonly used modern architectures, there is a
     wrinkle related to GNU libc (glibc) that may impact your choice of
-    <lg-quantum>. On most modern architectures, this mandates 16-byte alignment
-    (<lg-quantum>=4), but the glibc developers chose not to meet this
+    <lg-quantum>. On most modern architectures, this mandates 16-byte
+    alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
     requirement for performance reasons. An old discussion can be found at
-    https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc,
+    <https://sourceware.org/bugzilla/show_bug.cgi?id=206> . Unlike glibc,
     jemalloc does follow the C standard by default (caveat: jemalloc
-    technically cheats if --with-lg-tiny-min is smaller than
-    --with-lg-quantum), but the fact that Linux systems already work around
-    this allocator noncompliance means that it is generally safe in practice to
-    let jemalloc's minimum alignment follow glibc's lead. If you specify
-    --with-lg-quantum=3 during configuration, jemalloc will provide additional
-    size classes that are not 16-byte-aligned (24, 40, and 56, assuming
-    --with-lg-size-class-group=2).
+    technically cheats for size classes smaller than the quantum), but the fact
+    that Linux systems already work around this allocator noncompliance means
+    that it is generally safe in practice to let jemalloc's minimum alignment
+    follow glibc's lead. If you specify `--with-lg-quantum=3` during
+    configuration, jemalloc will provide additional size classes that are not
+    16-byte-aligned (24, 40, and 56).

---with-lg-tiny-min=<lg-tiny-min>
-    Specify the base 2 log of the minimum tiny size class to support. Tiny
-    size classes are powers of 2 less than the quantum, and are only
-    incorporated if <lg-tiny-min> is less than <lg-quantum> (see
-    --with-lg-quantum). Tiny size classes technically violate the C standard
-    requirement for minimum alignment, and crashes could conceivably result if
-    the compiler were to generate instructions that made alignment assumptions,
-    both because illegal instruction traps could result, and because accesses
-    could straddle page boundaries and cause segmentation faults due to
-    accessing unmapped addresses.
-
-    The default of <lg-tiny-min>=3 works well in practice even on architectures
-    that technically require 16-byte alignment, probably for the same reason
-    --with-lg-quantum=3 works. Smaller tiny size classes can, and will, cause
-    crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an
-    example).
-
-    This option is rarely useful, and is mainly provided as documentation of a
-    subtle implementation detail. If you do use this option, specify a
-    value in [3, ..., <lg-quantum>].
-
 The following environment variables (not a definitive list) impact configure's
 behavior:

-CFLAGS="?"
-    Pass these flags to the C compiler. Any flags set by the configure script
-    are prepended, which means explicitly set flags generally take precedence.
-    Take care when specifying flags such as -Werror, because configure tests may
-    be affected in undesirable ways.
-
-EXTRA_CFLAGS="?"
-    Append these flags to CFLAGS, without passing them to the compiler during
-    configuration. This makes it possible to add flags such as -Werror, while
-    allowing the configure script to determine what other flags are appropriate
-    for the specified configuration.
+* `CFLAGS="?"`
+* `CXXFLAGS="?"`
+
+    Pass these flags to the C/C++ compiler. Any flags set by the configure
+    script are prepended, which means explicitly set flags generally take
+    precedence. Take care when specifying flags such as -Werror, because
+    configure tests may be affected in undesirable ways.
+
+* `EXTRA_CFLAGS="?"`
+* `EXTRA_CXXFLAGS="?"`
+
+    Append these flags to CFLAGS/CXXFLAGS, without passing them to the
+    compiler(s) during configuration. This makes it possible to add flags such
+    as -Werror, while allowing the configure script to determine what other
+    flags are appropriate for the specified configuration.
+
+* `CPPFLAGS="?"`

-CPPFLAGS="?"
     Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
     'cpp' when 'configure' is looking for include files, so you must use
     CPPFLAGS instead if you need to help 'configure' find header files.

-LD_LIBRARY_PATH="?"
+* `LD_LIBRARY_PATH="?"`
+
     'ld' uses this colon-separated list to find libraries.

-LDFLAGS="?"
+* `LDFLAGS="?"`
+
     Pass these flags when linking.

-PATH="?"
+* `PATH="?"`
+
     'configure' uses this to find programs.

 In some cases it may be necessary to work around configuration results that do
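As a sketch, the EXTRA_CFLAGS/EXTRA_CXXFLAGS route keeps -Werror away from configure's own feature probes, which is exactly how the CI matrix earlier in this commit passes it:

    EXTRA_CFLAGS="-Werror -Wno-array-bounds" \
    EXTRA_CXXFLAGS="-Werror" \
    ./configure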
@@ -347,7 +311,8 @@ e.g.:

     echo "je_cv_madv_free=no" > config.cache && ./configure -C

-=== Advanced compilation =======================================================
+## Advanced compilation

 To build only parts of jemalloc, use the following targets:

@@ -375,40 +340,51 @@ To clean up build results to varying degrees, use the following make targets:

    distclean
    relclean

-=== Advanced installation ======================================================
+## Advanced installation

 Optionally, define make variables when invoking make, including (not
 exclusively):

-INCLUDEDIR="?"
+* `INCLUDEDIR="?"`

   Use this as the installation prefix for header files.

-LIBDIR="?"
+* `LIBDIR="?"`

   Use this as the installation prefix for libraries.

-MANDIR="?"
+* `MANDIR="?"`

   Use this as the installation prefix for man pages.

-DESTDIR="?"
+* `DESTDIR="?"`

   Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR.  This is useful
   when installing to a different path than was specified via --prefix.

-CC="?"
+* `CC="?"`

   Use this to invoke the C compiler.

-CFLAGS="?"
+* `CFLAGS="?"`

   Pass these flags to the compiler.

-CPPFLAGS="?"
+* `CPPFLAGS="?"`

   Pass these flags to the C preprocessor.

-LDFLAGS="?"
+* `LDFLAGS="?"`

   Pass these flags when linking.

-PATH="?"
+* `PATH="?"`

   Use this to search for programs used during configuration and building.

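(Illustration of the variables above; the staging path is hypothetical:)

    # Stage the install tree under /tmp/stage rather than the live --prefix.
    make install DESTDIR=/tmp/stage
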
-=== Development ================================================================
+## Development

 If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
 script rather than 'configure'.  This re-generates 'configure', enables
@@ -425,7 +401,8 @@ directory, issue configuration and build commands:

    ../configure --enable-autogen
    make

-=== Documentation ==============================================================
+## Documentation

 The manual page is generated in both html and roff formats.  Any web browser
 can be used to view the html manual.  The roff manual page can be formatted
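(For reference, a generated roff page can typically be previewed with a
command along these lines; the doc/jemalloc.3 path is the conventional output
location, an assumption rather than a quote from this hunk:)

    # Render the roff manual in a terminal pager.
    nroff -man doc/jemalloc.3 | less
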
diff --git a/Makefile.in b/Makefile.in
@@ -9,6 +9,7 @@ vpath % .
 SHELL := /bin/sh

 CC := @CC@
+CXX := @CXX@

 # Configuration parameters.
 DESTDIR =
@@ -28,6 +29,10 @@ CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
 SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
 EXTRA_CFLAGS := @EXTRA_CFLAGS@
 CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
+CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@
+SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@
+EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@
+CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS))
 LDFLAGS := @LDFLAGS@
 EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
 LIBS := @LIBS@
@@ -50,9 +55,7 @@ cfghdrs_out := @cfghdrs_out@
 cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
 cfgoutputs_out := @cfgoutputs_out@
 enable_autogen := @enable_autogen@
-enable_code_coverage := @enable_code_coverage@
 enable_prof := @enable_prof@
-enable_valgrind := @enable_valgrind@
 enable_zone_allocator := @enable_zone_allocator@
 MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
 link_whole_archive := @link_whole_archive@
@@ -65,6 +68,8 @@ TEST_LD_MODE = @TEST_LD_MODE@
 MKLIB = @MKLIB@
 AR = @AR@
 ARFLAGS = @ARFLAGS@
+DUMP_SYMS = @DUMP_SYMS@
+AWK := @AWK@
 CC_MM = @CC_MM@
 LM := @LM@
 INSTALL = @INSTALL@
@@ -86,35 +91,32 @@ BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/je
 C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
 C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/arena.c \
-	$(srcroot)src/atomic.c \
+	$(srcroot)src/background_thread.c \
 	$(srcroot)src/base.c \
 	$(srcroot)src/bitmap.c \
-	$(srcroot)src/chunk.c \
-	$(srcroot)src/chunk_dss.c \
-	$(srcroot)src/chunk_mmap.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \
 	$(srcroot)src/extent.c \
+	$(srcroot)src/extent_dss.c \
+	$(srcroot)src/extent_mmap.c \
 	$(srcroot)src/hash.c \
-	$(srcroot)src/huge.c \
-	$(srcroot)src/mb.c \
+	$(srcroot)src/hooks.c \
+	$(srcroot)src/large.c \
+	$(srcroot)src/malloc_io.c \
 	$(srcroot)src/mutex.c \
+	$(srcroot)src/mutex_pool.c \
 	$(srcroot)src/nstime.c \
 	$(srcroot)src/pages.c \
 	$(srcroot)src/prng.c \
 	$(srcroot)src/prof.c \
-	$(srcroot)src/quarantine.c \
 	$(srcroot)src/rtree.c \
 	$(srcroot)src/stats.c \
 	$(srcroot)src/spin.c \
+	$(srcroot)src/sz.c \
 	$(srcroot)src/tcache.c \
 	$(srcroot)src/ticker.c \
 	$(srcroot)src/tsd.c \
-	$(srcroot)src/util.c \
 	$(srcroot)src/witness.c
-ifeq ($(enable_valgrind), 1)
-C_SRCS += $(srcroot)src/valgrind.c
-endif
 ifeq ($(enable_zone_allocator), 1)
 C_SRCS += $(srcroot)src/zone.c
 endif
@@ -148,24 +150,29 @@ C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
 	$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
 ifeq (1, $(link_whole_archive))
 C_UTIL_INTEGRATION_SRCS :=
+C_UTIL_CPP_SRCS :=
 else
-C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
+C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
 endif
 TESTS_UNIT := \
 	$(srcroot)test/unit/a0.c \
 	$(srcroot)test/unit/arena_reset.c \
 	$(srcroot)test/unit/atomic.c \
+	$(srcroot)test/unit/background_thread.c \
+	$(srcroot)test/unit/base.c \
 	$(srcroot)test/unit/bitmap.c \
 	$(srcroot)test/unit/ckh.c \
 	$(srcroot)test/unit/decay.c \
 	$(srcroot)test/unit/extent_quantize.c \
 	$(srcroot)test/unit/fork.c \
 	$(srcroot)test/unit/hash.c \
+	$(srcroot)test/unit/hooks.c \
 	$(srcroot)test/unit/junk.c \
 	$(srcroot)test/unit/junk_alloc.c \
 	$(srcroot)test/unit/junk_free.c \
-	$(srcroot)test/unit/lg_chunk.c \
 	$(srcroot)test/unit/mallctl.c \
+	$(srcroot)test/unit/malloc_io.c \
 	$(srcroot)test/unit/math.c \
 	$(srcroot)test/unit/mq.c \
 	$(srcroot)test/unit/mtx.c \
@@ -178,41 +185,62 @@ TESTS_UNIT := \
 	$(srcroot)test/unit/prof_gdump.c \
 	$(srcroot)test/unit/prof_idump.c \
 	$(srcroot)test/unit/prof_reset.c \
+	$(srcroot)test/unit/prof_tctx.c \
 	$(srcroot)test/unit/prof_thread_name.c \
 	$(srcroot)test/unit/ql.c \
 	$(srcroot)test/unit/qr.c \
-	$(srcroot)test/unit/quarantine.c \
 	$(srcroot)test/unit/rb.c \
+	$(srcroot)test/unit/retained.c \
 	$(srcroot)test/unit/rtree.c \
-	$(srcroot)test/unit/run_quantize.c \
 	$(srcroot)test/unit/SFMT.c \
 	$(srcroot)test/unit/size_classes.c \
+	$(srcroot)test/unit/slab.c \
 	$(srcroot)test/unit/smoothstep.c \
+	$(srcroot)test/unit/spin.c \
 	$(srcroot)test/unit/stats.c \
 	$(srcroot)test/unit/stats_print.c \
 	$(srcroot)test/unit/ticker.c \
 	$(srcroot)test/unit/nstime.c \
 	$(srcroot)test/unit/tsd.c \
-	$(srcroot)test/unit/util.c \
 	$(srcroot)test/unit/witness.c \
 	$(srcroot)test/unit/zero.c
+ifeq (@enable_prof@, 1)
+TESTS_UNIT += \
+	$(srcroot)test/unit/arena_reset_prof.c
+endif
 TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
 	$(srcroot)test/integration/allocated.c \
-	$(srcroot)test/integration/sdallocx.c \
+	$(srcroot)test/integration/extent.c \
 	$(srcroot)test/integration/mallocx.c \
 	$(srcroot)test/integration/MALLOCX_ARENA.c \
 	$(srcroot)test/integration/overflow.c \
 	$(srcroot)test/integration/posix_memalign.c \
 	$(srcroot)test/integration/rallocx.c \
+	$(srcroot)test/integration/sdallocx.c \
 	$(srcroot)test/integration/thread_arena.c \
 	$(srcroot)test/integration/thread_tcache_enabled.c \
-	$(srcroot)test/integration/xallocx.c \
-	$(srcroot)test/integration/chunk.c
+	$(srcroot)test/integration/xallocx.c
+ifeq (@enable_cxx@, 1)
+CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
+TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp
+else
+CPP_SRCS :=
+TESTS_INTEGRATION_CPP :=
+endif
 TESTS_STRESS := $(srcroot)test/stress/microbench.c
-TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)

+TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS)
+
+PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h
+PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h)
+C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O))
+C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym)
 C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
+CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O))
 C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
+CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O))
+C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O))
+C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym)
 C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
 C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
 C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
@@ -222,15 +250,17 @@ C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_

 TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
 TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
 TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
 TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
+TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)

 .PHONY: all dist build_doc_html build_doc_man build_doc
 .PHONY: install_bin install_include install_lib
 .PHONY: install_doc_html install_doc_man install_doc install
 .PHONY: tests check clean distclean relclean

-.SECONDARY : $(TESTS_OBJS)
+.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS)

 # Default target.
 all: build_lib
@@ -251,18 +281,32 @@ build_doc: $(DOCS)
 # Include generated dependency files.
 #
 ifdef CC_MM
+-include $(C_SYM_OBJS:%.$(O)=%.d)
 -include $(C_OBJS:%.$(O)=%.d)
+-include $(CPP_OBJS:%.$(O)=%.d)
 -include $(C_PIC_OBJS:%.$(O)=%.d)
+-include $(CPP_PIC_OBJS:%.$(O)=%.d)
+-include $(C_JET_SYM_OBJS:%.$(O)=%.d)
 -include $(C_JET_OBJS:%.$(O)=%.d)
 -include $(C_TESTLIB_OBJS:%.$(O)=%.d)
 -include $(TESTS_OBJS:%.$(O)=%.d)
+-include $(TESTS_CPP_OBJS:%.$(O)=%.d)
 endif

+$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c
+$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE
+$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O)
 $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
+$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp
 $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
 $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
+$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp
+$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS)
+$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c
+$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE
+$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O)
 $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
-$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET
+$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET
 $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
 $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
 $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
@@ -273,54 +317,88 @@ $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_T
 $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
 $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
 $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
 $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
 $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
+$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
 $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
 ifneq ($(IMPORTLIB),$(SO))
-$(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
+$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
 endif

-ifndef CC_MM
 # Dependencies.
+ifndef CC_MM
 HEADER_DIRS = $(srcroot)include/jemalloc/internal \
 	$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
-HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
+HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)))
-$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS)
+$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS)
-$(TESTS_OBJS): $(objroot)test/include/test/jemalloc_test.h
+$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
 endif

+$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h
+$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
+
-$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
+$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
 	@mkdir -p $(@D)
 	$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
 ifdef CC_MM
 	@$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
 endif

+$(C_SYMS): %.sym:
+	@mkdir -p $(@D)
+	$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@
+
+$(C_JET_SYMS): %.sym:
+	@mkdir -p $(@D)
+	$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@
+
+$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS)
+	$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
+
+$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS)
+	$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
+
+%.h: %.gen.h
+	@if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi
+
+$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
+	@mkdir -p $(@D)
+	$(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
+ifdef CC_MM
+	@$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
+endif
+
 ifneq ($(SOREV),$(SO))
 %.$(SO) : %.$(SOREV)
 	@mkdir -p $(@D)
 	ln -sf $(<F) $@
 endif

-$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS))
+$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS)) $(if $(PIC_CFLAGS),$(CPP_PIC_OBJS),$(CPP_OBJS))
 	@mkdir -p $(@D)
 	$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)

-$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS)
-$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS)
-$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS)
+$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS) $(CPP_PIC_OBJS)
+$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS) $(CPP_OBJS)
+$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(CPP_OBJS)

 $(STATIC_LIBS):
 	@mkdir -p $(@D)
 	$(AR) $(ARFLAGS)@AROUT@ $+

-$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
+$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
 	@mkdir -p $(@D)
 	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)

 $(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
 	@mkdir -p $(@D)
-	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
+	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)

+$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+	@mkdir -p $(@D)
+	$(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+
 $(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
 	@mkdir -p $(@D)
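(Orientation note: the private-namespace machinery added above reduces to a
three-step pipeline, sketched here with a concrete source file substituted;
nm/awk reflect the default DUMP_SYMS/AWK choices made in configure.ac below:)

    # 1. Compile with raw (unmangled) internal symbol names.
    cc -DJEMALLOC_NO_PRIVATE_NAMESPACE -c src/arena.c -o src/arena.sym.o
    # 2. Dump the object's symbols and filter them to the private set.
    nm -a src/arena.sym.o | awk -f private_symbols.awk > src/arena.sym
    # 3. Generate #define mangling macros from all collected .sym files.
    sh private_namespace.sh src/*.sym > private_namespace.gen.h
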
@@ -386,7 +464,7 @@ install_doc: install_doc_html install_doc_man
 install: install_bin install_include install_lib install_doc

 tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
-tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
 tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
 tests: tests_unit tests_integration tests_stress

@@ -399,78 +477,51 @@ stress_dir:
 check_dir: check_unit_dir check_integration_dir

 check_unit: tests_unit check_unit_dir
-	$(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
-	$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
 check_integration_prof: tests_integration check_integration_dir
 ifeq ($(enable_prof), 1)
-	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-	$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+	$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
 endif
 check_integration_decay: tests_integration check_integration_dir
-	$(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-	$(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-	$(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+	$(MALLOC_CONF)="dirty_decay_ms:-1,muzzy_decay_ms:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+	$(MALLOC_CONF)="dirty_decay_ms:0,muzzy_decay_ms:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
 check_integration: tests_integration check_integration_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
 stress: tests_stress stress_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
 check: check_unit check_integration check_integration_decay check_integration_prof

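(In day-to-day use, the targets above are driven as follows, per the rules in
this Makefile:)

    make tests   # build the unit, integration, and stress test programs
    make check   # run check_unit, check_integration, and the decay/prof runs
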
-ifeq ($(enable_code_coverage), 1)
-coverage_unit: check_unit
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS)
-
-coverage_integration: check_integration
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
-
-coverage_stress: stress
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS)
-
-coverage: check
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
-	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress integration $(TESTS_STRESS_OBJS)
-endif
-
 clean:
+	rm -f $(PRIVATE_NAMESPACE_HDRS)
+	rm -f $(PRIVATE_NAMESPACE_GEN_HDRS)
+	rm -f $(C_SYM_OBJS)
+	rm -f $(C_SYMS)
 	rm -f $(C_OBJS)
+	rm -f $(CPP_OBJS)
 	rm -f $(C_PIC_OBJS)
+	rm -f $(CPP_PIC_OBJS)
+	rm -f $(C_JET_SYM_OBJS)
+	rm -f $(C_JET_SYMS)
 	rm -f $(C_JET_OBJS)
 	rm -f $(C_TESTLIB_OBJS)
+	rm -f $(C_SYM_OBJS:%.$(O)=%.d)
 	rm -f $(C_OBJS:%.$(O)=%.d)
-	rm -f $(C_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_OBJS:%.$(O)=%.gcno)
+	rm -f $(CPP_OBJS:%.$(O)=%.d)
 	rm -f $(C_PIC_OBJS:%.$(O)=%.d)
-	rm -f $(C_PIC_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_PIC_OBJS:%.$(O)=%.gcno)
+	rm -f $(CPP_PIC_OBJS:%.$(O)=%.d)
+	rm -f $(C_JET_SYM_OBJS:%.$(O)=%.d)
 	rm -f $(C_JET_OBJS:%.$(O)=%.d)
-	rm -f $(C_JET_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_JET_OBJS:%.$(O)=%.gcno)
 	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcda)
-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcno)
 	rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
 	rm -f $(TESTS_OBJS)
 	rm -f $(TESTS_OBJS:%.$(O)=%.d)
-	rm -f $(TESTS_OBJS:%.$(O)=%.gcda)
-	rm -f $(TESTS_OBJS:%.$(O)=%.gcno)
 	rm -f $(TESTS_OBJS:%.$(O)=%.out)
+	rm -f $(TESTS_CPP_OBJS:%.$(O)=%$(EXE))
+	rm -f $(TESTS_CPP_OBJS)
+	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.d)
+	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.out)
 	rm -f $(DSOS) $(STATIC_LIBS)
-	rm -f $(objroot)*.gcov.*

 distclean: clean
 	rm -f $(objroot)bin/jemalloc-config
diff --git a/README b/README
@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support.  jemalloc first came
 into use as the FreeBSD libc allocator in 2005, and since then it has found its
 way into numerous applications that rely on its predictable behavior.  In 2010
 jemalloc development efforts broadened to include developer support features
-such as heap profiling, Valgrind integration, and extensive monitoring/tuning
-hooks.  Modern jemalloc releases continue to be integrated back into FreeBSD,
-and therefore versatility remains critical.  Ongoing development efforts trend
-toward making jemalloc among the best allocators for a broad range of demanding
-applications, and eliminating/mitigating weaknesses that have practical
-repercussions for real world applications.
+such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
+releases continue to be integrated back into FreeBSD, and therefore versatility
+remains critical.  Ongoing development efforts trend toward making jemalloc
+among the best allocators for a broad range of demanding applications, and
+eliminating/mitigating weaknesses that have practical repercussions for real
+world applications.

 The COPYING file contains copyright and licensing information.

diff --git a/bin/jemalloc-config.in b/bin/jemalloc-config.in
@@ -18,6 +18,7 @@ Options:
   --cc : Print compiler used to build jemalloc.
   --cflags : Print compiler flags used to build jemalloc.
   --cppflags : Print preprocessor flags used to build jemalloc.
+  --cxxflags : Print C++ compiler flags used to build jemalloc.
   --ldflags : Print library flags used to build jemalloc.
   --libs : Print libraries jemalloc was linked against.
 EOF
@@ -67,6 +68,9 @@ case "$1" in
   --cppflags)
 	echo "@CPPFLAGS@"
 	;;
+  --cxxflags)
+	echo "@CXXFLAGS@"
+	;;
   --ldflags)
 	echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
 	;;
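(Usage sketch for the new option, mirroring the existing ones:)

    jemalloc-config --cxxflags   # prints the CXXFLAGS jemalloc was built with
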
diff --git a/bin/jeprof.in b/bin/jeprof.in
@@ -71,6 +71,7 @@
 use strict;
 use warnings;
 use Getopt::Long;
+use Cwd;

 my $JEPROF_VERSION = "@jemalloc_version@";
 my $PPROF_VERSION = "2.0";
@@ -2891,21 +2892,21 @@ sub RemoveUninterestingFrames {
   my %skip = ();
   my $skip_regexp = 'NOMATCH';
   if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
-    foreach my $name ('calloc',
+    foreach my $name ('@JEMALLOC_PREFIX@calloc',
                       'cfree',
-                      'malloc',
-                      'free',
-                      'memalign',
-                      'posix_memalign',
-                      'aligned_alloc',
+                      '@JEMALLOC_PREFIX@malloc',
+                      '@JEMALLOC_PREFIX@free',
+                      '@JEMALLOC_PREFIX@memalign',
+                      '@JEMALLOC_PREFIX@posix_memalign',
+                      '@JEMALLOC_PREFIX@aligned_alloc',
                       'pvalloc',
-                      'valloc',
-                      'realloc',
-                      'mallocx', # jemalloc
-                      'rallocx', # jemalloc
-                      'xallocx', # jemalloc
-                      'dallocx', # jemalloc
-                      'sdallocx', # jemalloc
+                      '@JEMALLOC_PREFIX@valloc',
+                      '@JEMALLOC_PREFIX@realloc',
+                      '@JEMALLOC_PREFIX@mallocx',
+                      '@JEMALLOC_PREFIX@rallocx',
+                      '@JEMALLOC_PREFIX@xallocx',
+                      '@JEMALLOC_PREFIX@dallocx',
+                      '@JEMALLOC_PREFIX@sdallocx',
                       'tc_calloc',
                       'tc_cfree',
                       'tc_malloc',
@@ -4570,7 +4571,7 @@ sub ParseTextSectionHeader {
 # Split /proc/pid/maps dump into a list of libraries
 sub ParseLibraries {
   return if $main::use_symbol_page;  # We don't need libraries info.
-  my $prog = shift;
+  my $prog = Cwd::abs_path(shift);
   my $map = shift;
   my $pcs = shift;

@@ -4603,6 +4604,16 @@ sub ParseLibraries {
       $finish = HexExtend($2);
       $offset = $zero_offset;
       $lib = $3;
+    } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
+      # PIEs and address space randomization do not play well with our
+      # default assumption that main executable is at lowest
+      # addresses. So we're detecting main executable in
+      # /proc/self/maps as well.
+      $start = HexExtend($1);
+      $finish = HexExtend($2);
+      $offset = HexExtend($3);
+      $lib = $4;
+      $lib =~ s|\\|/|g;      # turn windows-style paths into unix-style paths
     }
     # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
     # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
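(The new elsif branch matches the executable's own row in /proc/self/maps; a
line of the shape it targets looks like this, with hypothetical addresses,
device, inode, and path:)

    5595a1c1c000-5595a1c5d000 r-xp 00001000 103:05 1837253  /usr/local/bin/myprog
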
diff --git a/configure.ac b/configure.ac
@@ -66,6 +66,29 @@ CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
 JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
 )

+CONFIGURE_CXXFLAGS=
+SPECIFIED_CXXFLAGS="${CXXFLAGS}"
+dnl JE_CXXFLAGS_ADD(cxxflag)
+AC_DEFUN([JE_CXXFLAGS_ADD],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
+JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+    return 0;
+]])],
+              [je_cv_cxxflags_added=$1]
+              AC_MSG_RESULT([yes]),
+              [je_cv_cxxflags_added=]
+              AC_MSG_RESULT([no])
+              [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
+)
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+])

 dnl JE_COMPILABLE(label, hcode, mcode, rvar)
 dnl
 dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
@@ -216,9 +239,9 @@ if test "x$GCC" = "xyes" ; then
   fi
 fi
 JE_CFLAGS_ADD([-Wall])
-JE_CFLAGS_ADD([-Werror=declaration-after-statement])
 JE_CFLAGS_ADD([-Wshorten-64-to-32])
 JE_CFLAGS_ADD([-Wsign-compare])
+JE_CFLAGS_ADD([-Wundef])
 JE_CFLAGS_ADD([-pipe])
 JE_CFLAGS_ADD([-g3])
 elif test "x$je_cv_msvc" = "xyes" ; then
@@ -235,18 +258,56 @@ if test "x$je_cv_cray" = "xyes" ; then
   JE_CFLAGS_ADD([-hipa2])
   JE_CFLAGS_ADD([-hnognu])
 fi
-if test "x$enable_cc_silence" != "xno" ; then
   dnl ignore unreachable code warning
   JE_CFLAGS_ADD([-hnomessage=128])
   dnl ignore redefinition of "malloc", "free", etc warning
   JE_CFLAGS_ADD([-hnomessage=1357])
 fi
-fi
 AC_SUBST([CONFIGURE_CFLAGS])
 AC_SUBST([SPECIFIED_CFLAGS])
 AC_SUBST([EXTRA_CFLAGS])
 AC_PROG_CPP

+AC_ARG_ENABLE([cxx],
+  [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
+if test "x$enable_cxx" = "xno" ; then
+  enable_cxx="0"
+else
+  enable_cxx="1"
+fi
+,
+enable_cxx="1"
+)
+if test "x$enable_cxx" = "x1" ; then
+  dnl Require at least c++14, which is the first version to support sized
+  dnl deallocation.  C++ support is not compiled otherwise.
+  m4_include([m4/ax_cxx_compile_stdcxx.m4])
+  AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
+  if test "x${HAVE_CXX14}" = "x1" ; then
+    JE_CXXFLAGS_ADD([-Wall])
+    JE_CXXFLAGS_ADD([-g3])
+
+    SAVED_LIBS="${LIBS}"
+    JE_APPEND_VS(LIBS, -lstdc++)
+    JE_COMPILABLE([libstdc++ linkage], [
+#include <stdlib.h>
+], [[
+	int *arr = (int *)malloc(sizeof(int) * 42);
+	if (arr == NULL)
+		return 1;
+]], [je_cv_libstdcxx])
+    if test "x${je_cv_libstdcxx}" = "xno" ; then
+      LIBS="${SAVED_LIBS}"
+    fi
+  else
+    enable_cxx="0"
+  fi
+fi
+AC_SUBST([enable_cxx])
+AC_SUBST([CONFIGURE_CXXFLAGS])
+AC_SUBST([SPECIFIED_CXXFLAGS])
+AC_SUBST([EXTRA_CXXFLAGS])
+
 AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
 if test "x${ac_cv_big_endian}" = "x1" ; then
   AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
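(Two practical notes on the C++ block above: C++14 is the floor because it is
the first standard with sized deallocation, and the whole integration can be
opted out of at configure time via the option defined by AC_ARG_ENABLE:)

    ./configure --disable-cxx   # build jemalloc without the C++ API and tests
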
@@ -336,14 +397,86 @@ case "${host_cpu}" in
 	fi
 	fi
 	;;
-  powerpc)
+  powerpc*)
 	AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
+	CPU_SPINWAIT='__asm__ volatile("or 31,31,31")'
 	;;
   *)
 	;;
 esac
 AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])

+case "${host_cpu}" in
+  aarch64)
+    AC_MSG_CHECKING([number of significant virtual address bits])
+    LG_VADDR=48
+    AC_MSG_RESULT([$LG_VADDR])
+    ;;
+  x86_64)
+    AC_CACHE_CHECK([number of significant virtual address bits],
+                   [je_cv_lg_vaddr],
+                   AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <stdio.h>
+#ifdef _WIN32
+#include <limits.h>
+#include <intrin.h>
+typedef unsigned __int32 uint32_t;
+#else
+#include <stdint.h>
+#endif
+]], [[
+	uint32_t r[[4]];
+	uint32_t eax_in = 0x80000008U;
+#ifdef _WIN32
+	__cpuid((int *)r, (int)eax_in);
+#else
+	asm volatile ("cpuid"
+	    : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]])
+	    : "a" (eax_in), "c" (0)
+	);
+#endif
+	uint32_t eax_out = r[[0]];
+	uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
+	FILE *f = fopen("conftest.out", "w");
+	if (f == NULL) {
+		return 1;
+	}
+	if (vaddr > (sizeof(void *) << 3)) {
+		vaddr = sizeof(void *) << 3;
+	}
+	fprintf(f, "%u", vaddr);
+	fclose(f);
+	return 0;
+]])],
+                              [je_cv_lg_vaddr=`cat conftest.out`],
+                              [je_cv_lg_vaddr=error],
+                              [je_cv_lg_vaddr=57]))
+    if test "x${je_cv_lg_vaddr}" != "x" ; then
+      LG_VADDR="${je_cv_lg_vaddr}"
+    fi
+    if test "x${LG_VADDR}" != "xerror" ; then
+      AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
+    else
+      AC_MSG_ERROR([cannot determine number of significant virtual address bits])
+    fi
+    ;;
+  *)
+    AC_MSG_CHECKING([number of significant virtual address bits])
+    if test "x${LG_SIZEOF_PTR}" = "x3" ; then
+      LG_VADDR=64
+    elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
+      LG_VADDR=32
+    elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
+      LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
+    else
+      AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
+    fi
+    AC_MSG_RESULT([$LG_VADDR])
+    ;;
+esac
+AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
+
 LD_PRELOAD_VAR="LD_PRELOAD"
 so="so"
 importlib="${so}"
@@ -377,6 +510,8 @@ AN_PROGRAM([ar], [AC_PROG_AR])
 AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
 AC_PROG_AR

+AC_PROG_AWK
+
 dnl Platform-specific settings.  abi and RPATH can probably be determined
 dnl programmatically, but doing so is error-prone, which makes it generally
 dnl not worth the trouble.
@@ -384,8 +519,10 @@ dnl
 dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
-default_munmap="1"
+default_retain="0"
 maps_coalesce="1"
+DUMP_SYMS="nm -a"
+SYM_PREFIX=""
 case "${host}" in
   *-*-darwin* | *-*-ios*)
 	abi="macho"
@@ -397,6 +534,7 @@ case "${host}" in
 	DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
 	SOREV="${rev}.${so}"
 	sbrk_deprecated="1"
+	SYM_PREFIX="_"
 	;;
   *-*-freebsd*)
 	abi="elf"
@@ -417,22 +555,28 @@ case "${host}" in
 	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
 	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
 	abi="elf"
+	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
 	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
 	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
 	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
-	AC_DEFINE([JEMALLOC_C11ATOMICS])
+	AC_DEFINE([JEMALLOC_C11_ATOMICS])
 	force_tls="0"
-	default_munmap="0"
+	if test "${LG_SIZEOF_PTR}" = "3"; then
+		default_retain="1"
+	fi
 	;;
   *-*-linux* | *-*-kfreebsd*)
 	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
 	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
 	abi="elf"
+	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS])
 	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
 	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
 	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
 	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
-	default_munmap="0"
+	if test "${LG_SIZEOF_PTR}" = "3"; then
+		default_retain="1"
+	fi
 	;;
   *-*-netbsd*)
 	AC_MSG_CHECKING([ABI])
@@ -455,7 +599,7 @@ case "${host}" in
 	JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
 	;;
   *-ibm-aix*)
-	if "$LG_SIZEOF_PTR" = "8"; then
+	if test "${LG_SIZEOF_PTR}" = "3"; then
 	  dnl 64bit AIX
 	  LD_PRELOAD_VAR="LDR_PRELOAD64"
 	else
@@ -485,6 +629,7 @@ case "${host}" in
 	DSO_LDFLAGS="-shared"
 	link_whole_archive="1"
 	fi
+	DUMP_SYMS="dumpbin /SYMBOLS"
 	a="lib"
 	libprefix=""
 	SOREV="${so}"
@@ -532,6 +677,7 @@ AC_SUBST([TEST_LD_MODE])
 AC_SUBST([MKLIB])
 AC_SUBST([ARFLAGS])
 AC_SUBST([AROUT])
+AC_SUBST([DUMP_SYMS])
 AC_SUBST([CC_MM])

 dnl Determine whether libm must be linked to use e.g. log(3).
@@ -551,6 +697,7 @@ if test "x${je_cv_attribute}" = "xyes" ; then
   AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
   if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
     JE_CFLAGS_ADD([-fvisibility=hidden])
+    JE_CXXFLAGS_ADD([-fvisibility=hidden])
   fi
 fi
 dnl Check for tls_model attribute support (clang 3.0 still lacks support).
@@ -633,41 +780,6 @@ AC_PROG_RANLIB
 AC_PATH_PROG([LD], [ld], [false], [$PATH])
 AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])

-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
-
-dnl Check for allocator-related functions that should be wrapped.
-AC_CHECK_FUNC([memalign],
-	      [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
-	       public_syms="${public_syms} memalign"])
-AC_CHECK_FUNC([valloc],
-	      [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
-	       public_syms="${public_syms} valloc"])
-
-dnl Do not compute test code coverage by default.
-GCOV_FLAGS=
-AC_ARG_ENABLE([code-coverage],
-	      [AS_HELP_STRING([--enable-code-coverage],
-			      [Enable code coverage])],
-[if test "x$enable_code_coverage" = "xno" ; then
-  enable_code_coverage="0"
-else
-  enable_code_coverage="1"
-fi
-],
-[enable_code_coverage="0"]
-)
-if test "x$enable_code_coverage" = "x1" ; then
-  deoptimize="no"
-  echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
-  if test "x${deoptimize}" = "xyes" ; then
-    JE_CFLAGS_ADD([-O0])
-  fi
-  JE_CFLAGS_ADD([-fprofile-arcs -ftest-coverage])
-  EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
-  AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ])
-fi
-AC_SUBST([enable_code_coverage])
-
 dnl Perform no name mangling by default.
 AC_ARG_WITH([mangling],
   [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
@@ -683,11 +795,14 @@ else
   JEMALLOC_PREFIX="je_"
 fi]
 )
-if test "x$JEMALLOC_PREFIX" != "x" ; then
+if test "x$JEMALLOC_PREFIX" = "x" ; then
+  AC_DEFINE([JEMALLOC_IS_MALLOC])
+else
   JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
   AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
   AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
 fi
+AC_SUBST([JEMALLOC_PREFIX])
 AC_SUBST([JEMALLOC_CPREFIX])

 AC_ARG_WITH([export],
@@ -697,6 +812,49 @@ AC_ARG_WITH([export],
 fi]
 )
 
+public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"
+dnl Check for additional platform-specific public API functions.
+AC_CHECK_FUNC([memalign],
+  [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
+  public_syms="${public_syms} memalign"])
+AC_CHECK_FUNC([valloc],
+  [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
+  public_syms="${public_syms} valloc"])
+
+dnl Check for allocator-related functions that should be wrapped.
+wrap_syms=
+if test "x${JEMALLOC_PREFIX}" = "x" ; then
+  AC_CHECK_FUNC([__libc_calloc],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ])
+    wrap_syms="${wrap_syms} __libc_calloc"])
+  AC_CHECK_FUNC([__libc_free],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ])
+    wrap_syms="${wrap_syms} __libc_free"])
+  AC_CHECK_FUNC([__libc_malloc],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ])
+    wrap_syms="${wrap_syms} __libc_malloc"])
+  AC_CHECK_FUNC([__libc_memalign],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ])
+    wrap_syms="${wrap_syms} __libc_memalign"])
+  AC_CHECK_FUNC([__libc_realloc],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ])
+    wrap_syms="${wrap_syms} __libc_realloc"])
+  AC_CHECK_FUNC([__libc_valloc],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ])
+    wrap_syms="${wrap_syms} __libc_valloc"])
+  AC_CHECK_FUNC([__posix_memalign],
+    [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ])
+    wrap_syms="${wrap_syms} __posix_memalign"])
+fi
+
+case "${host}" in
+  *-*-mingw* | *-*-cygwin*)
+    wrap_syms="${wrap_syms} tls_callback"
+    ;;
+  *)
+    ;;
+esac
+
 dnl Mangle library-private APIs.
 AC_ARG_WITH([private_namespace],
 [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
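
The new wrap_syms list covers glibc-internal entry points that an unprefixed jemalloc must also interpose so that allocations made through those aliases stay inside jemalloc. A standalone illustration of what these symbols are, assuming glibc (they are non-standard aliases, so this builds only there):

```c
/* glibc exports internal aliases such as __libc_malloc/__libc_free; an
 * interposing allocator can define or forward to them. This sketch just
 * calls the glibc versions directly (glibc-only, non-portable). */
#include <stddef.h>
#include <stdio.h>

extern void *__libc_malloc(size_t size);
extern void __libc_free(void *ptr);

int main(void) {
    void *p = __libc_malloc(32);   /* bypasses any malloc wrapper */
    printf("__libc_malloc(32) -> %p\n", p);
    __libc_free(p);
    return 0;
}
```
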
@@ -738,7 +896,7 @@ cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
 cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
 cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
 cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
 cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
 cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
 
@@ -750,7 +908,7 @@ cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
 cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
 cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
 cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
 cfgoutputs_out="${cfgoutputs_out} test/test.sh"
 cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
 
@@ -762,15 +920,14 @@ cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
 cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
 cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
 cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
 cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
 cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
 
 cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
 cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
 cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
 cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
 cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
 cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
@@ -781,8 +938,8 @@ cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
 
 cfghdrs_out="include/jemalloc/jemalloc_defs.h"
 cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
 cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
 cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
 cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
@@ -798,26 +955,10 @@ cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.i
 cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
 cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
 
-dnl Silence irrelevant compiler warnings by default.
-AC_ARG_ENABLE([cc-silence],
-  [AS_HELP_STRING([--disable-cc-silence],
-  [Do not silence irrelevant compiler warnings])],
-[if test "x$enable_cc_silence" = "xno" ; then
-  enable_cc_silence="0"
-else
-  enable_cc_silence="1"
-fi
-],
-[enable_cc_silence="1"]
-)
-if test "x$enable_cc_silence" = "x1" ; then
-  AC_DEFINE([JEMALLOC_CC_SILENCE], [ ])
-fi
-
 dnl Do not compile with debugging by default.
 AC_ARG_ENABLE([debug],
   [AS_HELP_STRING([--enable-debug],
-  [Build debugging code (implies --enable-ivsalloc)])],
+  [Build debugging code])],
 [if test "x$enable_debug" = "xno" ; then
   enable_debug="0"
 else
@@ -831,35 +972,21 @@ if test "x$enable_debug" = "x1" ; then
 fi
 if test "x$enable_debug" = "x1" ; then
   AC_DEFINE([JEMALLOC_DEBUG], [ ])
-  enable_ivsalloc="1"
 fi
 AC_SUBST([enable_debug])
 
-dnl Do not validate pointers by default.
-AC_ARG_ENABLE([ivsalloc],
-  [AS_HELP_STRING([--enable-ivsalloc],
-  [Validate pointers passed through the public API])],
-[if test "x$enable_ivsalloc" = "xno" ; then
-  enable_ivsalloc="0"
-else
-  enable_ivsalloc="1"
-fi
-],
-[enable_ivsalloc="0"]
-)
-if test "x$enable_ivsalloc" = "x1" ; then
-  AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
-fi
-
 dnl Only optimize if not debugging.
 if test "x$enable_debug" = "x0" ; then
   if test "x$GCC" = "xyes" ; then
     JE_CFLAGS_ADD([-O3])
+    JE_CXXFLAGS_ADD([-O3])
     JE_CFLAGS_ADD([-funroll-loops])
   elif test "x$je_cv_msvc" = "xyes" ; then
     JE_CFLAGS_ADD([-O2])
+    JE_CXXFLAGS_ADD([-O2])
   else
     JE_CFLAGS_ADD([-O])
+    JE_CXXFLAGS_ADD([-O])
   fi
 fi
 
@@ -948,7 +1075,9 @@ fi
 if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
      -a "x$GCC" = "xyes" ; then
   AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
+  if test "x${enable_prof_libgcc}" = "x1" ; then
   AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
+  fi
   if test "x${enable_prof_libgcc}" = "x1" ; then
     backtrace_method="libgcc"
     AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
@@ -991,43 +1120,16 @@ if test "x$enable_prof" = "x1" ; then
 fi
 AC_SUBST([enable_prof])
 
-dnl Enable thread-specific caching by default.
-AC_ARG_ENABLE([tcache],
-  [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])],
-[if test "x$enable_tcache" = "xno" ; then
-  enable_tcache="0"
-else
-  enable_tcache="1"
-fi
-],
-[enable_tcache="1"]
-)
-if test "x$enable_tcache" = "x1" ; then
-  AC_DEFINE([JEMALLOC_TCACHE], [ ])
-fi
-AC_SUBST([enable_tcache])
-
 dnl Indicate whether adjacent virtual memory mappings automatically coalesce
 dnl (and fragment on demand).
 if test "x${maps_coalesce}" = "x1" ; then
   AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
 fi
 
-dnl Enable VM deallocation via munmap() by default.
-AC_ARG_ENABLE([munmap],
-[AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
-[if test "x$enable_munmap" = "xno" ; then
-  enable_munmap="0"
-else
-  enable_munmap="1"
+dnl Indicate whether to retain memory (rather than using munmap()) by default.
+if test "x$default_retain" = "x1" ; then
+  AC_DEFINE([JEMALLOC_RETAIN], [ ])
 fi
-],
-[enable_munmap="${default_munmap}"]
-)
-if test "x$enable_munmap" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MUNMAP], [ ])
-fi
-AC_SUBST([enable_munmap])
 
 dnl Enable allocation from DSS if supported by the OS.
 have_dss="1"
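
The munmap toggle is gone: whether jemalloc retains (rather than unmaps) virtual memory is now a platform default recorded as JEMALLOC_RETAIN, and the effective setting is readable at runtime. A minimal sketch of reading it back, assuming a program linked against a jemalloc 5.x that exposes the opt.retain mallctl:

```c
/* Read the opt.retain default back through mallctl (jemalloc 5+).
 * Build with something like: cc retain.c -ljemalloc */
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    bool retain;
    size_t sz = sizeof(retain);
    if (mallctl("opt.retain", &retain, &sz, NULL, 0) != 0) {
        fprintf(stderr, "opt.retain not available\n");
        return 1;
    }
    printf("opt.retain: %s\n", retain ? "true" : "false");
    return 0;
}
```
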
@@ -1048,8 +1150,7 @@ fi
 
 dnl Support the junk/zero filling option by default.
 AC_ARG_ENABLE([fill],
-  [AS_HELP_STRING([--disable-fill],
-  [Disable support for junk/zero filling, quarantine, and redzones])],
+  [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
 [if test "x$enable_fill" = "xno" ; then
   enable_fill="0"
 else
@@ -1091,35 +1192,6 @@ if test "x$enable_utrace" = "x1" ; then
 fi
 AC_SUBST([enable_utrace])
 
-dnl Support Valgrind by default.
-AC_ARG_ENABLE([valgrind],
-  [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])],
-[if test "x$enable_valgrind" = "xno" ; then
-  enable_valgrind="0"
-else
-  enable_valgrind="1"
-fi
-],
-[enable_valgrind="1"]
-)
-if test "x$enable_valgrind" = "x1" ; then
-  JE_COMPILABLE([valgrind], [
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-
-#if !defined(VALGRIND_RESIZEINPLACE_BLOCK)
-#  error "Incompatible Valgrind version"
-#endif
-], [], [je_cv_valgrind])
-  if test "x${je_cv_valgrind}" = "xno" ; then
-    enable_valgrind="0"
-  fi
-  if test "x$enable_valgrind" = "x1" ; then
-    AC_DEFINE([JEMALLOC_VALGRIND], [ ])
-  fi
-fi
-AC_SUBST([enable_valgrind])
-
 dnl Do not support the xmalloc option by default.
 AC_ARG_ENABLE([xmalloc],
 [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
@@ -1210,13 +1282,6 @@ else
   fi
 fi
 
-AC_ARG_WITH([lg_tiny_min],
-  [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>],
-  [Base 2 log of minimum tiny size class to support])],
-  [LG_TINY_MIN="$with_lg_tiny_min"],
-  [LG_TINY_MIN="3"])
-AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN])
-
 AC_ARG_WITH([lg_quantum],
   [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
   [Base 2 log of minimum allocation alignment])],
@@ -1280,17 +1345,41 @@ else
   AC_MSG_ERROR([cannot determine value for LG_PAGE])
 fi
 
+AC_ARG_WITH([lg_hugepage],
+  [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
+  [Base 2 log of system huge page size])],
+  [je_cv_lg_hugepage="${with_lg_hugepage}"],
+  [je_cv_lg_hugepage=""])
+if test "x${je_cv_lg_hugepage}" = "x" ; then
+  dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
+  dnl page size, if any.  The relevant line looks like:
+  dnl
+  dnl   Hugepagesize:       2048 kB
+  if test -e "/proc/meminfo" ; then
+    hpsk=[`cat /proc/meminfo 2>/dev/null | \
+          grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
+          awk '{print $2}'`]
+    if test "x${hpsk}" != "x" ; then
+      je_cv_lg_hugepage=10
+      while test "${hpsk}" -gt 1 ; do
+        hpsk="$((hpsk / 2))"
+        je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
+      done
+    fi
+  fi
+
+  dnl Set default if unable to automatically configure.
+  if test "x${je_cv_lg_hugepage}" = "x" ; then
+    je_cv_lg_hugepage=21
+  fi
+fi
+AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}])
+
 AC_ARG_WITH([lg_page_sizes],
   [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
   [Base 2 logs of system page sizes to support])],
 [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
 
-AC_ARG_WITH([lg_size_class_group],
-  [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>],
-  [Base 2 log of size classes per doubling])],
-  [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"],
-  [LG_SIZE_CLASS_GROUP="2"])
-
 dnl ============================================================================
 dnl jemalloc configuration.
 dnl
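
The loop above derives LG_HUGEPAGE by repeatedly halving the kB figure from /proc/meminfo and counting doublings on top of lg(1 kB) = 10. The same computation in C, as a sanity check (the 2048 kB value is just an example input):

```c
/* Mirrors the configure loop: 2048 kB => 2^21 bytes => LG_HUGEPAGE = 21. */
#include <stdio.h>

int main(void) {
    unsigned long hpsk = 2048; /* e.g. "Hugepagesize:    2048 kB" */
    int lg_hugepage = 10;      /* 1 kB == 2^10 bytes */
    while (hpsk > 1) {
        hpsk /= 2;
        lg_hugepage++;
    }
    printf("LG_HUGEPAGE = %d\n", lg_hugepage);
    return 0;
}
```
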
@@ -1300,10 +1389,14 @@ AC_ARG_WITH([version],
   [Version string])],
 [
   echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
-  if test $? -ne 0 ; then
-    AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>])
-  fi
+  if test $? -eq 0 ; then
   echo "$with_version" > "${objroot}VERSION"
+  else
+    echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null
+    if test $? -ne 0 ; then
+      AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION])
+    fi
+  fi
 ], [
   dnl Set VERSION if source directory is inside a git repository.
   if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
@@ -1351,12 +1444,24 @@ dnl ============================================================================
 dnl Configure pthreads.
 
 if test "x$abi" != "xpecoff" ; then
+  AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ])
   AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
   dnl Some systems may embed pthreads functionality in libc; check for libpthread
   dnl first, but try libc too before failing.
   AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -lpthread)],
                [AC_SEARCH_LIBS([pthread_create], , ,
                                AC_MSG_ERROR([libpthread is missing]))])
+  wrap_syms="${wrap_syms} pthread_create"
+  have_pthread="1"
+  dnl Check if we have dlsym support.
+  have_dlsym="1"
+  AC_CHECK_HEADERS([dlfcn.h],
+      AC_CHECK_FUNC([dlsym], [],
+        [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]),
+      [have_dlsym="0"])
+  if test "x$have_dlsym" = "x1" ; then
+    AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ])
+  fi
 JE_COMPILABLE([pthread_atfork(3)], [
 #include <pthread.h>
 ], [
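
The new dlsym probe feeds several later decisions (lazy locking, background threads). A standalone sketch of the runtime lookup it enables; RTLD_DEFAULT and the -ldl link requirement are glibc conventions, and the file name in the build command is just a placeholder:

```c
/* Resolve a symbol at runtime with dlsym(), as jemalloc's lazy-lock
 * machinery does for pthread_create.
 * Build with: cc dlsym_demo.c -ldl -lpthread */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int main(void) {
    void *sym = dlsym(RTLD_DEFAULT, "pthread_create");
    printf("pthread_create resolves to %p\n", sym);
    return sym == NULL;
}
```
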
@@ -1462,6 +1567,24 @@ if test "x$have_secure_getenv" = "x1" ; then
   AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
 fi
 
+dnl Check if the GNU-specific sched_getcpu function exists.
+AC_CHECK_FUNC([sched_getcpu],
+              [have_sched_getcpu="1"],
+              [have_sched_getcpu="0"]
+             )
+if test "x$have_sched_getcpu" = "x1" ; then
+  AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ])
+fi
+
+dnl Check if the GNU-specific sched_setaffinity function exists.
+AC_CHECK_FUNC([sched_setaffinity],
+              [have_sched_setaffinity="1"],
+              [have_sched_setaffinity="0"]
+             )
+if test "x$have_sched_setaffinity" = "x1" ; then
+  AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ])
+fi
+
 dnl Check if the Solaris/BSD issetugid function exists.
 AC_CHECK_FUNC([issetugid],
               [have_issetugid="1"],
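
sched_getcpu() is what lets the new per-CPU arena mode key allocations off the calling thread's CPU. A quick standalone check of the primitive, Linux/glibc only:

```c
/* Report the CPU the calling thread currently runs on (GNU extension). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void) {
    int cpu = sched_getcpu();
    if (cpu < 0) {
        perror("sched_getcpu");
        return 1;
    }
    printf("running on cpu %d\n", cpu);
    return 0;
}
```
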
@@ -1481,6 +1604,7 @@ AC_CHECK_FUNC([_malloc_thread_cleanup],
              )
 if test "x$have__malloc_thread_cleanup" = "x1" ; then
   AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ])
+  wrap_syms="${wrap_syms} _malloc_thread_cleanup"
   force_tls="1"
 fi
 
@@ -1493,6 +1617,7 @@ AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
              )
 if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
   AC_DEFINE([JEMALLOC_MUTEX_INIT_CB])
+  wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
 fi
 
 dnl Disable lazy locking by default.
@@ -1520,38 +1645,22 @@ if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
   enable_lazy_lock="0"
 fi
 if test "x$enable_lazy_lock" = "x1" ; then
-  if test "x$abi" != "xpecoff" ; then
-    AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])])
-    AC_CHECK_FUNC([dlsym], [],
-      [AC_CHECK_LIB([dl], [dlsym], [JE_APPEND_VS(LIBS, -ldl)],
-                    [AC_MSG_ERROR([libdl is missing])])
-      ])
-  fi
+  if test "x$have_dlsym" = "x1" ; then
     AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+  else
+    AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.])
+  fi
 fi
 AC_SUBST([enable_lazy_lock])
 
-AC_ARG_ENABLE([tls],
-  [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])],
-if test "x$enable_tls" = "xno" ; then
-  enable_tls="0"
-else
-  enable_tls="1"
-fi
-,
-enable_tls=""
-)
-if test "x${enable_tls}" = "x" ; then
+dnl Automatically configure TLS.
 if test "x${force_tls}" = "x1" ; then
-  AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
   enable_tls="1"
 elif test "x${force_tls}" = "x0" ; then
-  AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
   enable_tls="0"
 else
   enable_tls="1"
 fi
-fi
 if test "x${enable_tls}" = "x1" ; then
   AC_MSG_CHECKING([for TLS])
   AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
@@ -1570,12 +1679,7 @@ else
 fi
 AC_SUBST([enable_tls])
 if test "x${enable_tls}" = "x1" ; then
-  if test "x${force_tls}" = "x0" ; then
-    AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
-  fi
   AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
-elif test "x${force_tls}" = "x1" ; then
-  AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
 fi
 
 dnl ============================================================================
@@ -1593,37 +1697,45 @@ JE_COMPILABLE([C11 atomics], [
     uint64_t x = 1;
     volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
     uint64_t r = atomic_fetch_add(a, x) + x;
-    return (r == 0);
-], [je_cv_c11atomics])
-if test "x${je_cv_c11atomics}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_C11ATOMICS])
+    return r == 0;
+], [je_cv_c11_atomics])
+if test "x${je_cv_c11_atomics}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_C11_ATOMICS])
 fi
 
 dnl ============================================================================
-dnl Check for atomic(9) operations as provided on FreeBSD.
+dnl Check for GCC-style __atomic atomics.
 
-JE_COMPILABLE([atomic(9)], [
-#include <sys/types.h>
-#include <machine/atomic.h>
-#include <inttypes.h>
+JE_COMPILABLE([GCC __atomic atomics], [
 ], [
-  {
-    uint32_t x32 = 0;
-    volatile uint32_t *x32p = &x32;
-    atomic_fetchadd_32(x32p, 1);
-  }
-  {
-    unsigned long xlong = 0;
-    volatile unsigned long *xlongp = &xlong;
-    atomic_fetchadd_long(xlongp, 1);
-  }
-], [je_cv_atomic9])
-if test "x${je_cv_atomic9}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_ATOMIC9])
+  int x = 0;
+  int val = 1;
+  int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
+  int after_add = x;
+  return after_add == 1;
+], [je_cv_gcc_atomic_atomics])
+if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS])
+fi
+
+dnl ============================================================================
+dnl Check for GCC-style __sync atomics.
+
+JE_COMPILABLE([GCC __sync atomics], [
+], [
+  int x = 0;
+  int before_add = __sync_fetch_and_add(&x, 1);
+  int after_add = x;
+  return (before_add == 0) && (after_add == 1);
+], [je_cv_gcc_sync_atomics])
+if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS])
 fi
 
 dnl ============================================================================
 dnl Check for atomic(3) operations as provided on Darwin.
+dnl We need this not for the atomic operations (which are provided above), but
+dnl rather for the OSSpinLock type it exposes.
 
 JE_COMPILABLE([Darwin OSAtomic*()], [
 #include <libkern/OSAtomic.h>
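
The three probes above (C11 stdatomic, __atomic, __sync) all test the same fetch-add primitive through different interfaces. A single runnable program exercising all three, assuming a reasonably recent GCC or Clang:

```c
/* Exercise the three atomic interfaces the configure probes look for. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* C11 stdatomic (JEMALLOC_C11_ATOMICS) */
    atomic_uint_least64_t a = 0;
    uint64_t r = atomic_fetch_add(&a, 1) + 1; /* fetch_add returns old value */

    /* GCC __atomic builtins (JEMALLOC_GCC_ATOMIC_ATOMICS) */
    int x = 0;
    __atomic_fetch_add(&x, 1, __ATOMIC_RELAXED);

    /* Legacy __sync builtins (JEMALLOC_GCC_SYNC_ATOMICS) */
    int before = __sync_fetch_and_add(&x, 1);

    printf("r=%llu x=%d before=%d\n", (unsigned long long)r, x, before);
    return !(r == 1 && x == 2 && before == 1);
}
```
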
@@ -1682,15 +1794,12 @@ if test "x${je_cv_madvise}" = "xyes" ; then
   madvise((void *)0, 0, MADV_HUGEPAGE);
   madvise((void *)0, 0, MADV_NOHUGEPAGE);
 ], [je_cv_thp])
-if test "x${je_cv_thp}" = "xyes" ; then
-  AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ])
-fi
 fi
 
 dnl Enable transparent huge page support by default.
 AC_ARG_ENABLE([thp],
   [AS_HELP_STRING([--disable-thp],
-  [Disable transparent huge page supprot])],
+  [Disable transparent huge page support])],
 [if test "x$enable_thp" = "xno" -o "x${je_cv_thp}" != "xyes" ; then
   enable_thp="0"
 else
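
The je_cv_thp probe simply checks that MADV_HUGEPAGE / MADV_NOHUGEPAGE compile. A runnable Linux-only sketch of the actual operation, mapping a region and requesting transparent-huge-page backing for it:

```c
/* Request THP backing for an anonymous mapping (Linux-only sketch). */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void) {
    size_t len = 2 * 1024 * 1024; /* one 2 MiB huge page worth */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    if (madvise(p, len, MADV_HUGEPAGE) != 0) perror("madvise");
    munmap(p, len);
    return 0;
}
```
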
@@ -1825,6 +1934,15 @@ if test "x${enable_zone_allocator}" = "x1" ; then
   AC_DEFINE([JEMALLOC_ZONE], [ ])
 fi
 
+dnl ============================================================================
+dnl Enable background threads if possible.
+
+if test "x${have_pthread}" = "x1" -a "x${have_dlsym}" = "x1" \
+    -a "x${je_cv_os_unfair_lock}" != "xyes" \
+    -a "x${je_cv_osspin}" != "xyes" ; then
+  AC_DEFINE([JEMALLOC_BACKGROUND_THREAD])
+fi
+
 dnl ============================================================================
 dnl Check for glibc malloc hooks
 
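
When the build defines JEMALLOC_BACKGROUND_THREAD, purging can run on dedicated internal threads instead of piggybacking on application allocations; the feature is switched on at runtime. A sketch, assuming a program linked against a jemalloc 5.x that exposes the background_thread mallctl:

```c
/* Enable jemalloc's background purging threads at runtime (jemalloc 5+).
 * Build with something like: cc bg.c -ljemalloc */
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    bool enable = true;
    if (mallctl("background_thread", NULL, NULL, &enable,
        sizeof(enable)) != 0) {
        fprintf(stderr, "background_thread not supported in this build\n");
        return 1;
    }
    return 0;
}
```
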
@@ -1841,7 +1959,10 @@ extern void *(* __realloc_hook)(void *ptr, size_t size);
   if (__free_hook && ptr) __free_hook(ptr);
 ], [je_cv_glibc_malloc_hook])
 if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+  if test "x${JEMALLOC_PREFIX}" = "x" ; then
     AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ])
+    wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
+  fi
 fi
 
 JE_COMPILABLE([glibc memalign hook], [
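
Note the hook support is now gated on an empty JEMALLOC_PREFIX: only an interposing build needs to own the glibc hook variables. What the probe body checks, as a standalone program (these hooks are long deprecated and absent from current glibc, so this compiles only where the probe itself would succeed):

```c
/* Touch the legacy glibc hook variables the probe above looks for. */
#include <stddef.h>

extern void (*__free_hook)(void *ptr);
extern void *(*__realloc_hook)(void *ptr, size_t size);

int main(void) {
    void *ptr = NULL;
    if (__free_hook && ptr) __free_hook(ptr);     /* mirrors the probe */
    if (__realloc_hook) ptr = __realloc_hook(ptr, 16);
    return 0;
}
```
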
@@ -1853,7 +1974,10 @@ extern void *(* __memalign_hook)(size_t alignment, size_t size);
   if (__memalign_hook) ptr = __memalign_hook(16, 7);
 ], [je_cv_glibc_memalign_hook])
 if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+  if test "x${JEMALLOC_PREFIX}" = "x" ; then
     AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ])
+    wrap_syms="${wrap_syms} __memalign_hook"
+  fi
 fi
 
 JE_COMPILABLE([pthreads adaptive mutexes], [
@@ -1875,20 +1999,6 @@ AC_HEADER_STDBOOL
 dnl ============================================================================
 dnl Define commands that generate output files.
 
-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [
-  mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
-], [
-  srcdir="${srcdir}"
-  objroot="${objroot}"
-])
 AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
   f="${objroot}include/jemalloc/internal/public_symbols.txt"
   mkdir -p "${objroot}include/jemalloc/internal"
@@ -1912,6 +2022,31 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
   public_syms="${public_syms}"
   JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
 ])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [
+  f="${objroot}include/jemalloc/internal/private_symbols.awk"
+  mkdir -p "${objroot}include/jemalloc/internal"
+  export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk"
+], [
+  srcdir="${srcdir}"
+  objroot="${objroot}"
+  public_syms="${public_syms}"
+  wrap_syms="${wrap_syms}"
+  SYM_PREFIX="${SYM_PREFIX}"
+  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [
+  f="${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+  mkdir -p "${objroot}include/jemalloc/internal"
+  export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+], [
+  srcdir="${srcdir}"
+  objroot="${objroot}"
+  public_syms="${public_syms}"
+  wrap_syms="${wrap_syms}"
+  SYM_PREFIX="${SYM_PREFIX}"
+])
 AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
   mkdir -p "${objroot}include/jemalloc/internal"
   "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
@@ -1928,15 +2063,13 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
 ])
 AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
   mkdir -p "${objroot}include/jemalloc/internal"
-  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
+  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" 3 "${LG_PAGE_SIZES}" 2 > "${objroot}include/jemalloc/internal/size_classes.h"
 ], [
   SHELL="${SHELL}"
   srcdir="${srcdir}"
   objroot="${objroot}"
   LG_QUANTA="${LG_QUANTA}"
-  LG_TINY_MIN=${LG_TINY_MIN}
   LG_PAGE_SIZES="${LG_PAGE_SIZES}"
-  LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
 ])
 AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
   mkdir -p "${objroot}include/jemalloc"
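
lg_tiny_min and lg_size_class_group are no longer configurable: size_classes.sh is now always invoked with 3 (8-byte minimum tiny class) and 2 (2^2 = 4 size classes per doubling). A small sketch of what 4-per-doubling spacing means in practice:

```c
/* With lg_size_class_group == 2, each doubling [2^n, 2^(n+1)] is split
 * into 4 evenly spaced classes; between 64 and 128 bytes that yields
 * 80, 96, 112, and 128. */
#include <stdio.h>

int main(void) {
    int base = 64;
    for (int i = 1; i <= 4; i++) {
        printf("size class: %d\n", base + i * (base / 4));
    }
    return 0;
}
```
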
@@ -2000,8 +2133,13 @@ AC_MSG_RESULT([CONFIGURE_CFLAGS   : ${CONFIGURE_CFLAGS}])
 AC_MSG_RESULT([SPECIFIED_CFLAGS   : ${SPECIFIED_CFLAGS}])
 AC_MSG_RESULT([EXTRA_CFLAGS       : ${EXTRA_CFLAGS}])
 AC_MSG_RESULT([CPPFLAGS           : ${CPPFLAGS}])
+AC_MSG_RESULT([CXX                : ${CXX}])
+AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}])
+AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}])
+AC_MSG_RESULT([EXTRA_CXXFLAGS     : ${EXTRA_CXXFLAGS}])
 AC_MSG_RESULT([LDFLAGS            : ${LDFLAGS}])
 AC_MSG_RESULT([EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}])
+AC_MSG_RESULT([DSO_LDFLAGS        : ${DSO_LDFLAGS}])
 AC_MSG_RESULT([LIBS               : ${LIBS}])
 AC_MSG_RESULT([RPATH_EXTRA        : ${RPATH_EXTRA}])
 AC_MSG_RESULT([])
@@ -2026,22 +2164,17 @@ AC_MSG_RESULT([                   : ${JEMALLOC_PRIVATE_NAMESPACE}])
 AC_MSG_RESULT([install_suffix     : ${install_suffix}])
 AC_MSG_RESULT([malloc_conf        : ${config_malloc_conf}])
 AC_MSG_RESULT([autogen            : ${enable_autogen}])
-AC_MSG_RESULT([cc-silence         : ${enable_cc_silence}])
 AC_MSG_RESULT([debug              : ${enable_debug}])
-AC_MSG_RESULT([code-coverage      : ${enable_code_coverage}])
 AC_MSG_RESULT([stats              : ${enable_stats}])
 AC_MSG_RESULT([prof               : ${enable_prof}])
 AC_MSG_RESULT([prof-libunwind     : ${enable_prof_libunwind}])
 AC_MSG_RESULT([prof-libgcc        : ${enable_prof_libgcc}])
 AC_MSG_RESULT([prof-gcc           : ${enable_prof_gcc}])
-AC_MSG_RESULT([tcache             : ${enable_tcache}])
 AC_MSG_RESULT([thp                : ${enable_thp}])
 AC_MSG_RESULT([fill               : ${enable_fill}])
 AC_MSG_RESULT([utrace             : ${enable_utrace}])
-AC_MSG_RESULT([valgrind           : ${enable_valgrind}])
 AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
-AC_MSG_RESULT([munmap             : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])
-AC_MSG_RESULT([tls                : ${enable_tls}])
 AC_MSG_RESULT([cache-oblivious    : ${enable_cache_oblivious}])
+AC_MSG_RESULT([cxx                : ${enable_cxx}])
 AC_MSG_RESULT([===============================================================================])
16 coverage.sh
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-set -e
-
-objdir=$1
-suffix=$2
-shift 2
-objs=$@
-
-gcov -b -p -f -o "${objdir}" ${objs}
-
-# Move gcov outputs so that subsequent gcov invocations won't clobber results
-# for the same sources with different compilation flags.
-for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
-  mv "${f}" "${f}.${suffix}"
-done
1505 doc/jemalloc.xml.in
File diff suppressed because it is too large
96 include/jemalloc/internal/arena_externs.h (new file)
@@ -0,0 +1,96 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
+
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
+
+extern ssize_t opt_dirty_decay_ms;
+extern ssize_t opt_muzzy_decay_ms;
+
+extern const arena_bin_info_t arena_bin_info[NBINS];
+
+extern percpu_arena_mode_t opt_percpu_arena;
+extern const char *percpu_arena_mode_names[];
+
+extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
+extern malloc_mutex_t arenas_lock;
+
+void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    szind_t szind, uint64_t nrequests);
+void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    size_t size);
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
+    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
+void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent);
+#ifdef JEMALLOC_JET
+size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
+#endif
+extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+    size_t usize, size_t alignment, bool *zero);
+void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent);
+void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, size_t oldsize);
+void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, size_t oldsize);
+ssize_t arena_dirty_decay_ms_get(arena_t *arena);
+bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
+bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+    bool all);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_destroy(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
+    bool zero);
+
+typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
+
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+    szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache);
+void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
+void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+    bool slow_path);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, void *ptr);
+void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero);
+void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+    size_t size, size_t alignment, bool zero, tcache_t *tcache);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_dirty_decay_ms_default_get(void);
+bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
+ssize_t arena_muzzy_decay_ms_default_get(void);
+bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+size_t arena_extent_sn_next(arena_t *arena);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+void arena_boot(void);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+
+#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
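
The dirty/muzzy decay entry points declared above replace the old purging scheme, and the same knobs surface through mallctl. A sketch of adjusting the default dirty-page decay time, assuming a program linked against this jemalloc (5.x mallctl namespace):

```c
/* Set the default dirty-page decay time for new arenas via mallctl.
 * Build with something like: cc decay.c -ljemalloc */
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    ssize_t decay_ms = 10000; /* purge dirty pages ~10s after they appear */
    if (mallctl("arenas.dirty_decay_ms", NULL, NULL, &decay_ms,
        sizeof(decay_ms)) != 0) {
        fprintf(stderr, "arenas.dirty_decay_ms not available\n");
        return 1;
    }
    return 0;
}
```
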
57 include/jemalloc/internal/arena_inlines_a.h (new file)
@@ -0,0 +1,57 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
+
+static inline unsigned
+arena_ind_get(const arena_t *arena) {
+    return base_ind_get(arena->base);
+}
+
+static inline void
+arena_internal_add(arena_t *arena, size_t size) {
+    atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline void
+arena_internal_sub(arena_t *arena, size_t size) {
+    atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
+}
+
+static inline size_t
+arena_internal_get(arena_t *arena) {
+    return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
+}
+
+static inline bool
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
+    cassert(config_prof);
+
+    if (likely(prof_interval == 0)) {
+        return false;
+    }
+
+    return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
+}
+
+static inline void
+percpu_arena_update(tsd_t *tsd, unsigned cpu) {
+    assert(have_percpu_arena);
+    arena_t *oldarena = tsd_arena_get(tsd);
+    assert(oldarena != NULL);
+    unsigned oldind = arena_ind_get(oldarena);
+
+    if (oldind != cpu) {
+        unsigned newind = cpu;
+        arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
+        assert(newarena != NULL);
+
+        /* Set new arena/tcache associations. */
+        arena_migrate(tsd, oldind, newind);
+        tcache_t *tcache = tcache_get(tsd);
+        if (tcache != NULL) {
+            tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+                newarena);
+        }
+    }
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
361 include/jemalloc/internal/arena_inlines_b.h (new file)
@@ -0,0 +1,361 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+
+static inline szind_t
+arena_bin_index(arena_t *arena, arena_bin_t *bin) {
+    szind_t binind = (szind_t)(bin - arena->bins);
+    assert(binind < NBINS);
+    return binind;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+    cassert(config_prof);
+    assert(ptr != NULL);
+
+    /* Static check. */
+    if (alloc_ctx == NULL) {
+        const extent_t *extent = iealloc(tsdn, ptr);
+        if (unlikely(!extent_slab_get(extent))) {
+            return large_prof_tctx_get(tsdn, extent);
+        }
+    } else {
+        if (unlikely(!alloc_ctx->slab)) {
+            return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
+        }
+    }
+    return (prof_tctx_t *)(uintptr_t)1U;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+    cassert(config_prof);
+    assert(ptr != NULL);
+
+    /* Static check. */
+    if (alloc_ctx == NULL) {
+        extent_t *extent = iealloc(tsdn, ptr);
+        if (unlikely(!extent_slab_get(extent))) {
+            large_prof_tctx_set(tsdn, extent, tctx);
+        }
+    } else {
+        if (unlikely(!alloc_ctx->slab)) {
+            large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
+        }
+    }
+}
+
+static inline void
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+    cassert(config_prof);
+    assert(ptr != NULL);
+
+    extent_t *extent = iealloc(tsdn, ptr);
+    assert(!extent_slab_get(extent));
+
+    large_prof_tctx_reset(tsdn, extent);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
+    tsd_t *tsd;
+    ticker_t *decay_ticker;
+
+    if (unlikely(tsdn_null(tsdn))) {
+        return;
+    }
+    tsd = tsdn_tsd(tsdn);
+    decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
+    if (unlikely(decay_ticker == NULL)) {
+        return;
+    }
+    if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+        arena_decay(tsdn, arena, false, false);
+    }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
+    malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
+    malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
+
+    arena_decay_ticks(tsdn, arena, 1);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
+    tcache_t *tcache, bool slow_path) {
+    assert(!tsdn_null(tsdn) || tcache == NULL);
+    assert(size != 0);
+
+    if (likely(tcache != NULL)) {
+        if (likely(size <= SMALL_MAXCLASS)) {
+            return tcache_alloc_small(tsdn_tsd(tsdn), arena,
+                tcache, size, ind, zero, slow_path);
+        }
+        if (likely(size <= tcache_maxclass)) {
+            return tcache_alloc_large(tsdn_tsd(tsdn), arena,
+                tcache, size, ind, zero, slow_path);
+        }
+        /* (size > tcache_maxclass) case falls through. */
+        assert(size > tcache_maxclass);
+    }
+
+    return arena_malloc_hard(tsdn, arena, size, ind, zero);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(tsdn_t *tsdn, const void *ptr) {
+    return extent_arena_get(iealloc(tsdn, ptr));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_salloc(tsdn_t *tsdn, const void *ptr) {
+    assert(ptr != NULL);
+
+    rtree_ctx_t rtree_ctx_fallback;
+    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+    szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
+        (uintptr_t)ptr, true);
+    assert(szind != NSIZES);
+
+    return sz_index2size(szind);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
+    /*
+     * Return 0 if ptr is not within an extent managed by jemalloc.  This
+     * function has two extra costs relative to isalloc():
+     * - The rtree calls cannot claim to be dependent lookups, which induces
+     *   rtree lookup load dependencies.
+     * - The lookup may fail, so there is an extra branch to check for
+     *   failure.
+     */
+
+    rtree_ctx_t rtree_ctx_fallback;
+    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+    extent_t *extent;
+    szind_t szind;
+    if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
+        (uintptr_t)ptr, false, &extent, &szind)) {
+        return 0;
+    }
+
+    if (extent == NULL) {
+        return 0;
+    }
+    assert(extent_state_get(extent) == extent_state_active);
+    /* Only slab members should be looked up via interior pointers. */
+    assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+
+    assert(szind != NSIZES);
+
+    return sz_index2size(szind);
+}
+
+static inline void
+arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
+    assert(ptr != NULL);
+
+    rtree_ctx_t rtree_ctx_fallback;
+    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+    szind_t szind;
+    bool slab;
+    rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+        true, &szind, &slab);
+
+    if (config_debug) {
+        extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+            rtree_ctx, (uintptr_t)ptr, true);
+        assert(szind == extent_szind_get(extent));
+        assert(szind < NSIZES);
+        assert(slab == extent_slab_get(extent));
+    }
+
+    if (likely(slab)) {
+        /* Small allocation. */
+        arena_dalloc_small(tsdn, ptr);
+    } else {
+        extent_t *extent = iealloc(tsdn, ptr);
+        large_dalloc(tsdn, extent);
+    }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+    alloc_ctx_t *alloc_ctx, bool slow_path) {
+    assert(!tsdn_null(tsdn) || tcache == NULL);
+    assert(ptr != NULL);
+
+    if (unlikely(tcache == NULL)) {
+        arena_dalloc_no_tcache(tsdn, ptr);
+        return;
+    }
+
+    szind_t szind;
+    bool slab;
+    rtree_ctx_t *rtree_ctx;
+    if (alloc_ctx != NULL) {
+        szind = alloc_ctx->szind;
+        slab = alloc_ctx->slab;
+        assert(szind != NSIZES);
+    } else {
+        rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+        rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
+            (uintptr_t)ptr, true, &szind, &slab);
+    }
+
+    if (config_debug) {
+        rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
+        extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
+            rtree_ctx, (uintptr_t)ptr, true);
+        assert(szind == extent_szind_get(extent));
+        assert(szind < NSIZES);
+        assert(slab == extent_slab_get(extent));
+    }
+
+    if (likely(slab)) {
+        /* Small allocation. */
+        tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
+            slow_path);
+    } else {
+        if (szind < nhbins) {
+            if (config_prof && unlikely(szind < NBINS)) {
+                arena_dalloc_promoted(tsdn, ptr, tcache,
+                    slow_path);
+            } else {
+                tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+                    szind, slow_path);
+            }
+        } else {
+            extent_t *extent = iealloc(tsdn, ptr);
+            large_dalloc(tsdn, extent);
+        }
+    }
+}
+
+static inline void
+arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
+    assert(ptr != NULL);
+    assert(size <= LARGE_MAXCLASS);
+
+    szind_t szind;
+    bool slab;
+    if (!config_prof || !opt_prof) {
+        /*
+         * There is no risk of being confused by a promoted sampled
+         * object, so base szind and slab on the given size.
+         */
+        szind = sz_size2index(size);
+        slab = (szind < NBINS);
+    }
+
+    if ((config_prof && opt_prof) || config_debug) {
+        rtree_ctx_t rtree_ctx_fallback;
+        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+            &rtree_ctx_fallback);
|
|
||||||
|
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||||
|
(uintptr_t)ptr, true, &szind, &slab);
|
||||||
|
|
||||||
|
assert(szind == sz_size2index(size));
|
||||||
|
assert((config_prof && opt_prof) || slab == (szind < NBINS));
|
||||||
|
|
||||||
|
if (config_debug) {
|
||||||
|
extent_t *extent = rtree_extent_read(tsdn,
|
||||||
|
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
|
||||||
|
assert(szind == extent_szind_get(extent));
|
||||||
|
assert(slab == extent_slab_get(extent));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (likely(slab)) {
|
||||||
|
/* Small allocation. */
|
||||||
|
arena_dalloc_small(tsdn, ptr);
|
||||||
|
} else {
|
||||||
|
extent_t *extent = iealloc(tsdn, ptr);
|
||||||
|
large_dalloc(tsdn, extent);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE void
|
||||||
|
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
|
||||||
|
alloc_ctx_t *alloc_ctx, bool slow_path) {
|
||||||
|
assert(!tsdn_null(tsdn) || tcache == NULL);
|
||||||
|
assert(ptr != NULL);
|
||||||
|
assert(size <= LARGE_MAXCLASS);
|
||||||
|
|
||||||
|
if (unlikely(tcache == NULL)) {
|
||||||
|
arena_sdalloc_no_tcache(tsdn, ptr, size);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
szind_t szind;
|
||||||
|
bool slab;
|
||||||
|
UNUSED alloc_ctx_t local_ctx;
|
||||||
|
if (config_prof && opt_prof) {
|
||||||
|
if (alloc_ctx == NULL) {
|
||||||
|
/* Uncommon case and should be a static check. */
|
||||||
|
rtree_ctx_t rtree_ctx_fallback;
|
||||||
|
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
|
||||||
|
&rtree_ctx_fallback);
|
||||||
|
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||||
|
(uintptr_t)ptr, true, &local_ctx.szind,
|
||||||
|
&local_ctx.slab);
|
||||||
|
assert(local_ctx.szind == sz_size2index(size));
|
||||||
|
alloc_ctx = &local_ctx;
|
||||||
|
}
|
||||||
|
slab = alloc_ctx->slab;
|
||||||
|
szind = alloc_ctx->szind;
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* There is no risk of being confused by a promoted sampled
|
||||||
|
* object, so base szind and slab on the given size.
|
||||||
|
*/
|
||||||
|
szind = sz_size2index(size);
|
||||||
|
slab = (szind < NBINS);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config_debug) {
|
||||||
|
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
|
||||||
|
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
|
||||||
|
(uintptr_t)ptr, true, &szind, &slab);
|
||||||
|
extent_t *extent = rtree_extent_read(tsdn,
|
||||||
|
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
|
||||||
|
assert(szind == extent_szind_get(extent));
|
||||||
|
assert(slab == extent_slab_get(extent));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (likely(slab)) {
|
||||||
|
/* Small allocation. */
|
||||||
|
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||||
|
slow_path);
|
||||||
|
} else {
|
||||||
|
if (szind < nhbins) {
|
||||||
|
if (config_prof && unlikely(szind < NBINS)) {
|
||||||
|
arena_dalloc_promoted(tsdn, ptr, tcache,
|
||||||
|
slow_path);
|
||||||
|
} else {
|
||||||
|
tcache_dalloc_large(tsdn_tsd(tsdn),
|
||||||
|
tcache, ptr, szind, slow_path);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
extent_t *extent = iealloc(tsdn, ptr);
|
||||||
|
large_dalloc(tsdn, extent);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
|
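Editorial sketch, not part of this commit: arena_dalloc() above accepts an optional alloc_ctx_t so that a caller which has already resolved the pointer's size class and slab bit can skip the callee's rtree lookup. A hypothetical free-path wrapper, built only from functions that appear in this header (the "example_" name is invented for illustration), might look like:

/* Hypothetical caller; resolves the rtree metadata once, then reuses it. */
static void
example_free_fastpath(tsd_t *tsd, void *ptr, tcache_t *tcache) {
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	/* One lookup serves both sanity checks and the dalloc call. */
	arena_dalloc(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false);
}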
11 include/jemalloc/internal/arena_structs_a.h Normal file
@@ -0,0 +1,11 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H

#include "jemalloc/internal/bitmap.h"

struct arena_slab_data_s {
	/* Per region allocated/deallocated bitmap. */
	bitmap_t	bitmap[BITMAP_GROUPS_MAX];
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
284 include/jemalloc/internal/arena_structs_b.h Normal file
@@ -0,0 +1,284 @@
#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ticker.h"

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each slab has the following layout:
 *
 *   /--------------------\
 *   | region 0           |
 *   |--------------------|
 *   | region 1           |
 *   |--------------------|
 *   | ...                |
 *   | ...                |
 *   | ...                |
 *   |--------------------|
 *   | region nregs-1     |
 *   \--------------------/
 */
struct arena_bin_info_s {
	/* Size of regions in a slab for this bin's size class. */
	size_t			reg_size;

	/* Total size of a slab for this bin's size class. */
	size_t			slab_size;

	/* Total number of regions in a slab for this bin's size class. */
	uint32_t		nregs;

	/*
	 * Metadata used to manipulate bitmaps for slabs associated with this
	 * bin.
	 */
	bitmap_info_t		bitmap_info;
};

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	decay_stats_t		*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
};

struct arena_bin_s {
	/* All operations on arena_bin_t fields require lock ownership. */
	malloc_mutex_t		lock;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	extent_t		*slabcur;

	/*
	 * Heap of non-full slabs.  This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	extent_heap_t		slabs_nonfull;

	/* List used to track full slabs. */
	extent_list_t		slabs_full;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t			*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit if
	 * opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)	tcache_ql;
	malloc_mutex_t		tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * Synchronization: extent_grow_mtx
	 */
	pszind_t		extent_grow_next;
	malloc_mutex_t		extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	arena_bin_t		bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t			szind;
	bool			slab;
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
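To make the decay bookkeeping above concrete: the consumer of backlog weights each epoch's dirty-page count by a smoothstep coefficient to derive the current ceiling on unpurged pages. A sketch of that computation (illustrative, not part of this hunk; it assumes the h_steps table and SMOOTHSTEP_BFP fixed-point shift from the included smoothstep.h, and the real logic lives in arena.c):

/* Sketch: weighted sum of the per-epoch backlog, in pages. */
static size_t
example_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum = 0;
	for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		/* Older epochs get smaller smoothstep weights. */
		sum += decay->backlog[i] * h_steps[i];
	}
	/* h_steps is fixed point; shift back down to a page count. */
	return (size_t)(sum >> SMOOTHSTEP_BFP);
}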
45 include/jemalloc/internal/arena_types.h Normal file
@@ -0,0 +1,45 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H

/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS		(LG_PAGE - LG_TINY_MIN)
#define SLAB_MAXREGS		(1U << LG_SLAB_MAXREGS)

/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT	ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT	ZD(10 * 1000)
/* Number of event ticks between time checks. */
#define DECAY_NTICKS_PER_UPDATE	1000

typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
typedef struct alloc_ctx_s alloc_ctx_t;

typedef enum {
	percpu_arena_mode_names_base	= 0, /* Used for options processing. */

	/*
	 * *_uninit are used only during bootstrapping, and must correspond
	 * to initialized variant plus percpu_arena_mode_enabled_base.
	 */
	percpu_arena_uninit		= 0,
	per_phycpu_arena_uninit		= 1,

	/* All non-disabled modes must come after percpu_arena_disabled. */
	percpu_arena_disabled		= 2,

	percpu_arena_mode_names_limit	= 3, /* Used for options processing. */
	percpu_arena_mode_enabled_base	= 3,

	percpu_arena			= 3,
	per_phycpu_arena		= 4  /* Hyper threads share arena. */
} percpu_arena_mode_t;

#define PERCPU_ARENA_ENABLED(m)	((m) >= percpu_arena_mode_enabled_base)
#define PERCPU_ARENA_DEFAULT	percpu_arena_disabled

#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
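The correspondence that the *_uninit comment demands can be checked directly. A small illustrative sketch (not in the diff; the "example_" function is invented, and it assumes the header above plus <assert.h>): bootstrapping maps an uninitialized mode to its initialized variant by adding percpu_arena_mode_enabled_base.

#include <assert.h>

static percpu_arena_mode_t
example_percpu_arena_init(percpu_arena_mode_t uninit_mode) {
	/* 0 + 3 == percpu_arena; 1 + 3 == per_phycpu_arena. */
	assert(percpu_arena_uninit + percpu_arena_mode_enabled_base ==
	    percpu_arena);
	assert(per_phycpu_arena_uninit + percpu_arena_mode_enabled_base ==
	    per_phycpu_arena);
	return (percpu_arena_mode_t)(uninit_mode +
	    percpu_arena_mode_enabled_base);
}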
@@ -1,3 +1,6 @@
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
 /*
  * Define a custom assert() in order to reduce the chances of deadlock during
  * assertion failure.
@@ -37,9 +40,17 @@
 
 #ifndef assert_not_implemented
 #define assert_not_implemented(e) do {					\
-	if (unlikely(config_debug && !(e)))				\
+	if (unlikely(config_debug && !(e))) {				\
 		not_implemented();					\
+	}								\
 } while (0)
 #endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#ifndef cassert
+#define cassert(c) do {							\
+	if (unlikely(!(c))) {						\
+		not_reached();						\
+	}								\
+} while (0)
+#endif
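A quick illustration (not part of the commit) of why these macros are wrapped in do { ... } while (0): a brace-only macro leaves a stray semicolon after expansion at the call site, which breaks if/else chaining, whereas the do/while form behaves as a single statement. The names below are invented for the example.

#include <stdlib.h>

#define BAD_CHECK(c)  { if (!(c)) { abort(); } }		/* brace-only */
#define GOOD_CHECK(c) do { if (!(c)) { abort(); } } while (0)

void
example(int cond, int x) {
	/*
	 * With BAD_CHECK this would be a syntax error: the expansion ends
	 * in "};", so the else can no longer attach to the if.
	 */
	if (cond)
		GOOD_CHECK(x);
	else
		abort();
}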
@@ -1,651 +1,77 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
-#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_z(p) atomic_add_z(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-/*
- * All arithmetic functions return the arithmetic result of the atomic
- * operation.  Some atomic operation APIs return the value prior to mutation, in
- * which case the following functions must redundantly compute the result so
- * that it can be returned.  These functions are normally inlined, so the extra
- * operations can be optimized away if the return values aren't used by the
- * callers.
- *
- *   <t> atomic_read_<t>(<t> *p) { return (*p); }
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
- *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
- *   {
- *     if (*p != c)
- *       return (true);
- *     *p = s;
- *     return (false);
- *   }
- *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
- */
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t	atomic_add_uint64(uint64_t *p, uint64_t x);
-uint64_t	atomic_sub_uint64(uint64_t *p, uint64_t x);
-bool	atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
-void	atomic_write_uint64(uint64_t *p, uint64_t x);
-uint32_t	atomic_add_uint32(uint32_t *p, uint32_t x);
-uint32_t	atomic_sub_uint32(uint32_t *p, uint32_t x);
-bool	atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
-void	atomic_write_uint32(uint32_t *p, uint32_t x);
-void	*atomic_add_p(void **p, void *x);
-void	*atomic_sub_p(void **p, void *x);
-bool	atomic_cas_p(void **p, void *c, void *s);
-void	atomic_write_p(void **p, const void *x);
-size_t	atomic_add_z(size_t *p, size_t x);
-size_t	atomic_sub_z(size_t *p, size_t x);
-bool	atomic_cas_z(size_t *p, size_t c, size_t s);
-void	atomic_write_z(size_t *p, size_t x);
-unsigned	atomic_add_u(unsigned *p, unsigned x);
-unsigned	atomic_sub_u(unsigned *p, unsigned x);
-bool	atomic_cas_u(unsigned *p, unsigned c, unsigned s);
-void	atomic_write_u(unsigned *p, unsigned x);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
-/******************************************************************************/
-/* 64-bit operations. */
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# if (defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-	uint64_t t = x;
-
-	asm volatile (
-	    "lock; xaddq %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-	uint64_t t;
-
-	x = (uint64_t)(-(int64_t)x);
-	t = x;
-	asm volatile (
-	    "lock; xaddq %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	uint8_t success;
-
-	asm volatile (
-	    "lock; cmpxchgq %4, %0;"
-	    "sete %1;"
-	    : "=m" (*p), "=a" (success) /* Outputs. */
-	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-
-	return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
-	asm volatile (
-	    "xchgq %1, %0;" /* Lock is implied by xchgq. */
-	    : "=m" (*p), "+r" (x) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-# elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (atomic_fetch_add(a, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
-	atomic_store(a, x);
-}
-# elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
-	/*
-	 * atomic_fetchadd_64() doesn't exist, but we only ever use this
-	 * function on LP64 systems, so atomic_fetchadd_long() will do.
-	 */
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (atomic_fetchadd_long(p, (unsigned long)x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
-	assert(sizeof(uint64_t) == sizeof(unsigned long));
-
-	atomic_store_rel_long(p, x);
-}
-# elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
-	return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-	uint64_t o;
-
-	/*The documented OSAtomic*() API does not expose an atomic exchange. */
-	do {
-		o = atomic_read_uint64(p);
-	} while (atomic_cas_uint64(p, o, x));
-}
-# elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (InterlockedExchangeAdd64(p, x) + x);
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-	uint64_t o;
-
-	o = InterlockedCompareExchange64(p, s, c);
-	return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
-	InterlockedExchange64(p, x);
-}
-# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
-    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-JEMALLOC_INLINE uint64_t
-atomic_add_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint64_t
-atomic_sub_uint64(uint64_t *p, uint64_t x)
-{
-
-	return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
-{
-
-	return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint64(uint64_t *p, uint64_t x)
-{
-
-	__sync_lock_test_and_set(p, x);
-}
-# else
-# error "Missing implementation for 64-bit atomic operations"
-# endif
-#endif
-
-/******************************************************************************/
-/* 32-bit operations. */
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-	uint32_t t = x;
-
-	asm volatile (
-	    "lock; xaddl %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-	uint32_t t;
-
-	x = (uint32_t)(-(int32_t)x);
-	t = x;
-	asm volatile (
-	    "lock; xaddl %0, %1;"
-	    : "+r" (t), "=m" (*p) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    );
-
-	return (t + x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	uint8_t success;
-
-	asm volatile (
-	    "lock; cmpxchgl %4, %0;"
-	    "sete %1;"
-	    : "=m" (*p), "=a" (success) /* Outputs. */
-	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
-	    : "memory"
-	    );
-
-	return (!(bool)success);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
-	asm volatile (
-	    "xchgl %1, %0;" /* Lock is implied by xchgl. */
-	    : "=m" (*p), "+r" (x) /* Outputs. */
-	    : "m" (*p) /* Inputs. */
-	    : "memory" /* Clobbers. */
-	    );
-}
-# elif (defined(JEMALLOC_C11ATOMICS))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (atomic_fetch_add(a, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (atomic_fetch_sub(a, x) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	return (!atomic_compare_exchange_strong(a, &c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
-	atomic_store(a, x);
-}
-#elif (defined(JEMALLOC_ATOMIC9))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (atomic_fetchadd_32(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
-	return (!atomic_cmpset_32(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
-	atomic_store_rel_32(p, x);
-}
-#elif (defined(JEMALLOC_OSATOMIC))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
-	return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-	uint32_t o;
-
-	/*The documented OSAtomic*() API does not expose an atomic exchange. */
-	do {
-		o = atomic_read_uint32(p);
-	} while (atomic_cas_uint32(p, o, x));
-}
-#elif (defined(_MSC_VER))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (InterlockedExchangeAdd(p, x) + x);
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-	uint32_t o;
-
-	o = InterlockedCompareExchange(p, s, c);
-	return (o != c);
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
-	InterlockedExchange(p, x);
-}
-#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
-    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-JEMALLOC_INLINE uint32_t
-atomic_add_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (__sync_add_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE uint32_t
-atomic_sub_uint32(uint32_t *p, uint32_t x)
-{
-
-	return (__sync_sub_and_fetch(p, x));
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
-{
-
-	return (!__sync_bool_compare_and_swap(p, c, s));
-}
-
-JEMALLOC_INLINE void
-atomic_write_uint32(uint32_t *p, uint32_t x)
-{
-
-	__sync_lock_test_and_set(p, x);
-}
-#else
-# error "Missing implementation for 32-bit atomic operations"
-#endif
-
-/******************************************************************************/
-/* Pointer operations. */
-JEMALLOC_INLINE void *
-atomic_add_p(void **p, void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE void *
-atomic_sub_p(void **p, void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return ((void *)atomic_add_uint64((uint64_t *)p,
-	    (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((void *)atomic_add_uint32((uint32_t *)p,
-	    (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_p(void **p, void *c, void *s)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
-	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_p(void **p, const void *x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
-	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* size_t operations. */
-JEMALLOC_INLINE size_t
-atomic_add_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE size_t
-atomic_sub_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return ((size_t)atomic_add_uint64((uint64_t *)p,
-	    (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_PTR == 2)
-	return ((size_t)atomic_add_uint32((uint32_t *)p,
-	    (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_z(size_t *p, size_t c, size_t s)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_PTR == 2)
-	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_z(size_t *p, size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
-	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_PTR == 2)
-	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-/* unsigned operations. */
-JEMALLOC_INLINE unsigned
-atomic_add_u(unsigned *p, unsigned x)
-{
-
-#if (LG_SIZEOF_INT == 3)
-	return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
-#elif (LG_SIZEOF_INT == 2)
-	return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
-#endif
-}
-
-JEMALLOC_INLINE unsigned
-atomic_sub_u(unsigned *p, unsigned x)
-{
-
-#if (LG_SIZEOF_INT == 3)
-	return ((unsigned)atomic_add_uint64((uint64_t *)p,
-	    (uint64_t)-((int64_t)x)));
-#elif (LG_SIZEOF_INT == 2)
-	return ((unsigned)atomic_add_uint32((uint32_t *)p,
-	    (uint32_t)-((int32_t)x)));
-#endif
-}
-
-JEMALLOC_INLINE bool
-atomic_cas_u(unsigned *p, unsigned c, unsigned s)
-{
-
-#if (LG_SIZEOF_INT == 3)
-	return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
-#elif (LG_SIZEOF_INT == 2)
-	return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
-#endif
-}
-
-JEMALLOC_INLINE void
-atomic_write_u(unsigned *p, unsigned x)
-{
-
-#if (LG_SIZEOF_INT == 3)
-	atomic_write_uint64((uint64_t *)p, (uint64_t)x);
-#elif (LG_SIZEOF_INT == 2)
-	atomic_write_uint32((uint32_t *)p, (uint32_t)x);
-#endif
-}
-
-/******************************************************************************/
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#ifndef JEMALLOC_INTERNAL_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_H
+
+#define ATOMIC_INLINE static inline
+
+#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
+# include "jemalloc/internal/atomic_gcc_atomic.h"
+#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
+# include "jemalloc/internal/atomic_gcc_sync.h"
+#elif defined(_MSC_VER)
+# include "jemalloc/internal/atomic_msvc.h"
+#elif defined(JEMALLOC_C11_ATOMICS)
+# include "jemalloc/internal/atomic_c11.h"
+#else
+# error "Don't have atomics implemented on this platform."
+#endif
+
+/*
+ * This header gives more or less a backport of C11 atomics. The user can write
+ * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
+ * counterparts of the C11 atomic functions for type, as so:
+ *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
+ * and then write things like:
+ *   int *some_ptr;
+ *   atomic_pi_t atomic_ptr_to_int;
+ *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
+ *   int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
+ *   assert(some_ptr == prev_value);
+ * and expect things to work in the obvious way.
+ *
+ * Also included (with naming differences to avoid conflicts with the standard
+ * library):
+ *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
+ *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
+ */
+
+/*
+ * Pure convenience, so that we don't have to type "atomic_memory_order_"
+ * quite so often.
+ */
+#define ATOMIC_RELAXED atomic_memory_order_relaxed
+#define ATOMIC_ACQUIRE atomic_memory_order_acquire
+#define ATOMIC_RELEASE atomic_memory_order_release
+#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
+#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
+
+/*
+ * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
+ * fact.
+ */
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+# define JEMALLOC_ATOMIC_U64
+#endif
+
+JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
+
+/*
+ * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
+ * platform that actually needs to know the size, MSVC.
+ */
+JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
+
+JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
+
+JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
+
+JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
+
+JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
+
+#ifdef JEMALLOC_ATOMIC_U64
+JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
+#endif
+
+#undef ATOMIC_INLINE
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
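A usage sketch (illustrative, not from the commit) of the API that one of the instantiations above produces, JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR); the "example" variable and function names are invented:

static atomic_zu_t nactive_example = ATOMIC_INIT(0);

static void
track_pages(size_t npages) {
	/* Relaxed ordering suffices for a statistics counter. */
	atomic_fetch_add_zu(&nactive_example, npages, ATOMIC_RELAXED);
}

static size_t
read_pages(void) {
	return atomic_load_zu(&nactive_example, ATOMIC_RELAXED);
}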
97 include/jemalloc/internal/atomic_c11.h Normal file
@@ -0,0 +1,97 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
#define JEMALLOC_INTERNAL_ATOMIC_C11_H

#include <stdatomic.h>

#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)

#define atomic_memory_order_t memory_order
#define atomic_memory_order_relaxed memory_order_relaxed
#define atomic_memory_order_acquire memory_order_acquire
#define atomic_memory_order_release memory_order_release
#define atomic_memory_order_acq_rel memory_order_acq_rel
#define atomic_memory_order_seq_cst memory_order_seq_cst

#define atomic_fence atomic_thread_fence

#define JEMALLOC_GENERATE_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
typedef _Atomic(type)	atomic_##short_type##_t;	\
	\
ATOMIC_INLINE type	\
atomic_load_##short_type(const atomic_##short_type##_t *a,	\
    atomic_memory_order_t mo) {	\
	/*	\
	 * A strict interpretation of the C standard prevents	\
	 * atomic_load from taking a const argument, but it's	\
	 * convenient for our purposes. This cast is a workaround.	\
	 */	\
	atomic_##short_type##_t* a_nonconst =	\
	    (atomic_##short_type##_t*)a;	\
	return atomic_load_explicit(a_nonconst, mo);	\
}	\
	\
ATOMIC_INLINE void	\
atomic_store_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	atomic_store_explicit(a, val, mo);	\
}	\
	\
ATOMIC_INLINE type	\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return atomic_exchange_explicit(a, val, mo);	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return atomic_compare_exchange_weak_explicit(a, expected,	\
	    desired, success_mo, failure_mo);	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return atomic_compare_exchange_strong_explicit(a, expected,	\
	    desired, success_mo, failure_mo);	\
}

/*
 * Integral types have some special operations available that non-integral ones
 * lack.
 */
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
	\
ATOMIC_INLINE type	\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	return atomic_fetch_add_explicit(a, val, mo);	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	return atomic_fetch_sub_explicit(a, val, mo);	\
}	\
ATOMIC_INLINE type	\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	return atomic_fetch_and_explicit(a, val, mo);	\
}	\
ATOMIC_INLINE type	\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	return atomic_fetch_or_explicit(a, val, mo);	\
}	\
ATOMIC_INLINE type	\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	return atomic_fetch_xor_explicit(a, val, mo);	\
}

#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
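A sketch (not in the diff) of the intended use of the weak compare-exchange the macro generates: a retry loop, where spurious failure is acceptable because the loop simply tries again with the freshly observed value. The fetch-max helper below is invented for illustration and assumes the size_t instantiation from atomic.h:

static void
example_fetch_max_zu(atomic_zu_t *a, size_t val) {
	size_t cur = atomic_load_zu(a, ATOMIC_RELAXED);
	/* On CAS failure, cur is reloaded with the observed value; retry. */
	while (cur < val && !atomic_compare_exchange_weak_zu(a, &cur, val,
	    ATOMIC_RELAXED, ATOMIC_RELAXED)) {
	}
}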
127 include/jemalloc/internal/atomic_gcc_atomic.h Normal file
@@ -0,0 +1,127 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H

#include "jemalloc/internal/assert.h"

#define ATOMIC_INIT(...) {__VA_ARGS__}

typedef enum {
	atomic_memory_order_relaxed,
	atomic_memory_order_acquire,
	atomic_memory_order_release,
	atomic_memory_order_acq_rel,
	atomic_memory_order_seq_cst
} atomic_memory_order_t;

ATOMIC_INLINE int
atomic_enum_to_builtin(atomic_memory_order_t mo) {
	switch (mo) {
	case atomic_memory_order_relaxed:
		return __ATOMIC_RELAXED;
	case atomic_memory_order_acquire:
		return __ATOMIC_ACQUIRE;
	case atomic_memory_order_release:
		return __ATOMIC_RELEASE;
	case atomic_memory_order_acq_rel:
		return __ATOMIC_ACQ_REL;
	case atomic_memory_order_seq_cst:
		return __ATOMIC_SEQ_CST;
	}
	/* Can't happen; the switch is exhaustive. */
	not_reached();
}

ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
	__atomic_thread_fence(atomic_enum_to_builtin(mo));
}

#define JEMALLOC_GENERATE_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
typedef struct {	\
	type repr;	\
} atomic_##short_type##_t;	\
	\
ATOMIC_INLINE type	\
atomic_load_##short_type(const atomic_##short_type##_t *a,	\
    atomic_memory_order_t mo) {	\
	type result;	\
	__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo));	\
	return result;	\
}	\
	\
ATOMIC_INLINE void	\
atomic_store_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	type result;	\
	__atomic_exchange(&a->repr, &val, &result,	\
	    atomic_enum_to_builtin(mo));	\
	return result;	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
	    true, atomic_enum_to_builtin(success_mo),	\
	    atomic_enum_to_builtin(failure_mo));	\
}	\
	\
ATOMIC_INLINE bool	\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {	\
	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
	    false,	\
	    atomic_enum_to_builtin(success_mo),	\
	    atomic_enum_to_builtin(failure_mo));	\
}


#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
	\
ATOMIC_INLINE type	\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_add(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_sub(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_and(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_or(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}	\
	\
ATOMIC_INLINE type	\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	return __atomic_fetch_xor(&a->repr, val,	\
	    atomic_enum_to_builtin(mo));	\
}

#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
191 include/jemalloc/internal/atomic_gcc_sync.h Normal file
@@ -0,0 +1,191 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H

#define ATOMIC_INIT(...) {__VA_ARGS__}

typedef enum {
	atomic_memory_order_relaxed,
	atomic_memory_order_acquire,
	atomic_memory_order_release,
	atomic_memory_order_acq_rel,
	atomic_memory_order_seq_cst
} atomic_memory_order_t;

ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
	/* Easy cases first: no barrier, and full barrier. */
	if (mo == atomic_memory_order_relaxed) {
		asm volatile("" ::: "memory");
		return;
	}
	if (mo == atomic_memory_order_seq_cst) {
		asm volatile("" ::: "memory");
		__sync_synchronize();
		asm volatile("" ::: "memory");
		return;
	}
	asm volatile("" ::: "memory");
# if defined(__i386__) || defined(__x86_64__)
	/* This is implicit on x86. */
# elif defined(__ppc__)
	asm volatile("lwsync");
# elif defined(__sparc__) && defined(__arch64__)
	if (mo == atomic_memory_order_acquire) {
		asm volatile("membar #LoadLoad | #LoadStore");
	} else if (mo == atomic_memory_order_release) {
		asm volatile("membar #LoadStore | #StoreStore");
	} else {
		asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
	}
# else
	__sync_synchronize();
# endif
	asm volatile("" ::: "memory");
}

/*
 * A correct implementation of seq_cst loads and stores on weakly ordered
 * architectures could do either of the following:
 *   1. store() is weak-fence -> store -> strong fence, load() is load ->
 *      strong-fence.
 *   2. store() is strong-fence -> store, load() is strong-fence -> load ->
 *      weak-fence.
 * The tricky thing is, load() and store() above can be the load or store
 * portions of a gcc __sync builtin, so we have to follow GCC's lead, which
 * means going with strategy 2.
 * On strongly ordered architectures, the natural strategy is to stick a strong
 * fence after seq_cst stores, and have naked loads.  So we want the strong
 * fences in different places on different architectures.
 * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
 * accomplish this.
 */

ATOMIC_INLINE void
atomic_pre_sc_load_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
    (defined(__sparc__) && defined(__arch64__))
	atomic_fence(atomic_memory_order_relaxed);
# else
	atomic_fence(atomic_memory_order_seq_cst);
# endif
}

ATOMIC_INLINE void
atomic_post_sc_store_fence() {
# if defined(__i386__) || defined(__x86_64__) || \
    (defined(__sparc__) && defined(__arch64__))
	atomic_fence(atomic_memory_order_seq_cst);
# else
	atomic_fence(atomic_memory_order_relaxed);
# endif

}

#define JEMALLOC_GENERATE_ATOMICS(type, short_type,	\
    /* unused */ lg_size)	\
typedef struct {	\
	type volatile repr;	\
} atomic_##short_type##_t;	\
	\
ATOMIC_INLINE type	\
atomic_load_##short_type(const atomic_##short_type##_t *a,	\
    atomic_memory_order_t mo) {	\
	if (mo == atomic_memory_order_seq_cst) {	\
		atomic_pre_sc_load_fence();	\
	}	\
	type result = a->repr;	\
	if (mo != atomic_memory_order_relaxed) {	\
		atomic_fence(atomic_memory_order_acquire);	\
	}	\
	return result;	\
}	\
	\
ATOMIC_INLINE void	\
atomic_store_##short_type(atomic_##short_type##_t *a,	\
    type val, atomic_memory_order_t mo) {	\
	if (mo != atomic_memory_order_relaxed) {	\
		atomic_fence(atomic_memory_order_release);	\
	}	\
	a->repr = val;	\
	if (mo == atomic_memory_order_seq_cst) {	\
		atomic_post_sc_store_fence();	\
	}	\
}	\
	\
ATOMIC_INLINE type	\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {	\
	/*	\
	 * Because of FreeBSD, we care about gcc 4.2, which doesn't have\
	 * an atomic exchange builtin.  We fake it with a CAS loop.	\
	 */	\
	while (true) {	\
type old = a->repr; \
|
||||||
|
if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \
|
||||||
|
return old; \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE bool \
|
||||||
|
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
|
||||||
|
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||||
|
atomic_memory_order_t failure_mo) { \
|
||||||
|
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||||
|
desired); \
|
||||||
|
if (prev == *expected) { \
|
||||||
|
return true; \
|
||||||
|
} else { \
|
||||||
|
*expected = prev; \
|
||||||
|
return false; \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
ATOMIC_INLINE bool \
|
||||||
|
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
|
||||||
|
type *expected, type desired, atomic_memory_order_t success_mo, \
|
||||||
|
atomic_memory_order_t failure_mo) { \
|
||||||
|
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
|
||||||
|
desired); \
|
||||||
|
if (prev == *expected) { \
|
||||||
|
return true; \
|
||||||
|
} else { \
|
||||||
|
*expected = prev; \
|
||||||
|
return false; \
|
||||||
|
} \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \
|
||||||
|
/* unused */ lg_size) \
|
||||||
|
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE type \
|
||||||
|
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \
|
||||||
|
atomic_memory_order_t mo) { \
|
||||||
|
return __sync_fetch_and_add(&a->repr, val); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE type \
|
||||||
|
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \
|
||||||
|
atomic_memory_order_t mo) { \
|
||||||
|
return __sync_fetch_and_sub(&a->repr, val); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE type \
|
||||||
|
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \
|
||||||
|
atomic_memory_order_t mo) { \
|
||||||
|
return __sync_fetch_and_and(&a->repr, val); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE type \
|
||||||
|
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \
|
||||||
|
atomic_memory_order_t mo) { \
|
||||||
|
return __sync_fetch_and_or(&a->repr, val); \
|
||||||
|
} \
|
||||||
|
\
|
||||||
|
ATOMIC_INLINE type \
|
||||||
|
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \
|
||||||
|
atomic_memory_order_t mo) { \
|
||||||
|
return __sync_fetch_and_xor(&a->repr, val); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
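Aside (not part of the diff): the CAS-loop exchange above exists because the __sync family has no exchange builtin. A minimal standalone sketch of the same technique, with a hypothetical function name:

#include <stdio.h>

/*
 * Swap in val and return the previous value, retrying until the CAS
 * observes an unchanged old value.
 */
static unsigned long
exchange_ulong(volatile unsigned long *p, unsigned long val) {
	while (1) {
		unsigned long old = *p;
		if (__sync_bool_compare_and_swap(p, old, val)) {
			return old;
		}
	}
}

int
main(void) {
	volatile unsigned long v = 7;
	unsigned long old = exchange_ulong(&v, 42);
	printf("old=%lu new=%lu\n", old, v);	/* prints: old=7 new=42 */
	return 0;
}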
158 include/jemalloc/internal/atomic_msvc.h Normal file
@@ -0,0 +1,158 @@
#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H

#define ATOMIC_INIT(...) {__VA_ARGS__}

typedef enum {
	atomic_memory_order_relaxed,
	atomic_memory_order_acquire,
	atomic_memory_order_release,
	atomic_memory_order_acq_rel,
	atomic_memory_order_seq_cst
} atomic_memory_order_t;

typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef __int64 atomic_repr_3_t;

ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
	_ReadWriteBarrier();
# if defined(_M_ARM) || defined(_M_ARM64)
	/* ARM needs a barrier for everything but relaxed. */
	if (mo != atomic_memory_order_relaxed) {
		MemoryBarrier();
	}
# elif defined(_M_IX86) || defined (_M_X64)
	/* x86 needs a barrier only for seq_cst. */
	if (mo == atomic_memory_order_seq_cst) {
		MemoryBarrier();
	}
# else
#  error "Don't know how to create atomics for this platform for MSVC."
# endif
	_ReadWriteBarrier();
}

#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t

#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
#define ATOMIC_RAW_CONCAT(a, b) a ## b

#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
    base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))

#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
    ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)

#define ATOMIC_INTERLOCKED_SUFFIX_0 8
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
#define ATOMIC_INTERLOCKED_SUFFIX_2
#define ATOMIC_INTERLOCKED_SUFFIX_3 64

#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
typedef struct { \
	ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
} atomic_##short_type##_t; \
 \
ATOMIC_INLINE type \
atomic_load_##short_type(const atomic_##short_type##_t *a, \
    atomic_memory_order_t mo) { \
	ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
	if (mo != atomic_memory_order_relaxed) { \
		atomic_fence(atomic_memory_order_acquire); \
	} \
	return (type) ret; \
} \
 \
ATOMIC_INLINE void \
atomic_store_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	if (mo != atomic_memory_order_relaxed) { \
		atomic_fence(atomic_memory_order_release); \
	} \
	a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
	if (mo == atomic_memory_order_seq_cst) { \
		atomic_fence(atomic_memory_order_seq_cst); \
	} \
} \
 \
ATOMIC_INLINE type \
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
    atomic_memory_order_t mo) { \
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
 \
ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo, \
    atomic_memory_order_t failure_mo) { \
	ATOMIC_INTERLOCKED_REPR(lg_size) e = \
	    (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
	ATOMIC_INTERLOCKED_REPR(lg_size) d = \
	    (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
	ATOMIC_INTERLOCKED_REPR(lg_size) old = \
	    ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
	    lg_size)(&a->repr, d, e); \
	if (old == e) { \
		return true; \
	} else { \
		*expected = (type)old; \
		return false; \
	} \
} \
 \
ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
    type *expected, type desired, atomic_memory_order_t success_mo, \
    atomic_memory_order_t failure_mo) { \
	/* We implement the weak version with strong semantics. */ \
	return atomic_compare_exchange_weak_##short_type(a, expected, \
	    desired, success_mo, failure_mo); \
}


#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
 \
ATOMIC_INLINE type \
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
 \
ATOMIC_INLINE type \
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	/* \
	 * MSVC warns on negation of unsigned operands, but for us it \
	 * gives exactly the right semantics (MAX_TYPE + 1 - operand). \
	 */ \
	__pragma(warning(push)) \
	__pragma(warning(disable: 4146)) \
	return atomic_fetch_add_##short_type(a, -val, mo); \
	__pragma(warning(pop)) \
} \
ATOMIC_INLINE type \
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
} \
ATOMIC_INLINE type \
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
    type val, atomic_memory_order_t mo) { \
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
}

#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
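Aside (not part of the diff): the Interlocked name selection relies on the classic two-step paste so that lg_size is expanded before ## applies; lg_size 2 maps to an empty suffix because the 32-bit Interlocked functions are unsuffixed. A portable sketch of the same trick, with hypothetical macro names, compilable with any C99 compiler:

#include <stdio.h>

#define CONCAT(a, b) RAW_CONCAT(a, b)	/* two steps: expand args first */
#define RAW_CONCAT(a, b) a##b

#define SUFFIX_2		/* empty: 32-bit Interlocked names carry no suffix */
#define SUFFIX_3 64
#define NAME(base, lg) CONCAT(base, CONCAT(SUFFIX_, lg))

#define STR(x) STR2(x)
#define STR2(x) #x

int
main(void) {
	/* Prints the function names the MSVC header would end up calling. */
	printf("%s\n", STR(NAME(_InterlockedExchange, 2)));	/* _InterlockedExchange */
	printf("%s\n", STR(NAME(_InterlockedExchange, 3)));	/* _InterlockedExchange64 */
	return 0;
}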
30 include/jemalloc/internal/background_thread_externs.h Normal file
@@ -0,0 +1,30 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H

extern bool opt_background_thread;
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern background_thread_info_t *background_thread_info;

bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, size_t npages_new);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */

56 include/jemalloc/internal/background_thread_inlines.h Normal file
@@ -0,0 +1,56 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H

JEMALLOC_ALWAYS_INLINE bool
background_thread_enabled(void) {
	return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE void
background_thread_enabled_set(tsdn_t *tsdn, bool state) {
	malloc_mutex_assert_owner(tsdn, &background_thread_lock);
	atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
}

JEMALLOC_ALWAYS_INLINE background_thread_info_t *
arena_background_thread_info_get(arena_t *arena) {
	unsigned arena_ind = arena_ind_get(arena);
	return &background_thread_info[arena_ind % ncpus];
}

JEMALLOC_ALWAYS_INLINE uint64_t
background_thread_wakeup_time_get(background_thread_info_t *info) {
	uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
	assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
	    (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
	return next_wakeup;
}

JEMALLOC_ALWAYS_INLINE void
background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
    uint64_t wakeup_time) {
	malloc_mutex_assert_owner(tsdn, &info->mtx);
	atomic_store_b(&info->indefinite_sleep,
	    wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
	nstime_init(&info->next_wakeup, wakeup_time);
}

JEMALLOC_ALWAYS_INLINE bool
background_thread_indefinite_sleep(background_thread_info_t *info) {
	return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena) {
	if (!background_thread_enabled()) {
		return;
	}
	background_thread_info_t *info =
	    arena_background_thread_info_get(arena);
	if (background_thread_indefinite_sleep(info)) {
		background_thread_interval_check(tsdn, arena,
		    &arena->decay_dirty, 0);
	}
}

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
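Aside (not part of the diff): the assert in background_thread_wakeup_time_get checks that the indefinite_sleep flag always agrees with next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP. A standalone sketch of that encoding, with the atomics and nstime_t replaced by plain fields and hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INDEFINITE_SLEEP UINT64_MAX	/* stands in for the real constant */

typedef struct {
	bool indefinite_sleep;		/* atomic_b_t in the real struct */
	uint64_t next_wakeup_ns;	/* nstime_t in the real struct */
} info_t;

static void
wakeup_time_set(info_t *info, uint64_t wakeup_time) {
	/* Keep the flag consistent with whether a wakeup is scheduled. */
	info->indefinite_sleep = (wakeup_time == INDEFINITE_SLEEP);
	info->next_wakeup_ns = wakeup_time;
}

int
main(void) {
	info_t info;
	wakeup_time_set(&info, 1000);
	printf("indefinite=%d\n", (int)info.indefinite_sleep);	/* 0 */
	wakeup_time_set(&info, INDEFINITE_SLEEP);
	printf("indefinite=%d\n", (int)info.indefinite_sleep);	/* 1 */
	return 0;
}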
52 include/jemalloc/internal/background_thread_structs.h Normal file
@@ -0,0 +1,52 @@
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H

/* This file really combines "structs" and "types", but only transitionally. */

#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
# define JEMALLOC_PTHREAD_CREATE_WRAPPER
#endif

#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX

typedef enum {
	background_thread_stopped,
	background_thread_started,
	/* Thread waits on the global lock when paused (for arena_reset). */
	background_thread_paused,
} background_thread_state_t;

struct background_thread_info_s {
#ifdef JEMALLOC_BACKGROUND_THREAD
	/* Background thread is pthread specific. */
	pthread_t thread;
	pthread_cond_t cond;
#endif
	malloc_mutex_t mtx;
	background_thread_state_t state;
	/* When true, it means no wakeup scheduled. */
	atomic_b_t indefinite_sleep;
	/* Next scheduled wakeup time (absolute time in ns). */
	nstime_t next_wakeup;
	/*
	 * Since the last background thread run, newly added number of pages
	 * that need to be purged by the next wakeup.  This is adjusted on
	 * epoch advance, and is used to determine whether we should signal the
	 * background thread to wake up earlier.
	 */
	size_t npages_to_purge_new;
	/* Stats: total number of runs since started. */
	uint64_t tot_n_runs;
	/* Stats: total sleep time since started. */
	nstime_t tot_sleep_time;
};
typedef struct background_thread_info_s background_thread_info_t;

struct background_thread_stats_s {
	size_t num_threads;
	uint64_t num_runs;
	nstime_t run_interval;
};
typedef struct background_thread_stats_s background_thread_stats_t;

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
include/jemalloc/internal/base.h (deleted)
@@ -1,25 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *base_alloc(tsdn_t *tsdn, size_t size);
-void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
-    size_t *mapped);
-bool base_boot(void);
-void base_prefork(tsdn_t *tsdn);
-void base_postfork_parent(tsdn_t *tsdn);
-void base_postfork_child(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
19 include/jemalloc/internal/base_externs.h Normal file
@@ -0,0 +1,19 @@
#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
#define JEMALLOC_INTERNAL_BASE_EXTERNS_H

base_t *b0get(void);
base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void base_delete(base_t *base);
extent_hooks_t *base_extent_hooks_get(base_t *base);
extent_hooks_t *base_extent_hooks_set(base_t *base,
    extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
    size_t *resident, size_t *mapped);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);
bool base_boot(tsdn_t *tsdn);

#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
9 include/jemalloc/internal/base_inlines.h Normal file
@@ -0,0 +1,9 @@
#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
#define JEMALLOC_INTERNAL_BASE_INLINES_H

static inline unsigned
base_ind_get(const base_t *base) {
	return base->ind;
}

#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
55 include/jemalloc/internal/base_structs.h Normal file
@@ -0,0 +1,55 @@
#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
#define JEMALLOC_INTERNAL_BASE_STRUCTS_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
	/* Total size of block's virtual memory mapping. */
	size_t size;

	/* Next block in list of base's blocks. */
	base_block_t *next;

	/* Tracks unused trailing space. */
	extent_t extent;
};

struct base_s {
	/* Associated arena's index within the arenas array. */
	unsigned ind;

	/*
	 * User-configurable extent hook functions.  Points to an
	 * extent_hooks_t.
	 */
	atomic_p_t extent_hooks;

	/* Protects base_alloc() and base_stats_get() operations. */
	malloc_mutex_t mtx;

	/*
	 * Most recent size class in the series of increasingly large base
	 * extents.  Logarithmic spacing between subsequent allocations ensures
	 * that the total number of distinct mappings remains small.
	 */
	pszind_t pind_last;

	/* Serial number generation state. */
	size_t extent_sn_next;

	/* Chain of all blocks associated with base. */
	base_block_t *blocks;

	/* Heap of extents that track unused trailing space within blocks. */
	extent_heap_t avail[NSIZES];

	/* Stats, only maintained if config_stats. */
	size_t allocated;
	size_t resident;
	size_t mapped;
};

#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
7 include/jemalloc/internal/base_types.h Normal file
@@ -0,0 +1,7 @@
#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
#define JEMALLOC_INTERNAL_BASE_TYPES_H

typedef struct base_block_s base_block_t;
typedef struct base_s base_t;

#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
165 include/jemalloc/internal/bit_util.h Normal file
@@ -0,0 +1,165 @@
#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
#define JEMALLOC_INTERNAL_BIT_UTIL_H

#include "jemalloc/internal/assert.h"

#define BIT_UTIL_INLINE static inline

/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
    || !defined(JEMALLOC_INTERNAL_FFS)
#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif


BIT_UTIL_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
	return JEMALLOC_INTERNAL_FFSLL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_lu(unsigned long bitmap) {
	return JEMALLOC_INTERNAL_FFSL(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_u(unsigned bitmap) {
	return JEMALLOC_INTERNAL_FFS(bitmap);
}

BIT_UTIL_INLINE unsigned
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
	return ffs_u(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
	return ffs_lu(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
	return ffs_llu(bitmap);
#else
#error No implementation for size_t ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
	return ffs_lu(bitmap);
#elif LG_SIZEOF_LONG_LONG == 3
	return ffs_llu(bitmap);
#else
#error No implementation for 64-bit ffs()
#endif
}

BIT_UTIL_INLINE unsigned
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
	return ffs_u(bitmap);
#else
#error No implementation for 32-bit ffs()
#endif
	return ffs_u(bitmap);
}

BIT_UTIL_INLINE uint64_t
pow2_ceil_u64(uint64_t x) {
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	x++;
	return x;
}

BIT_UTIL_INLINE uint32_t
pow2_ceil_u32(uint32_t x) {
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x++;
	return x;
}

/* Compute the smallest power of 2 that is >= x. */
BIT_UTIL_INLINE size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
	return pow2_ceil_u64(x);
#else
	return pow2_ceil_u32(x);
#endif
}

#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
	size_t ret;
	assert(x != 0);

	asm ("bsr %1, %0"
	    : "=r"(ret) // Outputs.
	    : "r"(x) // Inputs.
	    );
	assert(ret < UINT_MAX);
	return (unsigned)ret;
}
#elif (defined(_MSC_VER))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
	unsigned long ret;

	assert(x != 0);

#if (LG_SIZEOF_PTR == 3)
	_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
	_BitScanReverse(&ret, x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
	assert(ret < UINT_MAX);
	return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
	assert(x != 0);

#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
	return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
#  error "Unsupported type size for lg_floor()"
#endif
}
#else
BIT_UTIL_INLINE unsigned
lg_floor(size_t x) {
	assert(x != 0);

	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3)
	x |= (x >> 32);
#endif
	if (x == SIZE_T_MAX) {
		return (8 << LG_SIZEOF_PTR) - 1;
	}
	x++;
	return ffs_zu(x) - 2;
}
#endif

#undef BIT_UTIL_INLINE

#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
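Aside (not part of the diff): pow2_ceil_u64 works by smearing the highest set bit into every lower position, so that the final increment carries up to the next power of two; the initial x-- keeps exact powers of two fixed. A standalone check:

#include <stdint.h>
#include <stdio.h>

static uint64_t
pow2_ceil_u64(uint64_t x) {
	x--;			/* so exact powers of two map to themselves */
	x |= x >> 1;		/* smear the top set bit downward... */
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;		/* ...until all lower bits are set */
	return x + 1;		/* carry up to the next power of two */
}

int
main(void) {
	printf("%llu\n", (unsigned long long)pow2_ceil_u64(1000));	/* 1024 */
	printf("%llu\n", (unsigned long long)pow2_ceil_u64(1024));	/* 1024 */
	return 0;
}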
include/jemalloc/internal/bitmap.h
@@ -1,18 +1,26 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_BITMAP_H
+#define JEMALLOC_INTERNAL_BITMAP_H
 
-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
-#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/size_classes.h"
 
-typedef struct bitmap_level_s bitmap_level_t;
-typedef struct bitmap_info_s bitmap_info_t;
 typedef unsigned long bitmap_t;
 #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
 
+/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
+#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
+/* Maximum bitmap bit count is determined by maximum regions per slab. */
+#  define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+#else
+/* Maximum bitmap bit count is determined by number of extent size classes. */
+#  define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
+#endif
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
+
 /* Number of bits per group. */
 #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
-#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
+#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS)
 #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
 
 /*
@@ -21,12 +29,12 @@ typedef unsigned long bitmap_t;
  * use a tree instead.
  */
 #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define USE_TREE
+# define BITMAP_USE_TREE
 #endif
 
 /* Number of groups required to store a given number of bits. */
 #define BITMAP_BITS2GROUPS(nbits) \
-	((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+	(((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
 
 /*
  * Number of groups required at a particular level for a given number of bits.
@@ -40,6 +48,9 @@ typedef unsigned long bitmap_t;
 #define BITMAP_GROUPS_L3(nbits) \
 	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
 	    BITMAP_BITS2GROUPS((nbits)))))
+#define BITMAP_GROUPS_L4(nbits) \
+	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+	    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
 
 /*
  * Assuming the number of levels, number of groups required for a given number
@@ -53,49 +64,96 @@ typedef unsigned long bitmap_t;
 	(BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
 #define BITMAP_GROUPS_4_LEVEL(nbits) \
 	(BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
+#define BITMAP_GROUPS_5_LEVEL(nbits) \
+	(BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
 
 /*
  * Maximum number of groups required to support LG_BITMAP_MAXBITS.
  */
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
 
 #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits)
 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
 #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits)
 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
 #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits)
 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
 #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits)
 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
+# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits)
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
 #else
 # error "Unsupported bitmap size"
 #endif
 
-/* Maximum number of levels possible. */
-#define BITMAP_MAX_LEVELS \
-	(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
-	+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+/*
+ * Maximum number of levels possible.  This could be statically computed based
+ * on LG_BITMAP_MAXBITS:
+ *
+ * #define BITMAP_MAX_LEVELS \
+ *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
+ *
+ * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
+ * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
+ * various cascading macros.  The only additional cost this incurs is some
+ * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
+ * are not impacted.
+ */
+#define BITMAP_MAX_LEVELS 5
+
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+	/* nbits. */ \
+	nbits, \
+	/* nlevels. */ \
+	(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \
+	    (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \
+	    (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \
+	    (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \
+	/* levels. */ \
+	{ \
+		{0}, \
+		{BITMAP_GROUPS_L0(nbits)}, \
+		{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+		{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \
+		    BITMAP_GROUPS_L0(nbits)}, \
+		{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \
+		    BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \
+		{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \
+		    BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \
+		    + BITMAP_GROUPS_L0(nbits)} \
+	} \
+}
 
-#else /* USE_TREE */
+#else /* BITMAP_USE_TREE */
 
+#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits)
 #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
 
-#endif /* USE_TREE */
+#define BITMAP_INFO_INITIALIZER(nbits) { \
+	/* nbits. */ \
+	nbits, \
+	/* ngroups. */ \
+	BITMAP_BITS2GROUPS(nbits) \
+}
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+#endif /* BITMAP_USE_TREE */
 
-struct bitmap_level_s {
+typedef struct bitmap_level_s {
 	/* Offset of this level's groups within the array of groups. */
 	size_t group_offset;
-};
+} bitmap_level_t;
 
-struct bitmap_info_s {
+typedef struct bitmap_info_s {
 	/* Logical number of bits in bitmap (stored at bottom level). */
 	size_t nbits;
 
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
 	/* Number of levels necessary for nbits. */
 	unsigned nlevels;
 
@@ -104,37 +162,19 @@ struct bitmap_info_s {
 	 * bottom to top (e.g. the bottom level is stored in levels[0]).
 	 */
 	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* USE_TREE */
+#else /* BITMAP_USE_TREE */
 	/* Number of groups necessary for nbits. */
 	size_t ngroups;
-#endif /* USE_TREE */
-};
+#endif /* BITMAP_USE_TREE */
+} bitmap_info_t;
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
 void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
-void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
+void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
 size_t bitmap_size(const bitmap_info_t *binfo);
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
-bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
-void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
-JEMALLOC_INLINE bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
-#ifdef USE_TREE
+static inline bool
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
+#ifdef BITMAP_USE_TREE
 	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
 	bitmap_t rg = bitmap[rgoff];
 	/* The bitmap is full iff the root group is 0. */
@@ -143,28 +183,27 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
 	size_t i;
 
 	for (i = 0; i < binfo->ngroups; i++) {
-		if (bitmap[i] != 0)
-			return (false);
+		if (bitmap[i] != 0) {
+			return false;
+		}
 	}
-	return (true);
+	return true;
 #endif
 }
 
-JEMALLOC_INLINE bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline bool
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t g;
 
 	assert(bit < binfo->nbits);
 	goff = bit >> LG_BITMAP_GROUP_NBITS;
 	g = bitmap[goff];
-	return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+	return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 }
 
-JEMALLOC_INLINE void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline void
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
 	bitmap_t g;
@@ -178,7 +217,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 	*gp = g;
 	assert(bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
 	/* Propagate group state transitions up the tree. */
 	if (g == 0) {
 		unsigned i;
@@ -190,24 +229,83 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 			*gp = g;
-			if (g != 0)
+			if (g != 0) {
 				break;
+			}
 		}
 	}
 #endif
 }
 
+/* ffu: find first unset >= bit. */
+static inline size_t
+bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
+	assert(min_bit < binfo->nbits);
+
+#ifdef BITMAP_USE_TREE
+	size_t bit = 0;
+	for (unsigned level = binfo->nlevels; level--;) {
+		size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
+		    1));
+		bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
+		    >> lg_bits_per_group)];
+		unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
+		    bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
+		assert(group_nmask <= BITMAP_GROUP_NBITS);
+		bitmap_t group_mask = ~((1LU << group_nmask) - 1);
+		bitmap_t group_masked = group & group_mask;
+		if (group_masked == 0LU) {
+			if (group == 0LU) {
+				return binfo->nbits;
+			}
+			/*
+			 * min_bit was preceded by one or more unset bits in
+			 * this group, but there are no other unset bits in this
+			 * group.  Try again starting at the first bit of the
+			 * next sibling.  This will recurse at most once per
+			 * non-root level.
+			 */
+			size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
+			assert(sib_base > min_bit);
+			assert(sib_base > bit);
+			if (sib_base >= binfo->nbits) {
+				return binfo->nbits;
+			}
+			return bitmap_ffu(bitmap, binfo, sib_base);
+		}
+		bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+		    (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
+	}
+	assert(bit >= min_bit);
+	assert(bit < binfo->nbits);
+	return bit;
+#else
+	size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
+	bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
+	    - 1);
+	size_t bit;
+	do {
+		bit = ffs_lu(g);
+		if (bit != 0) {
+			return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+		}
+		i++;
+		g = bitmap[i];
+	} while (i < binfo->ngroups);
+	return binfo->nbits;
+#endif
 }
 
 /* sfu: set first unset. */
-JEMALLOC_INLINE size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+static inline size_t
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	size_t bit;
 	bitmap_t g;
 	unsigned i;
 
 	assert(!bitmap_full(bitmap, binfo));
 
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
 	i = binfo->nlevels - 1;
 	g = bitmap[binfo->levels[i].group_offset];
 	bit = ffs_lu(g) - 1;
@@ -226,12 +324,11 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
 	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
 #endif
 	bitmap_set(bitmap, binfo, bit);
-	return (bit);
+	return bit;
 }
 
-JEMALLOC_INLINE void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+static inline void
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
 	bitmap_t g;
@@ -247,7 +344,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 	*gp = g;
 	assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
+#ifdef BITMAP_USE_TREE
 	/* Propagate group state transitions up the tree. */
 	if (propagate) {
 		unsigned i;
@@ -261,14 +358,12 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 			    == 0);
 			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 			*gp = g;
-			if (!propagate)
+			if (!propagate) {
 				break;
+			}
 		}
 	}
-#endif /* USE_TREE */
+#endif /* BITMAP_USE_TREE */
 }
 
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_BITMAP_H */
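Aside (not part of the diff): in the bitmap representation above, a stored 1 bit means "unset/free" (hence bitmap_get negates, and bitmap_full tests the root group against 0), which makes set-first-unset a plain find-first-set. A standalone sketch of the flat, non-tree sfu path, with hypothetical names; assumes POSIX ffsl:

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* ffsl() */

#define GROUP_NBITS (sizeof(unsigned long) * 8)

/* Find the first stored 1 bit (a free slot), clear it, return its index. */
static size_t
sfu(unsigned long *bitmap, size_t ngroups) {
	for (size_t i = 0; i < ngroups; i++) {
		int bit = ffsl((long)bitmap[i]);	/* 1-based; 0 if none set */
		if (bit != 0) {
			bitmap[i] ^= 1UL << (bit - 1);	/* mark as used */
			return i * GROUP_NBITS + (size_t)(bit - 1);
		}
	}
	return (size_t)-1;	/* bitmap is full */
}

int
main(void) {
	unsigned long groups[2];
	memset(groups, 0xff, sizeof(groups));	/* everything free */
	size_t a = sfu(groups, 2);
	size_t b = sfu(groups, 2);
	size_t c = sfu(groups, 2);
	printf("%zu %zu %zu\n", a, b, c);	/* prints: 0 1 2 */
	return 0;
}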
include/jemalloc/internal/chunk.h (deleted)
@@ -1,97 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * Size and alignment of memory chunks that are allocated by the OS's virtual
- * memory system.
- */
-#define LG_CHUNK_DEFAULT 21
-
-/* Return the chunk address for allocation address a. */
-#define CHUNK_ADDR2BASE(a) \
-	((void *)((uintptr_t)(a) & ~chunksize_mask))
-
-/* Return the chunk offset of address a. */
-#define CHUNK_ADDR2OFFSET(a) \
-	((size_t)((uintptr_t)(a) & chunksize_mask))
-
-/* Return the smallest chunk multiple that is >= s. */
-#define CHUNK_CEILING(s) \
-	(((s) + chunksize_mask) & ~chunksize_mask)
-
-#define CHUNK_HOOKS_INITIALIZER { \
-	NULL, \
-	NULL, \
-	NULL, \
-	NULL, \
-	NULL, \
-	NULL, \
-	NULL \
-}
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern size_t opt_lg_chunk;
-extern const char *opt_dss;
-
-extern rtree_t chunks_rtree;
-
-extern size_t chunksize;
-extern size_t chunksize_mask; /* (chunksize - 1). */
-extern size_t chunk_npages;
-
-extern const chunk_hooks_t chunk_hooks_default;
-
-chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
-chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
-    const chunk_hooks_t *chunk_hooks);
-
-bool chunk_register(const void *chunk, const extent_node_t *node,
-    bool *gdump);
-void chunk_deregister(const void *chunk, const extent_node_t *node);
-void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    size_t *sn, bool *zero, bool *commit, bool dalloc_node);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    size_t *sn, bool *zero, bool *commit);
-void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
-    bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
-    bool zeroed, bool committed);
-bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
-    size_t length);
-bool chunk_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-extent_node_t *chunk_lookup(const void *chunk, bool dependent);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
-JEMALLOC_INLINE extent_node_t *
-chunk_lookup(const void *ptr, bool dependent)
-{
-	return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
-#include "jemalloc/internal/chunk_dss.h"
-#include "jemalloc/internal/chunk_mmap.h"
include/jemalloc/internal/chunk_dss.h (deleted)
@@ -1,37 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef enum {
-	dss_prec_disabled = 0,
-	dss_prec_primary = 1,
-	dss_prec_secondary = 2,
-
-	dss_prec_limit = 3
-} dss_prec_t;
-#define DSS_PREC_DEFAULT dss_prec_secondary
-#define DSS_DEFAULT "secondary"
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-extern const char *dss_prec_names[];
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-dss_prec_t chunk_dss_prec_get(void);
-bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(void *chunk);
-bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
-void chunk_dss_boot(void);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
include/jemalloc/internal/chunk_mmap.h (deleted)
@@ -1,21 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
-bool chunk_dalloc_mmap(void *chunk, size_t size);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
@ -1,12 +1,13 @@
+#ifndef JEMALLOC_INTERNAL_CKH_H
+#define JEMALLOC_INTERNAL_CKH_H
+
+#include "jemalloc/internal/tsd.h"
+
+/* Cuckoo hashing implementation.  Skip to the end for the interface. */
+
+/******************************************************************************/
+/* INTERNAL DEFINITIONS -- IGNORE */
 /******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
 
-typedef struct ckh_s ckh_t;
-typedef struct ckhc_s ckhc_t;
-
-/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, size_t[2]);
-typedef bool ckh_keycomp_t (const void *, const void *);
-
 /* Maintain counters used to get an idea of performance. */
 /* #define CKH_COUNT */
@ -19,17 +20,18 @@ typedef bool ckh_keycomp_t (const void *, const void *);
  */
 #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Typedefs to allow easy function pointer passing. */
+typedef void ckh_hash_t (const void *, size_t[2]);
+typedef bool ckh_keycomp_t (const void *, const void *);
 
 /* Hash table cell. */
-struct ckhc_s {
+typedef struct {
 	const void *key;
 	const void *data;
-};
+} ckhc_t;
 
-struct ckh_s {
+/* The hash table itself. */
+typedef struct {
 #ifdef CKH_COUNT
 	/* Counters used to get an idea of performance. */
 	uint64_t ngrows;
@ -58,29 +60,42 @@ struct ckh_s {
 
 	/* Hash table with 2^lg_curbuckets buckets. */
 	ckhc_t *tab;
-};
+} ckh_t;
 
-#endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+/* BEGIN PUBLIC API */
+/******************************************************************************/
 
+/* Lifetime management.  Minitems is the initial capacity. */
 bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     ckh_keycomp_t *keycomp);
 void ckh_delete(tsd_t *tsd, ckh_t *ckh);
 
+/* Get the number of elements in the set. */
 size_t ckh_count(ckh_t *ckh);
 
+/*
+ * To iterate over the elements in the table, initialize *tabind to 0 and call
+ * this function until it returns true.  Each call that returns false will
+ * update *key and *data to the next element in the table, assuming the pointers
+ * are non-NULL.
+ */
 bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
 
+/*
+ * Basic hash table operations -- insert, removal, lookup.  For ckh_remove and
+ * ckh_search, key or data can be NULL.  The hash-table only stores pointers to
+ * the key and value, and doesn't do any lifetime management.
+ */
 bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
 bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
     void **data);
 bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
 
+/* Some useful hash and comparison functions for strings and pointers. */
 void ckh_string_hash(const void *key, size_t r_hash[2]);
 bool ckh_string_keycomp(const void *k1, const void *k2);
 void ckh_pointer_hash(const void *key, size_t r_hash[2]);
 bool ckh_pointer_keycomp(const void *k1, const void *k2);
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_CKH_H */
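Since the new ckh.h is now a self-contained header, a usage sketch may help. The following is illustrative only and not part of the patch; it assumes a valid tsd_t handle (only obtainable inside jemalloc) and uses only the functions declared above. Note the jemalloc convention that a bool return of false means success.

/* Illustrative sketch, not part of the patch: exercising the ckh API. */
static void
ckh_usage_sketch(tsd_t *tsd) {
	ckh_t ckh;
	void *k, *v;

	/* 16 is the minitems capacity hint; string hash/compare callbacks. */
	if (ckh_new(tsd, &ckh, 16, ckh_string_hash, ckh_string_keycomp)) {
		return;	/* true means failure (allocation error). */
	}
	if (!ckh_insert(tsd, &ckh, "key", "value")) {
		if (!ckh_search(&ckh, "key", &k, &v)) {
			/* k and v now point at the stored key and value. */
		}
	}
	/* Iterate: start tabind at 0; stop once ckh_iter returns true. */
	for (size_t tabind = 0; !ckh_iter(&ckh, &tabind, &k, &v);) {
		/* Visit (k, v). */
	}
	ckh_delete(tsd, &ckh);
}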
@ -1,47 +1,37 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_CTL_H
+#define JEMALLOC_INTERNAL_CTL_H
 
-typedef struct ctl_node_s ctl_node_t;
-typedef struct ctl_named_node_s ctl_named_node_t;
-typedef struct ctl_indexed_node_s ctl_indexed_node_t;
-typedef struct ctl_arena_stats_s ctl_arena_stats_t;
-typedef struct ctl_stats_s ctl_stats_t;
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats.h"
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* Maximum ctl tree depth. */
+#define CTL_MAX_DEPTH 7
 
-struct ctl_node_s {
+typedef struct ctl_node_s {
 	bool named;
-};
+} ctl_node_t;
 
-struct ctl_named_node_s {
-	struct ctl_node_s node;
+typedef struct ctl_named_node_s {
+	ctl_node_t node;
 	const char *name;
 	/* If (nchildren == 0), this is a terminal node. */
-	unsigned nchildren;
+	size_t nchildren;
 	const ctl_node_t *children;
-	int (*ctl)(tsd_t *, const size_t *, size_t, void *,
-	    size_t *, void *, size_t);
-};
+	int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
+	    size_t);
+} ctl_named_node_t;
 
-struct ctl_indexed_node_s {
+typedef struct ctl_indexed_node_s {
 	struct ctl_node_s node;
 	const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
 	    size_t);
-};
+} ctl_indexed_node_t;
 
-struct ctl_arena_stats_s {
-	bool initialized;
-	unsigned nthreads;
-	const char *dss;
-	ssize_t lg_dirty_mult;
-	ssize_t decay_time;
-	size_t pactive;
-	size_t pdirty;
-
-	/* The remainder are only populated if config_stats is true. */
-
+typedef struct ctl_arena_stats_s {
 	arena_stats_t astats;
 
 	/* Aggregate stats for small size classes, based on bin stats. */
@ -51,24 +41,53 @@ struct ctl_arena_stats_s {
 	uint64_t nrequests_small;
 
 	malloc_bin_stats_t bstats[NBINS];
-	malloc_large_stats_t *lstats; /* nlclasses elements. */
-	malloc_huge_stats_t *hstats; /* nhclasses elements. */
-};
+	malloc_large_stats_t lstats[NSIZES - NBINS];
+} ctl_arena_stats_t;
 
-struct ctl_stats_s {
+typedef struct ctl_stats_s {
 	size_t allocated;
 	size_t active;
 	size_t metadata;
 	size_t resident;
 	size_t mapped;
 	size_t retained;
-	unsigned narenas;
-	ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
+	background_thread_stats_t background_thread;
+	mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
+} ctl_stats_t;
+
+typedef struct ctl_arena_s ctl_arena_t;
+struct ctl_arena_s {
+	unsigned arena_ind;
+	bool initialized;
+	ql_elm(ctl_arena_t) destroyed_link;
+
+	/* Basic stats, supported even if !config_stats. */
+	unsigned nthreads;
+	const char *dss;
+	ssize_t dirty_decay_ms;
+	ssize_t muzzy_decay_ms;
+	size_t pactive;
+	size_t pdirty;
+	size_t pmuzzy;
+
+	/* NULL if !config_stats. */
+	ctl_arena_stats_t *astats;
 };
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+typedef struct ctl_arenas_s {
+	uint64_t epoch;
+	unsigned narenas;
+	ql_head(ctl_arena_t) destroyed;
+
+	/*
+	 * Element 0 corresponds to merged stats for extant arenas (accessed via
+	 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
+	 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
+	 * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
+	 */
+	ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
+} ctl_arenas_t;
 
 int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
@ -109,10 +128,4 @@ void ctl_postfork_child(tsdn_t *tsdn);
 } \
 } while (0)
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_CTL_H */
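For orientation: the named-node tree that ctl_byname() walks is the machinery behind jemalloc's public mallctl() entry points. A minimal read of one terminal node through the public API (an illustrative sketch, not part of this patch):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Traverses the ctl tree: "arenas" -> "narenas" (a terminal node). */
	unsigned narenas;
	size_t len = sizeof(narenas);

	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
		printf("number of arenas: %u\n", narenas);
	}
	return 0;
}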
@ -1,275 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct extent_node_s extent_node_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-/* Tree of extents.  Use accessor functions for en_* fields. */
-struct extent_node_s {
-	/* Arena from which this extent came, if any. */
-	arena_t *en_arena;
-
-	/* Pointer to the extent that this tree node is responsible for. */
-	void *en_addr;
-
-	/* Total region size. */
-	size_t en_size;
-
-	/*
-	 * Serial number (potentially non-unique).
-	 *
-	 * In principle serial numbers can wrap around on 32-bit systems if
-	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
-	 * back on address comparison for equal serial numbers, stable (if
-	 * imperfect) ordering is maintained.
-	 *
-	 * Serial numbers may not be unique even in the absence of wrap-around,
-	 * e.g. when splitting an extent and assigning the same serial number to
-	 * both resulting adjacent extents.
-	 */
-	size_t en_sn;
-
-	/*
-	 * The zeroed flag is used by chunk recycling code to track whether
-	 * memory is zero-filled.
-	 */
-	bool en_zeroed;
-
-	/*
-	 * True if physical memory is committed to the extent, whether
-	 * explicitly or implicitly as on a system that overcommits and
-	 * satisfies physical memory needs on demand via soft page faults.
-	 */
-	bool en_committed;
-
-	/*
-	 * The achunk flag is used to validate that huge allocation lookups
-	 * don't return arena chunks.
-	 */
-	bool en_achunk;
-
-	/* Profile counters, used for huge objects. */
-	prof_tctx_t *en_prof_tctx;
-
-	/* Linkage for arena's runs_dirty and chunks_cache rings. */
-	arena_runs_dirty_link_t rd;
-	qr(extent_node_t) cc_link;
-
-	union {
-		/* Linkage for the size/sn/address-ordered tree. */
-		rb_node(extent_node_t) szsnad_link;
-
-		/* Linkage for arena's achunks, huge, and node_cache lists. */
-		ql_elm(extent_node_t) ql_link;
-	};
-
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_node_t) ad_link;
-};
-typedef rb_tree(extent_node_t) extent_tree_t;
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_JET
-size_t extent_size_quantize_floor(size_t size);
-#endif
-size_t extent_size_quantize_ceil(size_t size);
-
-rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
-
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_t *extent_node_arena_get(const extent_node_t *node);
-void *extent_node_addr_get(const extent_node_t *node);
-size_t extent_node_size_get(const extent_node_t *node);
-size_t extent_node_sn_get(const extent_node_t *node);
-bool extent_node_zeroed_get(const extent_node_t *node);
-bool extent_node_committed_get(const extent_node_t *node);
-bool extent_node_achunk_get(const extent_node_t *node);
-prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
-void extent_node_arena_set(extent_node_t *node, arena_t *arena);
-void extent_node_addr_set(extent_node_t *node, void *addr);
-void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_sn_set(extent_node_t *node, size_t sn);
-void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
-void extent_node_committed_set(extent_node_t *node, bool committed);
-void extent_node_achunk_set(extent_node_t *node, bool achunk);
-void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
-void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, size_t sn, bool zeroed, bool committed);
-void extent_node_dirty_linkage_init(extent_node_t *node);
-void extent_node_dirty_insert(extent_node_t *node,
-    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
-void extent_node_dirty_remove(extent_node_t *node);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
-JEMALLOC_INLINE arena_t *
-extent_node_arena_get(const extent_node_t *node)
-{
-
-	return (node->en_arena);
-}
-
-JEMALLOC_INLINE void *
-extent_node_addr_get(const extent_node_t *node)
-{
-
-	return (node->en_addr);
-}
-
-JEMALLOC_INLINE size_t
-extent_node_size_get(const extent_node_t *node)
-{
-
-	return (node->en_size);
-}
-
-JEMALLOC_INLINE size_t
-extent_node_sn_get(const extent_node_t *node)
-{
-
-	return (node->en_sn);
-}
-
-JEMALLOC_INLINE bool
-extent_node_zeroed_get(const extent_node_t *node)
-{
-
-	return (node->en_zeroed);
-}
-
-JEMALLOC_INLINE bool
-extent_node_committed_get(const extent_node_t *node)
-{
-
-	assert(!node->en_achunk);
-	return (node->en_committed);
-}
-
-JEMALLOC_INLINE bool
-extent_node_achunk_get(const extent_node_t *node)
-{
-
-	return (node->en_achunk);
-}
-
-JEMALLOC_INLINE prof_tctx_t *
-extent_node_prof_tctx_get(const extent_node_t *node)
-{
-
-	return (node->en_prof_tctx);
-}
-
-JEMALLOC_INLINE void
-extent_node_arena_set(extent_node_t *node, arena_t *arena)
-{
-
-	node->en_arena = arena;
-}
-
-JEMALLOC_INLINE void
-extent_node_addr_set(extent_node_t *node, void *addr)
-{
-
-	node->en_addr = addr;
-}
-
-JEMALLOC_INLINE void
-extent_node_size_set(extent_node_t *node, size_t size)
-{
-
-	node->en_size = size;
-}
-
-JEMALLOC_INLINE void
-extent_node_sn_set(extent_node_t *node, size_t sn)
-{
-
-	node->en_sn = sn;
-}
-
-JEMALLOC_INLINE void
-extent_node_zeroed_set(extent_node_t *node, bool zeroed)
-{
-
-	node->en_zeroed = zeroed;
-}
-
-JEMALLOC_INLINE void
-extent_node_committed_set(extent_node_t *node, bool committed)
-{
-
-	node->en_committed = committed;
-}
-
-JEMALLOC_INLINE void
-extent_node_achunk_set(extent_node_t *node, bool achunk)
-{
-
-	node->en_achunk = achunk;
-}
-
-JEMALLOC_INLINE void
-extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
-{
-
-	node->en_prof_tctx = tctx;
-}
-
-JEMALLOC_INLINE void
-extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    size_t sn, bool zeroed, bool committed)
-{
-
-	extent_node_arena_set(node, arena);
-	extent_node_addr_set(node, addr);
-	extent_node_size_set(node, size);
-	extent_node_sn_set(node, sn);
-	extent_node_zeroed_set(node, zeroed);
-	extent_node_committed_set(node, committed);
-	extent_node_achunk_set(node, false);
-	if (config_prof)
-		extent_node_prof_tctx_set(node, NULL);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_linkage_init(extent_node_t *node)
-{
-
-	qr_new(&node->rd, rd_link);
-	qr_new(node, cc_link);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_insert(extent_node_t *node,
-    arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
-{
-
-	qr_meld(runs_dirty, &node->rd, rd_link);
-	qr_meld(chunks_dirty, node, cc_link);
-}
-
-JEMALLOC_INLINE void
-extent_node_dirty_remove(extent_node_t *node)
-{
-
-	qr_remove(&node->rd, rd_link);
-	qr_remove(node, cc_link);
-}
-
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
26  include/jemalloc/internal/extent_dss.h  Normal file
@ -0,0 +1,26 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
+#define JEMALLOC_INTERNAL_EXTENT_DSS_H
+
+typedef enum {
+	dss_prec_disabled  = 0,
+	dss_prec_primary   = 1,
+	dss_prec_secondary = 2,
+
+	dss_prec_limit     = 3
+} dss_prec_t;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
+extern const char *dss_prec_names[];
+
+extern const char *opt_dss;
+
+dss_prec_t extent_dss_prec_get(void);
+bool extent_dss_prec_set(dss_prec_t dss_prec);
+void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit);
+bool extent_in_dss(void *addr);
+bool extent_dss_mergeable(void *addr_a, void *addr_b);
+void extent_dss_boot(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
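The indices of dss_prec_names[] mirror the dss_prec_t enum, so a precedence string such as opt_dss maps directly onto the enum. A hypothetical helper (not in the patch) showing that mapping:

/* Hypothetical helper, not part of the patch. */
#include <string.h>

static dss_prec_t
dss_prec_from_string(const char *str) {
	for (unsigned i = 0; i < (unsigned)dss_prec_limit; i++) {
		if (strcmp(str, dss_prec_names[i]) == 0) {
			return (dss_prec_t)i;
		}
	}
	return DSS_PREC_DEFAULT;	/* Fall back to "secondary". */
}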
72  include/jemalloc/internal/extent_externs.h  Normal file
@ -0,0 +1,72 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/rtree.h"
+
+extern rtree_t extents_rtree;
+extern const extent_hooks_t extent_hooks_default;
+extern mutex_pool_t extent_mutex_pool;
+
+extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+
+extent_hooks_t *extent_hooks_get(arena_t *arena);
+extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
+    extent_hooks_t *extent_hooks);
+
+#ifdef JEMALLOC_JET
+size_t extent_size_quantize_floor(size_t size);
+size_t extent_size_quantize_ceil(size_t size);
+#endif
+
+rb_proto(, extent_avail_, extent_tree_t, extent_t)
+ph_proto(, extent_heap_, extent_heap_t, extent_t)
+
+bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+    bool delay_coalesce);
+extent_state_t extents_state_get(const extents_t *extents);
+size_t extents_npages_get(extents_t *extents);
+extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+    size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
+    bool *zero, bool *commit);
+void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
+extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
+void extents_prefork(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
+void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
+extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
+void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent);
+void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent);
+bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+    size_t length);
+bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+    size_t length);
+bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+    size_t length);
+bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+    size_t length);
+extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
+
+bool extent_boot(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
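A sketch of how the hooks managed by extent_hooks_get()/extent_hooks_set() surface outside the allocator (an assumption about usage, not part of this patch): per-arena extent hooks are also reachable from application code via the "arena.<i>.extent_hooks" mallctl.

/* Illustrative sketch, not part of the patch. */
#include <jemalloc/jemalloc.h>

static extent_hooks_t *
arena0_extent_hooks(void) {
	extent_hooks_t *hooks = NULL;
	size_t len = sizeof(hooks);

	if (mallctl("arena.0.extent_hooks", (void *)&hooks, &len, NULL, 0)
	    != 0) {
		return NULL;	/* mallctl failed. */
	}
	return hooks;
}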
407  include/jemalloc/internal/extent_inlines.h  Normal file
@ -0,0 +1,407 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
+#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
+
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sz.h"
+
+static inline void
+extent_lock(tsdn_t *tsdn, extent_t *extent) {
+	assert(extent != NULL);
+	mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_unlock(tsdn_t *tsdn, extent_t *extent) {
+	assert(extent != NULL);
+	mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
+}
+
+static inline void
+extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+	assert(extent1 != NULL && extent2 != NULL);
+	mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+	    (uintptr_t)extent2);
+}
+
+static inline void
+extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
+	assert(extent1 != NULL && extent2 != NULL);
+	mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
+	    (uintptr_t)extent2);
+}
+
+static inline arena_t *
+extent_arena_get(const extent_t *extent) {
+	unsigned arena_ind = (unsigned)((extent->e_bits &
+	    EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
+	/*
+	 * The following check is omitted because we should never actually read
+	 * a NULL arena pointer.
+	 */
+	if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
+		return NULL;
+	}
+	assert(arena_ind < MALLOCX_ARENA_LIMIT);
+	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
+}
+
+static inline szind_t
+extent_szind_get_maybe_invalid(const extent_t *extent) {
+	szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
+	    EXTENT_BITS_SZIND_SHIFT);
+	assert(szind <= NSIZES);
+	return szind;
+}
+
+static inline szind_t
+extent_szind_get(const extent_t *extent) {
+	szind_t szind = extent_szind_get_maybe_invalid(extent);
+	assert(szind < NSIZES); /* Never call when "invalid". */
+	return szind;
+}
+
+static inline size_t
+extent_usize_get(const extent_t *extent) {
+	return sz_index2size(extent_szind_get(extent));
+}
+
+static inline size_t
+extent_sn_get(const extent_t *extent) {
+	return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
+	    EXTENT_BITS_SN_SHIFT);
+}
+
+static inline extent_state_t
+extent_state_get(const extent_t *extent) {
+	return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
+	    EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline bool
+extent_zeroed_get(const extent_t *extent) {
+	return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
+	    EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline bool
+extent_committed_get(const extent_t *extent) {
+	return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
+	    EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline bool
+extent_slab_get(const extent_t *extent) {
+	return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
+	    EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline unsigned
+extent_nfree_get(const extent_t *extent) {
+	assert(extent_slab_get(extent));
+	return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
+	    EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void *
+extent_base_get(const extent_t *extent) {
+	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+	    !extent_slab_get(extent));
+	return PAGE_ADDR2BASE(extent->e_addr);
+}
+
+static inline void *
+extent_addr_get(const extent_t *extent) {
+	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
+	    !extent_slab_get(extent));
+	return extent->e_addr;
+}
+
+static inline size_t
+extent_size_get(const extent_t *extent) {
+	return (extent->e_size_esn & EXTENT_SIZE_MASK);
+}
+
+static inline size_t
+extent_esn_get(const extent_t *extent) {
+	return (extent->e_size_esn & EXTENT_ESN_MASK);
+}
+
+static inline size_t
+extent_bsize_get(const extent_t *extent) {
+	return extent->e_bsize;
+}
+
+static inline void *
+extent_before_get(const extent_t *extent) {
+	return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
+}
+
+static inline void *
+extent_last_get(const extent_t *extent) {
+	return (void *)((uintptr_t)extent_base_get(extent) +
+	    extent_size_get(extent) - PAGE);
+}
+
+static inline void *
+extent_past_get(const extent_t *extent) {
+	return (void *)((uintptr_t)extent_base_get(extent) +
+	    extent_size_get(extent));
+}
+
+static inline arena_slab_data_t *
+extent_slab_data_get(extent_t *extent) {
+	assert(extent_slab_get(extent));
+	return &extent->e_slab_data;
+}
+
+static inline const arena_slab_data_t *
+extent_slab_data_get_const(const extent_t *extent) {
+	assert(extent_slab_get(extent));
+	return &extent->e_slab_data;
+}
+
+static inline prof_tctx_t *
+extent_prof_tctx_get(const extent_t *extent) {
+	return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
+	    ATOMIC_ACQUIRE);
+}
+
+static inline void
+extent_arena_set(extent_t *extent, arena_t *arena) {
+	unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
+	    MALLOCX_ARENA_BITS) - 1);
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
+	    ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
+}
+
+static inline void
+extent_addr_set(extent_t *extent, void *addr) {
+	extent->e_addr = addr;
+}
+
+static inline void
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
+	assert(extent_base_get(extent) == extent_addr_get(extent));
+
+	if (alignment < PAGE) {
+		unsigned lg_range = LG_PAGE -
+		    lg_floor(CACHELINE_CEILING(alignment));
+		size_t r =
+		    prng_lg_range_zu(&extent_arena_get(extent)->offset_state,
+		    lg_range, true);
+		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+		    lg_range);
+		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
+		    random_offset);
+		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
+		    extent->e_addr);
+	}
+}
+
+static inline void
+extent_size_set(extent_t *extent, size_t size) {
+	assert((size & ~EXTENT_SIZE_MASK) == 0);
+	extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
+}
+
+static inline void
+extent_esn_set(extent_t *extent, size_t esn) {
+	extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
+	    EXTENT_ESN_MASK);
+}
+
+static inline void
+extent_bsize_set(extent_t *extent, size_t bsize) {
+	extent->e_bsize = bsize;
+}
+
+static inline void
+extent_szind_set(extent_t *extent, szind_t szind) {
+	assert(szind <= NSIZES); /* NSIZES means "invalid". */
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
+	    ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
+}
+
+static inline void
+extent_nfree_set(extent_t *extent, unsigned nfree) {
+	assert(extent_slab_get(extent));
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
+	    ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_inc(extent_t *extent) {
+	assert(extent_slab_get(extent));
+	extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_nfree_dec(extent_t *extent) {
+	assert(extent_slab_get(extent));
+	extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
+}
+
+static inline void
+extent_sn_set(extent_t *extent, size_t sn) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
+	    ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
+}
+
+static inline void
+extent_state_set(extent_t *extent, extent_state_t state) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
+	    ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
+}
+
+static inline void
+extent_zeroed_set(extent_t *extent, bool zeroed) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
+	    ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
+}
+
+static inline void
+extent_committed_set(extent_t *extent, bool committed) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
+	    ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
+}
+
+static inline void
+extent_slab_set(extent_t *extent, bool slab) {
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
+	    ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
+}
+
+static inline void
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
+	atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
+}
+
+static inline void
+extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
+    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
+    bool committed) {
+	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
+
+	extent_arena_set(extent, arena);
+	extent_addr_set(extent, addr);
+	extent_size_set(extent, size);
+	extent_slab_set(extent, slab);
+	extent_szind_set(extent, szind);
+	extent_sn_set(extent, sn);
+	extent_state_set(extent, state);
+	extent_zeroed_set(extent, zeroed);
+	extent_committed_set(extent, committed);
+	ql_elm_new(extent, ql_link);
+	if (config_prof) {
+		extent_prof_tctx_set(extent, NULL);
+	}
+}
+
+static inline void
+extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
+	extent_arena_set(extent, NULL);
+	extent_addr_set(extent, addr);
+	extent_bsize_set(extent, bsize);
+	extent_slab_set(extent, false);
+	extent_szind_set(extent, NSIZES);
+	extent_sn_set(extent, sn);
+	extent_state_set(extent, extent_state_active);
+	extent_zeroed_set(extent, true);
+	extent_committed_set(extent, true);
+}
+
+static inline void
+extent_list_init(extent_list_t *list) {
+	ql_new(list);
+}
+
+static inline extent_t *
+extent_list_first(const extent_list_t *list) {
+	return ql_first(list);
+}
+
+static inline extent_t *
+extent_list_last(const extent_list_t *list) {
+	return ql_last(list, ql_link);
+}
+
+static inline void
+extent_list_append(extent_list_t *list, extent_t *extent) {
+	ql_tail_insert(list, extent, ql_link);
+}
+
+static inline void
+extent_list_replace(extent_list_t *list, extent_t *to_remove,
+    extent_t *to_insert) {
+	ql_after_insert(to_remove, to_insert, ql_link);
+	ql_remove(list, to_remove, ql_link);
+}
+
+static inline void
+extent_list_remove(extent_list_t *list, extent_t *extent) {
+	ql_remove(list, extent, ql_link);
+}
+
+static inline int
+extent_sn_comp(const extent_t *a, const extent_t *b) {
+	size_t a_sn = extent_sn_get(a);
+	size_t b_sn = extent_sn_get(b);
+
+	return (a_sn > b_sn) - (a_sn < b_sn);
+}
+
+static inline int
+extent_esn_comp(const extent_t *a, const extent_t *b) {
+	size_t a_esn = extent_esn_get(a);
+	size_t b_esn = extent_esn_get(b);
+
+	return (a_esn > b_esn) - (a_esn < b_esn);
+}
+
+static inline int
+extent_ad_comp(const extent_t *a, const extent_t *b) {
+	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
+	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
+
+	return (a_addr > b_addr) - (a_addr < b_addr);
+}
+
+static inline int
+extent_ead_comp(const extent_t *a, const extent_t *b) {
+	uintptr_t a_eaddr = (uintptr_t)a;
+	uintptr_t b_eaddr = (uintptr_t)b;
+
+	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
+}
+
+static inline int
+extent_snad_comp(const extent_t *a, const extent_t *b) {
+	int ret;
+
+	ret = extent_sn_comp(a, b);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = extent_ad_comp(a, b);
+	return ret;
+}
+
+static inline int
+extent_esnead_comp(const extent_t *a, const extent_t *b) {
+	int ret;
+
+	ret = extent_esn_comp(a, b);
+	if (ret != 0) {
+		return ret;
+	}
+
+	ret = extent_ead_comp(a, b);
+	return ret;
+}
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
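One detail worth noting in the comparators above: they use the branch-free three-way idiom (a > b) - (a < b), and the snad/esnead variants fall back on address comparison when serial numbers tie, which is what keeps ordering stable despite non-unique serial numbers. A standalone illustration of the idiom (names are made up for the demonstration):

/* Standalone illustration of the comparator idiom used above. */
#include <assert.h>
#include <stddef.h>

static int
three_way(size_t a, size_t b) {
	/* Yields -1, 0, or 1; avoids subtraction overflow. */
	return (a > b) - (a < b);
}

int
main(void) {
	assert(three_way(1, 2) == -1);
	assert(three_way(2, 2) == 0);
	assert(three_way(3, 2) == 1);
	return 0;
}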
10  include/jemalloc/internal/extent_mmap.h  Normal file
@ -0,0 +1,10 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
+
+extern bool opt_retain;
+
+void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit);
+bool extent_dalloc_mmap(void *addr, size_t size);
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
199  include/jemalloc/internal/extent_structs.h  Normal file
@ -0,0 +1,199 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/size_classes.h"
+
+typedef enum {
+	extent_state_active   = 0,
+	extent_state_dirty    = 1,
+	extent_state_muzzy    = 2,
+	extent_state_retained = 3
+} extent_state_t;
+
+/* Extent (span of pages).  Use accessor functions for e_* fields. */
+struct extent_s {
+	/*
+	 * Bitfield containing several fields:
+	 *
+	 * a: arena_ind
+	 * b: slab
+	 * c: committed
+	 * z: zeroed
+	 * t: state
+	 * i: szind
+	 * f: nfree
+	 * n: sn
+	 *
+	 * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
+	 *
+	 * arena_ind: Arena from which this extent came, or all 1 bits if
+	 *            unassociated.
+	 *
+	 * slab: The slab flag indicates whether the extent is used for a slab
+	 *       of small regions.  This helps differentiate small size classes,
+	 *       and it indicates whether interior pointers can be looked up via
+	 *       iealloc().
+	 *
+	 * committed: The committed flag indicates whether physical memory is
+	 *            committed to the extent, whether explicitly or implicitly
+	 *            as on a system that overcommits and satisfies physical
+	 *            memory needs on demand via soft page faults.
+	 *
+	 * zeroed: The zeroed flag is used by extent recycling code to track
+	 *         whether memory is zero-filled.
+	 *
+	 * state: The state flag is an extent_state_t.
+	 *
+	 * szind: The szind flag indicates usable size class index for
+	 *        allocations residing in this extent, regardless of whether the
+	 *        extent is a slab.  Extent size and usable size often differ
+	 *        even for non-slabs, either due to sz_large_pad or promotion of
+	 *        sampled small regions.
+	 *
+	 * nfree: Number of free regions in slab.
+	 *
+	 * sn: Serial number (potentially non-unique).
+	 *
+	 *     Serial numbers may wrap around if !opt_retain, but as long as
+	 *     comparison functions fall back on address comparison for equal
+	 *     serial numbers, stable (if imperfect) ordering is maintained.
+	 *
+	 *     Serial numbers may not be unique even in the absence of
+	 *     wrap-around, e.g. when splitting an extent and assigning the same
+	 *     serial number to both resulting adjacent extents.
+	 */
+	uint64_t e_bits;
+#define EXTENT_BITS_ARENA_SHIFT 0
+#define EXTENT_BITS_ARENA_MASK \
+    (((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)
+
+#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS
+#define EXTENT_BITS_SLAB_MASK \
+    ((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)
+
+#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1)
+#define EXTENT_BITS_COMMITTED_MASK \
+    ((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)
+
+#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2)
+#define EXTENT_BITS_ZEROED_MASK \
+    ((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)
+
+#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3)
+#define EXTENT_BITS_STATE_MASK \
+    ((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)
+
+#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5)
+#define EXTENT_BITS_SZIND_MASK \
+    (((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)
+
+#define EXTENT_BITS_NFREE_SHIFT \
+    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
+#define EXTENT_BITS_NFREE_MASK \
+    ((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)
+
+#define EXTENT_BITS_SN_SHIFT \
+    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
+#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
+
+	/* Pointer to the extent that this structure is responsible for. */
+	void *e_addr;
+
+	union {
+		/*
+		 * Extent size and serial number associated with the extent
+		 * structure (different than the serial number for the extent at
+		 * e_addr).
+		 *
+		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
+		 */
+		size_t e_size_esn;
+#define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
+#define EXTENT_ESN_MASK ((size_t)PAGE-1)
+		/* Base extent size, which may not be a multiple of PAGE. */
+		size_t e_bsize;
+	};
+
+	union {
+		/*
+		 * List linkage, used by a variety of lists:
+		 * - arena_bin_t's slabs_full
+		 * - extents_t's LRU
+		 * - stashed dirty extents
+		 * - arena's large allocations
+		 */
+		ql_elm(extent_t) ql_link;
+		/* Red-black tree linkage, used by arena's extent_avail. */
+		rb_node(extent_t) rb_link;
+	};
+
+	/* Linkage for per size class sn/address-ordered heaps. */
+	phn(extent_t) ph_link;
+
+	union {
+		/* Small region slab metadata. */
+		arena_slab_data_t e_slab_data;
+
+		/*
+		 * Profile counters, used for large objects.  Points to a
+		 * prof_tctx_t.
+		 */
+		atomic_p_t e_prof_tctx;
+	};
+};
+typedef ql_head(extent_t) extent_list_t;
+typedef rb_tree(extent_t) extent_tree_t;
+typedef ph(extent_t) extent_heap_t;
+
+/* Quantized collection of extents, with built-in LRU queue. */
+struct extents_s {
+	malloc_mutex_t mtx;
+
+	/*
+	 * Quantized per size class heaps of extents.
+	 *
+	 * Synchronization: mtx.
+	 */
+	extent_heap_t heaps[NPSIZES+1];
+
+	/*
+	 * Bitmap for which set bits correspond to non-empty heaps.
+	 *
+	 * Synchronization: mtx.
+	 */
+	bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
+
+	/*
+	 * LRU of all extents in heaps.
+	 *
+	 * Synchronization: mtx.
+	 */
+	extent_list_t lru;
+
+	/*
+	 * Page sum for all extents in heaps.
+	 *
+	 * The synchronization here is a little tricky.  Modifications to npages
+	 * must hold mtx, but reads need not (though, a reader who sees npages
+	 * without holding the mutex can't assume anything about the rest of the
+	 * state of the extents_t).
+	 */
+	atomic_zu_t npages;
+
+	/* All stored extents must be in the same state. */
+	extent_state_t state;
+
+	/*
+	 * If true, delay coalescing until eviction; otherwise coalesce during
+	 * deallocation.
+	 */
+	bool delay_coalesce;
+};
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
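The e_bits layout documented above packs every per-extent flag into a single 64-bit word; each field is read as (bits & MASK) >> SHIFT and written back with the complementary mask. A self-contained sketch of the pattern (the field width and position here are illustrative, not the real jemalloc constants):

#include <assert.h>
#include <stdint.h>

/* Illustrative 8-bit field at bit 13; not the actual jemalloc layout. */
#define FIELD_SHIFT 13
#define FIELD_MASK (((uint64_t)0xffU) << FIELD_SHIFT)

static uint64_t
field_set(uint64_t bits, uint64_t val) {
	return (bits & ~FIELD_MASK) | (val << FIELD_SHIFT);
}

static uint64_t
field_get(uint64_t bits) {
	return (bits & FIELD_MASK) >> FIELD_SHIFT;
}

int
main(void) {
	uint64_t bits = 0;
	bits = field_set(bits, 0x5a);
	assert(field_get(bits) == 0x5a);
	return 0;
}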
9  include/jemalloc/internal/extent_types.h  Normal file
@ -0,0 +1,9 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
+#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
+
+typedef struct extent_s extent_t;
+typedef struct extents_s extents_t;
+
+#define EXTENT_HOOKS_INITIALIZER NULL
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
@ -1,109 +1,76 @@
|
|||||||
|
#ifndef JEMALLOC_INTERNAL_HASH_H
|
||||||
|
#define JEMALLOC_INTERNAL_HASH_H
|
||||||
|
|
||||||
|
#include "jemalloc/internal/assert.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The following hash function is based on MurmurHash3, placed into the public
|
* The following hash function is based on MurmurHash3, placed into the public
|
||||||
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
|
* domain by Austin Appleby. See https://github.com/aappleby/smhasher for
|
||||||
* details.
|
* details.
|
||||||
*/
|
*/
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_TYPES
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_TYPES */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_STRUCTS
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_STRUCTS */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_EXTERNS
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_EXTERNS */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_INLINES
|
|
||||||
|
|
||||||
#ifndef JEMALLOC_ENABLE_INLINE
|
|
||||||
uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
|
|
||||||
void hash_x86_128(const void *key, const int len, uint32_t seed,
|
|
||||||
uint64_t r_out[2]);
|
|
||||||
void hash_x64_128(const void *key, const int len, const uint32_t seed,
|
|
||||||
uint64_t r_out[2]);
|
|
||||||
void hash(const void *key, size_t len, const uint32_t seed,
|
|
||||||
size_t r_hash[2]);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Internal implementation. */
|
/* Internal implementation. */
|
||||||
JEMALLOC_INLINE uint32_t
|
static inline uint32_t
|
||||||
hash_rotl_32(uint32_t x, int8_t r)
|
hash_rotl_32(uint32_t x, int8_t r) {
|
||||||
{
|
|
||||||
|
|
||||||
return ((x << r) | (x >> (32 - r)));
|
return ((x << r) | (x >> (32 - r)));
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE uint64_t
|
static inline uint64_t
|
||||||
hash_rotl_64(uint64_t x, int8_t r)
|
hash_rotl_64(uint64_t x, int8_t r) {
|
||||||
{
|
|
||||||
|
|
||||||
return ((x << r) | (x >> (64 - r)));
|
return ((x << r) | (x >> (64 - r)));
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE uint32_t
|
static inline uint32_t
|
||||||
hash_get_block_32(const uint32_t *p, int i)
|
hash_get_block_32(const uint32_t *p, int i) {
|
||||||
{
|
|
||||||
|
|
||||||
/* Handle unaligned read. */
|
/* Handle unaligned read. */
|
||||||
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
|
||||||
uint32_t ret;
|
uint32_t ret;
|
||||||
|
|
||||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
|
||||||
return (ret);
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
return (p[i]);
|
return p[i];
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE uint64_t
|
static inline uint64_t
|
||||||
hash_get_block_64(const uint64_t *p, int i)
|
hash_get_block_64(const uint64_t *p, int i) {
|
||||||
{
|
|
||||||
|
|
||||||
/* Handle unaligned read. */
|
/* Handle unaligned read. */
|
||||||
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
|
||||||
uint64_t ret;
|
uint64_t ret;
|
||||||
|
|
||||||
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
|
||||||
-	return (ret);
+	return ret;
 }

-	return (p[i]);
+	return p[i];
 }

-JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
-
+static inline uint32_t
+hash_fmix_32(uint32_t h) {
 	h ^= h >> 16;
 	h *= 0x85ebca6b;
 	h ^= h >> 13;
 	h *= 0xc2b2ae35;
 	h ^= h >> 16;

-	return (h);
+	return h;
 }

-JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
-
+static inline uint64_t
+hash_fmix_64(uint64_t k) {
 	k ^= k >> 33;
 	k *= KQU(0xff51afd7ed558ccd);
 	k ^= k >> 33;
 	k *= KQU(0xc4ceb9fe1a85ec53);
 	k ^= k >> 33;

-	return (k);
+	return k;
 }

-JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
+static inline uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 4;

@ -149,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)

 	h1 = hash_fmix_32(h1);

-	return (h1);
+	return h1;
 }

-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t * data = (const uint8_t *) key;
 	const int nblocks = len / 16;

@ -254,10 +220,9 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	r_out[1] = (((uint64_t) h4) << 32) | h3;
 }

-UNUSED JEMALLOC_INLINE void
+UNUSED static inline void
 hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 16;

@ -334,10 +299,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,

 /******************************************************************************/
 /* API. */
-JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
-
+static inline void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
 	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */

 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
@ -351,7 +314,5 @@ hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
 	}
 #endif
 }
-#endif

-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_HASH_H */
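The hash_fmix_32/hash_fmix_64 functions above are the finalization mixers from MurmurHash3, which jemalloc's hash.h is based on: alternating xor-shifts and multiplies by odd constants so that every input bit influences every output bit. A self-contained sanity check of the 32-bit mixer (an editorial sketch, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    /* Same body as hash_fmix_32 above. */
    static uint32_t fmix32(uint32_t h) {
        h ^= h >> 16;
        h *= 0x85ebca6b;
        h ^= h >> 13;
        h *= 0xc2b2ae35;
        h ^= h >> 16;
        return h;
    }

    int main(void) {
        /* Adjacent inputs map to thoroughly different outputs. */
        for (uint32_t i = 0; i < 4; i++) {
            printf("%u -> 0x%08x\n", (unsigned)i, fmix32(i));
        }
        return 0;
    }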
include/jemalloc/internal/hooks.h (new file, 19 lines)
@ -0,0 +1,19 @@
+#ifndef JEMALLOC_INTERNAL_HOOKS_H
+#define JEMALLOC_INTERNAL_HOOKS_H
+
+extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
+extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
+
+#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+
+#define open JEMALLOC_HOOK(open, hooks_libc_hook)
+#define read JEMALLOC_HOOK(read, hooks_libc_hook)
+#define write JEMALLOC_HOOK(write, hooks_libc_hook)
+#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook)
+#define close JEMALLOC_HOOK(close, hooks_libc_hook)
+#define creat JEMALLOC_HOOK(creat, hooks_libc_hook)
+#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook)
+/* Note that this is undef'd and re-define'd in src/prof.c. */
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
+
+#endif /* JEMALLOC_INTERNAL_HOOKS_H */
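JEMALLOC_HOOK relies on the comma operator: the left operand runs the hook (when non-NULL) purely for its side effect, and the whole expression then evaluates to fn, so wrapped call sites compile to an ordinary call. A self-contained sketch of the same trick, with illustrative names rather than jemalloc's:

    #include <stdio.h>

    #define CALL_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

    static void (*test_hook)(void) = NULL; /* stand-in for hooks_libc_hook */
    static void note(void) { puts("hook fired"); }

    static int my_open(const char *path) { (void)path; return 3; }
    /* From here on, every my_open() call site first runs test_hook. */
    #define my_open CALL_HOOK(my_open, test_hook)

    int main(void) {
        my_open("/a");    /* hook NULL: behaves like a plain call */
        test_hook = note; /* e.g. a test asserting no allocator reentry */
        my_open("/b");    /* prints "hook fired", then calls my_open */
        return 0;
    }

Because the macro is object-like, its expansion can mention its own name without recursing, which is why the real header can hook open under the name open.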
include/jemalloc/internal/huge.h (deleted)
@ -1,35 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize_min, size_t usize_max, bool zero);
-void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
-#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
-extern huge_dalloc_junk_t *huge_dalloc_junk;
-#endif
-void huge_dalloc(tsdn_t *tsdn, void *ptr);
-arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
(File diff suppressed because it is too large.)
include/jemalloc/internal/jemalloc_internal_decls.h
@ -14,6 +14,11 @@
 #  if !defined(SYS_write) && defined(__NR_write)
 #    define SYS_write __NR_write
 #  endif
+#  if defined(SYS_open) && defined(__aarch64__)
+     /* Android headers may define SYS_open to __NR_open even though
+      * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
+#    undef SYS_open
+#  endif
 #  include <sys/uio.h>
 # endif
 # include <pthread.h>
@ -36,6 +41,9 @@
 #ifndef SIZE_T_MAX
 #  define SIZE_T_MAX SIZE_MAX
 #endif
+#ifndef SSIZE_MAX
+#  define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1))
+#endif
 #include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
@ -61,9 +69,7 @@ typedef intptr_t ssize_t;
 #  pragma warning(disable: 4996)
 #if _MSC_VER < 1800
 static int
-isblank(int c)
-{
-
+isblank(int c) {
 	return (c == '\t' || c == ' ');
 }
 #endif
include/jemalloc/internal/jemalloc_internal_defs.h.in
@ -8,6 +8,18 @@
 #undef JEMALLOC_PREFIX
 #undef JEMALLOC_CPREFIX

+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_FREE
+#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
+#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+
 /*
  * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
  * For shared libraries, symbol visibility mechanisms prevent these symbols
@ -22,17 +34,21 @@
  */
 #undef CPU_SPINWAIT

-/* Defined if C11 atomics are available. */
-#undef JEMALLOC_C11ATOMICS
-
-/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
-#undef JEMALLOC_ATOMIC9
-
 /*
- * Defined if OSAtomic*() functions are available, as provided by Darwin, and
- * documented in the atomic(3) manual page.
+ * Number of significant bits in virtual addresses.  This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
  */
-#undef JEMALLOC_OSATOMIC
+#undef LG_VADDR
+
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11_ATOMICS
+
+/* Defined if GCC __atomic atomics are available. */
+#undef JEMALLOC_GCC_ATOMIC_ATOMICS
+
+/* Defined if GCC __sync atomics are available. */
+#undef JEMALLOC_GCC_SYNC_ATOMICS

 /*
  * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
@ -123,12 +139,6 @@
 /* Non-empty if the tls_model attribute is supported. */
 #undef JEMALLOC_TLS_MODEL

-/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
-#undef JEMALLOC_CC_SILENCE
-
-/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
-#undef JEMALLOC_CODE_COVERAGE
-
 /*
  * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
  * inline functions.
@ -151,36 +161,23 @@
 #undef JEMALLOC_PROF_GCC

 /*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#undef JEMALLOC_TCACHE
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
  * segment (DSS).
  */
 #undef JEMALLOC_DSS

-/* Support memory filling (junk/zero/quarantine/redzone). */
+/* Support memory filling (junk/zero). */
 #undef JEMALLOC_FILL

 /* Support utrace(2)-based tracing. */
 #undef JEMALLOC_UTRACE

-/* Support Valgrind. */
-#undef JEMALLOC_VALGRIND
-
 /* Support optional abort() on OOM. */
 #undef JEMALLOC_XMALLOC

 /* Support lazy locking (avoid locking unless a second thread is launched). */
 #undef JEMALLOC_LAZY_LOCK

-/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
-#undef LG_TINY_MIN
-
 /*
  * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
@ -190,6 +187,13 @@
 /* One page is 2^LG_PAGE bytes. */
 #undef LG_PAGE

+/*
+ * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
+ * system does not explicitly support huge pages; system calls that require
+ * explicit huge page support are separately configured.
+ */
+#undef LG_HUGEPAGE
+
 /*
  * If defined, adjacent virtual memory mappings with identical attributes
  * automatically coalesce, and they fragment when changes are made to subranges.
@ -200,11 +204,12 @@
 #undef JEMALLOC_MAPS_COALESCE

 /*
- * If defined, use munmap() to unmap freed chunks, rather than storing them for
- * later reuse.  This is disabled by default on Linux because common sequences
- * of mmap()/munmap() calls will cause virtual memory map holes.
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
  */
-#undef JEMALLOC_MUNMAP
+#undef JEMALLOC_RETAIN

 /* TLS is used to map arenas and magazine caches to threads. */
 #undef JEMALLOC_TLS
@ -223,12 +228,6 @@
 #undef JEMALLOC_INTERNAL_FFSL
 #undef JEMALLOC_INTERNAL_FFS

-/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned chunks before dereferencing them.
- */
-#undef JEMALLOC_IVSALLOC
-
 /*
  * If defined, explicitly attempt to more uniformly distribute large allocation
  * pointer alignments across all cache indices.
@ -252,25 +251,27 @@
 /* Defined if madvise(2) is available. */
 #undef JEMALLOC_HAVE_MADVISE

-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_HAVE_MADVISE_HUGE
-
 /*
  * Methods for purging unused pages differ between operating systems.
  *
  *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
  *                             will be discarded rather than swapped out.
- *   madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- *                                 new pages will be demand-zeroed if the
- *                                 address region is later touched.
+ *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ *                                 defined, this immediately discards pages,
+ *                                 such that new pages will be demand-zeroed if
+ *                                 the address region is later touched;
+ *                                 otherwise this behaves similarly to
+ *                                 MADV_FREE, though typically with higher
+ *                                 system overhead.
  */
 #undef JEMALLOC_PURGE_MADVISE_FREE
 #undef JEMALLOC_PURGE_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS

-/* Defined if transparent huge page support is enabled. */
+/*
+ * Defined if transparent huge pages (THPs) are supported via the
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
+ */
 #undef JEMALLOC_THP

 /* Define if operating system has alloca.h header. */
@ -300,9 +301,26 @@
 /* glibc memalign hook. */
 #undef JEMALLOC_GLIBC_MEMALIGN_HOOK

+/* pthread support */
+#undef JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#undef JEMALLOC_HAVE_DLSYM
+
 /* Adaptive mutex support in pthreads. */
 #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP

+/* GNU specific sched_getcpu support */
+#undef JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#undef JEMALLOC_BACKGROUND_THREAD
+
 /*
  * If defined, jemalloc symbols are not exported (doesn't work when
  * JEMALLOC_PREFIX is not defined).
@ -312,4 +330,7 @@
 /* config.malloc_conf options string. */
 #undef JEMALLOC_CONFIG_MALLOC_CONF

+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#undef JEMALLOC_IS_MALLOC
+
 #endif /* JEMALLOC_INTERNAL_DEFS_H_ */
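The two purging strategies documented above can be exercised directly with madvise(2): MADV_FREE is a cheap, lazy hint, while MADV_DONTNEED discards immediately and (where the ZEROS variant applies) guarantees demand-zero refill. A hedged, Linux-oriented sketch, with error handling elided and MADV_FREE available only on Linux 4.5+:

    #include <string.h>
    #include <sys/mman.h>

    /* Prefer the lazy MADV_FREE; fall back to MADV_DONTNEED. */
    static void purge(void *addr, size_t len) {
    #ifdef MADV_FREE
        if (madvise(addr, len, MADV_FREE) == 0) {
            return;
        }
    #endif
        madvise(addr, len, MADV_DONTNEED);
    }

    int main(void) {
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(p, 0xa5, len); /* dirty the pages */
        purge(p, len);        /* tell the kernel they are reclaimable */
        munmap(p, len);
        return 0;
    }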
include/jemalloc/internal/jemalloc_internal_externs.h (new file, 53 lines)
@ -0,0 +1,53 @@
+#ifndef JEMALLOC_INTERNAL_EXTERNS_H
+#define JEMALLOC_INTERNAL_EXTERNS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/tsd_types.h"
+
+/* TSD checks this to set thread local slow state accordingly. */
+extern bool malloc_slow;
+
+/* Run-time options. */
+extern bool opt_abort;
+extern bool opt_abort_conf;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern atomic_p_t arenas[];
+
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+void arena_set(unsigned ind, arena_t *arena);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+bool malloc_initialized(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
include/jemalloc/internal/jemalloc_internal_includes.h (new file, 94 lines)
@ -0,0 +1,94 @@
+#ifndef JEMALLOC_INTERNAL_INCLUDES_H
+#define JEMALLOC_INTERNAL_INCLUDES_H
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation.
+ *
+ * Historically, we dealt with this by splitting each header into four sections
+ * (types, structs, externs, and inlines), and included each header file
+ * multiple times in this file, picking out the portion we want on each pass
+ * using the following #defines:
+ *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
+ *                        types.
+ *   JEMALLOC_H_STRUCTS : Data structures.
+ *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ *   JEMALLOC_H_INLINES : Inline functions.
+ *
+ * We're moving toward a world in which the dependencies are explicit; each file
+ * will #include the headers it depends on (rather than relying on them being
+ * implicitly available via this file including every header file in the
+ * project).
+ *
+ * We're now in an intermediate state: we've broken up the header files to avoid
+ * having to include each one multiple times, but have not yet moved the
+ * dependency information into the header files (i.e. we still rely on the
+ * ordering in this file to ensure all a header's dependencies are available in
+ * its translation unit).  Each component is now broken up into multiple header
+ * files, corresponding to the sections above (e.g. instead of "foo.h", we now
+ * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
+ *
+ * Those files which have been converted to explicitly include their
+ * inter-component dependencies are now in the initial HERMETIC HEADERS
+ * section.  All headers may still rely on jemalloc_preamble.h (which, by fiat,
+ * must be included first in every translation unit) for system headers and
+ * global jemalloc definitions, however.
+ */
+
+/******************************************************************************/
+/* TYPES */
+/******************************************************************************/
+
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/base_types.h"
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/prof_types.h"
+
+/******************************************************************************/
+/* STRUCTS */
+/******************************************************************************/
+
+#include "jemalloc/internal/arena_structs_a.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/base_structs.h"
+#include "jemalloc/internal/prof_structs.h"
+#include "jemalloc/internal/arena_structs_b.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/background_thread_structs.h"
+
+/******************************************************************************/
+/* EXTERNS */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/extent_externs.h"
+#include "jemalloc/internal/base_externs.h"
+#include "jemalloc/internal/arena_externs.h"
+#include "jemalloc/internal/large_externs.h"
+#include "jemalloc/internal/tcache_externs.h"
+#include "jemalloc/internal/prof_externs.h"
+#include "jemalloc/internal/background_thread_externs.h"
+
+/******************************************************************************/
+/* INLINES */
+/******************************************************************************/
+
+#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
+#include "jemalloc/internal/base_inlines.h"
+/*
+ * Include portions of arena code interleaved with tcache code in order to
+ * resolve circular dependencies.
+ */
+#include "jemalloc/internal/prof_inlines_a.h"
+#include "jemalloc/internal/arena_inlines_a.h"
+#include "jemalloc/internal/extent_inlines.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
+#include "jemalloc/internal/tcache_inlines.h"
+#include "jemalloc/internal/arena_inlines_b.h"
+#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
+#include "jemalloc/internal/prof_inlines_b.h"
+#include "jemalloc/internal/background_thread_inlines.h"
+
+#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
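The historical four-phase scheme described in the comment above can be pictured with a toy header. This is an editorial sketch of the old idiom, not code from the commit:

    /* foo.h: one header, four sections selected by phase macros. */
    #ifdef JEMALLOC_H_TYPES
    typedef struct foo_s foo_t;
    #endif
    #ifdef JEMALLOC_H_STRUCTS
    struct foo_s { int x; };
    #endif
    #ifdef JEMALLOC_H_EXTERNS
    void foo_init(foo_t *foo);
    #endif
    #ifdef JEMALLOC_H_INLINES
    static inline int foo_get(const foo_t *foo) { return foo->x; }
    #endif

    /* The umbrella header then included foo.h once per phase: */
    #define JEMALLOC_H_TYPES
    #include "foo.h"
    #undef JEMALLOC_H_TYPES
    #define JEMALLOC_H_STRUCTS
    #include "foo.h"
    #undef JEMALLOC_H_STRUCTS
    /* ...likewise for JEMALLOC_H_EXTERNS and JEMALLOC_H_INLINES. */

Splitting foo.h into foo_types.h, foo_structs.h, foo_externs.h, and foo_inlines.h, as this commit does, achieves the same layering with single inclusion.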
include/jemalloc/internal/jemalloc_internal_inlines_a.h (new file, 168 lines)
@ -0,0 +1,168 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_A_H
+#define JEMALLOC_INTERNAL_INLINES_A_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/ticker.h"
+
+JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
+malloc_getcpu(void) {
+	assert(have_percpu_arena);
+#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
+	return (malloc_cpuid_t)sched_getcpu();
+#else
+	not_reached();
+	return -1;
+#endif
+}
+
+/* Return the chosen arena index based on current cpu. */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_choose(void) {
+	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
+
+	malloc_cpuid_t cpuid = malloc_getcpu();
+	assert(cpuid >= 0);
+
+	unsigned arena_ind;
+	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
+	    2)) {
+		arena_ind = cpuid;
+	} else {
+		assert(opt_percpu_arena == per_phycpu_arena);
+		/* Hyper threads on the same physical CPU share arena. */
+		arena_ind = cpuid - ncpus / 2;
+	}
+
+	return arena_ind;
+}
+
+/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
+JEMALLOC_ALWAYS_INLINE unsigned
+percpu_arena_ind_limit(percpu_arena_mode_t mode) {
+	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
+	if (mode == per_phycpu_arena && ncpus > 1) {
+		if (ncpus % 2) {
+			/* This likely means a misconfig. */
+			return ncpus / 2 + 1;
+		}
+		return ncpus / 2;
+	} else {
+		return ncpus;
+	}
+}
+
+static inline arena_tdata_t *
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
+	arena_tdata_t *tdata;
+	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+
+	if (unlikely(arenas_tdata == NULL)) {
+		/* arenas_tdata hasn't been initialized yet. */
+		return arena_tdata_get_hard(tsd, ind);
+	}
+	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+		/*
+		 * ind is invalid, cache is old (too small), or tdata to be
+		 * initialized.
+		 */
+		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
+		    NULL);
+	}
+
+	tdata = &arenas_tdata[ind];
+	if (likely(tdata != NULL) || !refresh_if_missing) {
+		return tdata;
+	}
+	return arena_tdata_get_hard(tsd, ind);
+}
+
+static inline arena_t *
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
+	arena_t *ret;
+
+	assert(ind < MALLOCX_ARENA_LIMIT);
+
+	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
+	if (unlikely(ret == NULL)) {
+		if (init_if_missing) {
+			ret = arena_init(tsdn, ind,
+			    (extent_hooks_t *)&extent_hooks_default);
+		}
+	}
+	return ret;
+}
+
+static inline ticker_t *
+decay_ticker_get(tsd_t *tsd, unsigned ind) {
+	arena_tdata_t *tdata;
+
+	tdata = arena_tdata_get(tsd, ind, true);
+	if (unlikely(tdata == NULL)) {
+		return NULL;
+	}
+	return &tdata->decay_ticker;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_bin_t *
+tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
+	assert(binind < NBINS);
+	return &tcache->tbins_small[binind];
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_bin_t *
+tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
+	assert(binind >= NBINS && binind < nhbins);
+	return &tcache->tbins_large[binind - NBINS];
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tcache_available(tsd_t *tsd) {
+	/*
+	 * Thread specific auto tcache might be unavailable if: 1) during tcache
+	 * initialization, or 2) disabled through thread.tcache.enabled mallctl
+	 * or config options.  This check covers all cases.
+	 */
+	if (likely(tsd_tcache_enabled_get(tsd))) {
+		/* Associated arena == NULL implies tcache init in progress. */
+		assert(tsd_tcachep_get(tsd)->arena == NULL ||
+		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
+		    NULL);
+		return true;
+	}
+
+	return false;
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get(tsd_t *tsd) {
+	if (!tcache_available(tsd)) {
+		return NULL;
+	}
+
+	return tsd_tcachep_get(tsd);
+}
+
+static inline void
+pre_reentrancy(tsd_t *tsd) {
+	bool fast = tsd_fast(tsd);
+	++*tsd_reentrancy_levelp_get(tsd);
+	if (fast) {
+		/* Prepare slow path for reentrancy. */
+		tsd_slow_update(tsd);
+		assert(tsd->state == tsd_state_nominal_slow);
+	}
+}
+
+static inline void
+post_reentrancy(tsd_t *tsd) {
+	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+	assert(*reentrancy_level > 0);
+	if (--*reentrancy_level == 0) {
+		tsd_slow_update(tsd);
+	}
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
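percpu_arena_choose() above folds hyperthread siblings onto one arena by assuming CPU ids [0, ncpus/2) and [ncpus/2, ncpus) are the two hardware threads of each physical core. A standalone sketch of that mapping (the sibling layout is the function's assumption, as its comment notes):

    #include <stdio.h>

    /* Mirrors the per_phycpu_arena branch of percpu_arena_choose(). */
    static unsigned per_phycpu_arena_ind(unsigned cpuid, unsigned ncpus) {
        return (cpuid < ncpus / 2) ? cpuid : cpuid - ncpus / 2;
    }

    int main(void) {
        unsigned ncpus = 8; /* 4 physical cores, 2 threads each */
        for (unsigned cpuid = 0; cpuid < ncpus; cpuid++) {
            /* cpus 0..3 pair with 4..7: 0<->4, 1<->5, ... */
            printf("cpu %u -> arena %u\n", cpuid,
                per_phycpu_arena_ind(cpuid, ncpus));
        }
        return 0;
    }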
include/jemalloc/internal/jemalloc_internal_inlines_b.h (new file, 86 lines)
@ -0,0 +1,86 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_B_H
+#define JEMALLOC_INTERNAL_INLINES_B_H
+
+#include "jemalloc/internal/rtree.h"
+
+/* Choose an arena based on a per-thread value. */
+static inline arena_t *
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
+	arena_t *ret;
+
+	if (arena != NULL) {
+		return arena;
+	}
+
+	/* During reentrancy, arena 0 is the safest bet. */
+	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
+		return arena_get(tsd_tsdn(tsd), 0, true);
+	}
+
+	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+	if (unlikely(ret == NULL)) {
+		ret = arena_choose_hard(tsd, internal);
+		assert(ret);
+		if (tcache_available(tsd)) {
+			tcache_t *tcache = tcache_get(tsd);
+			if (tcache->arena != NULL) {
+				/* See comments in tcache_data_init(). */
+				assert(tcache->arena ==
+				    arena_get(tsd_tsdn(tsd), 0, false));
+				if (tcache->arena != ret) {
+					tcache_arena_reassociate(tsd_tsdn(tsd),
+					    tcache, ret);
+				}
+			} else {
+				tcache_arena_associate(tsd_tsdn(tsd), tcache,
+				    ret);
+			}
+		}
+	}
+
+	/*
+	 * Note that for percpu arena, if the current arena is outside of the
+	 * auto percpu arena range, (i.e. thread is assigned to a manually
+	 * managed arena), then percpu arena is skipped.
+	 */
+	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
+	    !internal && (arena_ind_get(ret) <
+	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
+	    tsd_tsdn(tsd))) {
+		unsigned ind = percpu_arena_choose();
+		if (arena_ind_get(ret) != ind) {
+			percpu_arena_update(tsd, ind);
+			ret = tsd_arena_get(tsd);
+		}
+		ret->last_thd = tsd_tsdn(tsd);
+	}
+
+	return ret;
+}
+
+static inline arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena) {
+	return arena_choose_impl(tsd, arena, false);
+}
+
+static inline arena_t *
+arena_ichoose(tsd_t *tsd, arena_t *arena) {
+	return arena_choose_impl(tsd, arena, true);
+}
+
+static inline bool
+arena_is_auto(arena_t *arena) {
+	assert(narenas_auto > 0);
+	return (arena_ind_get(arena) < narenas_auto);
+}
+
+JEMALLOC_ALWAYS_INLINE extent_t *
+iealloc(tsdn_t *tsdn, const void *ptr) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+	    (uintptr_t)ptr, true);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
include/jemalloc/internal/jemalloc_internal_inlines_c.h (new file, 197 lines)
@ -0,0 +1,197 @@
+#ifndef JEMALLOC_INTERNAL_INLINES_C_H
+#define JEMALLOC_INTERNAL_INLINES_C_H
+
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/witness.h"
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(tsdn_t *tsdn, const void *ptr) {
+	assert(ptr != NULL);
+
+	return arena_aalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(tsdn_t *tsdn, const void *ptr) {
+	assert(ptr != NULL);
+
+	return arena_salloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+    bool is_internal, arena_t *arena, bool slow_path) {
+	void *ret;
+
+	assert(size != 0);
+	assert(!is_internal || tcache == NULL);
+	assert(!is_internal || arena == NULL || arena_is_auto(arena));
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+	if (config_stats && is_internal && likely(ret != NULL)) {
+		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+	}
+	return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
+	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
+	    NULL, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, bool is_internal, arena_t *arena) {
+	void *ret;
+
+	assert(usize != 0);
+	assert(usize == sz_sa2u(usize, alignment));
+	assert(!is_internal || tcache == NULL);
+	assert(!is_internal || arena == NULL || arena_is_auto(arena));
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+	if (config_stats && is_internal && likely(ret != NULL)) {
+		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
+	}
+	return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, arena_t *arena) {
+	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
+	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+	    tcache_get(tsd), false, NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+ivsalloc(tsdn_t *tsdn, const void *ptr) {
+	return arena_vsalloc(tsdn, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
+    bool is_internal, bool slow_path) {
+	assert(ptr != NULL);
+	assert(!is_internal || tcache == NULL);
+	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+	if (config_stats && is_internal) {
+		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
+	}
+	if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
+		assert(tcache == NULL);
+	}
+	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloc(tsd_t *tsd, void *ptr) {
+	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+    alloc_ctx_t *alloc_ctx, bool slow_path) {
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
+    arena_t *arena) {
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+	void *p;
+	size_t usize, copysize;
+
+	usize = sz_sa2u(size + extra, alignment);
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+		return NULL;
+	}
+	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+	if (p == NULL) {
+		if (extra == 0) {
+			return NULL;
+		}
+		/* Try again, without extra this time. */
+		usize = sz_sa2u(size, alignment);
+		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+			return NULL;
+		}
+		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+		if (p == NULL) {
+			return NULL;
+		}
+	}
+	/*
+	 * Copy at most size bytes (not size+extra), since the caller has no
+	 * expectation that the extra bytes will be reliably preserved.
+	 */
+	copysize = (size < oldsize) ? size : oldsize;
+	memcpy(p, ptr, copysize);
+	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+	return p;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache, arena_t *arena) {
+	assert(ptr != NULL);
+	assert(size != 0);
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+	    != 0) {
+		/*
+		 * Existing object alignment is inadequate; allocate new space
+		 * and copy.
+		 */
+		return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment,
+		    zero, tcache, arena);
+	}
+
+	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
+	    tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    bool zero) {
+	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
+	    tcache_get(tsd), NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+    size_t alignment, bool zero) {
+	assert(ptr != NULL);
+	assert(size != 0);
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, 0);
+
+	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+	    != 0) {
+		/* Existing object alignment is inadequate. */
+		return true;
+	}
+
+	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero);
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
include/jemalloc/internal/jemalloc_internal_macros.h
@ -1,57 +1,40 @@
-/*
- * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
- * functions that are static inline functions if inlining is enabled, and
- * single-definition library-private functions if inlining is disabled.
- *
- * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
- * which case the denoted functions are always static, regardless of whether
- * inlining is enabled.
- */
-#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
-/* Disable inlining to make debugging/profiling easier. */
-# define JEMALLOC_ALWAYS_INLINE
-# define JEMALLOC_ALWAYS_INLINE_C static
-# define JEMALLOC_INLINE
-# define JEMALLOC_INLINE_C static
-# define inline
-#else
-# define JEMALLOC_ENABLE_INLINE
-# ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ALWAYS_INLINE \
-	static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
-#  define JEMALLOC_ALWAYS_INLINE_C \
-	static inline JEMALLOC_ATTR(always_inline)
-# else
+#ifndef JEMALLOC_INTERNAL_MACROS_H
+#define JEMALLOC_INTERNAL_MACROS_H
+
+#ifdef JEMALLOC_DEBUG
 # define JEMALLOC_ALWAYS_INLINE static inline
-# define JEMALLOC_ALWAYS_INLINE_C static inline
+#else
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
 #endif
-# define JEMALLOC_INLINE static inline
-# define JEMALLOC_INLINE_C static inline
 #ifdef _MSC_VER
 # define inline _inline
 #endif
-#endif

-#ifdef JEMALLOC_CC_SILENCE
 #define UNUSED JEMALLOC_ATTR(unused)
-#else
-# define UNUSED
-#endif

 #define ZU(z) ((size_t)z)
-#define ZI(z) ((ssize_t)z)
+#define ZD(z) ((ssize_t)z)
 #define QU(q) ((uint64_t)q)
-#define QI(q) ((int64_t)q)
+#define QD(q) ((int64_t)q)

 #define KZU(z) ZU(z##ULL)
-#define KZI(z) ZI(z##LL)
+#define KZD(z) ZD(z##LL)
 #define KQU(q) QU(q##ULL)
-#define KQI(q) QI(q##LL)
+#define KQD(q) QD(q##LL)

 #ifndef __DECONST
 # define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
 #endif

-#ifndef JEMALLOC_HAS_RESTRICT
+#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
 # define restrict
 #endif
+
+/* Various function pointers are static and immutable except during testing. */
+#ifdef JEMALLOC_JET
+# define JET_MUTABLE
+#else
+# define JET_MUTABLE const
+#endif
+
+#endif /* JEMALLOC_INTERNAL_MACROS_H */
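JET_MUTABLE, added above, makes selected function pointers const in production builds but reassignable in JEMALLOC_JET test builds; large_externs.h below applies it to large_dalloc_junk. A minimal, runnable sketch of the pattern with illustrative names:

    #include <string.h>

    #ifdef JEMALLOC_JET
    # define JET_MUTABLE
    #else
    # define JET_MUTABLE const
    #endif

    typedef void (dalloc_junk_t)(void *, size_t);

    static void dalloc_junk_impl(void *ptr, size_t size) {
        memset(ptr, 0x5a, size); /* scribble over freed memory */
    }
    /* const in production; a JET build can reassign it to instrument. */
    static dalloc_junk_t *JET_MUTABLE dalloc_junk = dalloc_junk_impl;

    int main(void) {
        char buf[8];
        dalloc_junk(buf, sizeof(buf));
        return 0;
    }

In production builds the pointer is immutable, so the compiler can fold calls through it to direct calls; a test build simply assigns a replacement.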
include/jemalloc/internal/jemalloc_internal_types.h (new file, 178 lines)
@ -0,0 +1,178 @@
+#ifndef JEMALLOC_INTERNAL_TYPES_H
+#define JEMALLOC_INTERNAL_TYPES_H
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/* Processor / core id type. */
+typedef int malloc_cpuid_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_BITS	12
+#define MALLOCX_TCACHE_BITS	12
+#define MALLOCX_LG_ALIGN_BITS	6
+#define MALLOCX_ARENA_SHIFT	20
+#define MALLOCX_TCACHE_SHIFT	8
+#define MALLOCX_ARENA_MASK \
+    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
+/* NB: Arena index bias decreases the maximum number of arenas by 1. */
+#define MALLOCX_ARENA_LIMIT	((1 << MALLOCX_ARENA_BITS) - 1)
+#define MALLOCX_TCACHE_MASK \
+    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
+#define MALLOCX_TCACHE_MAX	((1 << MALLOCX_TCACHE_BITS) - 3)
+#define MALLOCX_LG_ALIGN_MASK	((1 << MALLOCX_LG_ALIGN_BITS) - 1)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+    ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
+
+/* Smallest size class to support. */
+#define TINY_MIN	(1U << LG_TINY_MIN)
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+# if (defined(__i386__) || defined(_M_IX86))
+#   define LG_QUANTUM	4
+# endif
+# ifdef __ia64__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __alpha__
+#   define LG_QUANTUM	4
+# endif
+# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+#   define LG_QUANTUM	4
+# endif
+# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
+#   define LG_QUANTUM	4
+# endif
+# ifdef __arm__
+#   define LG_QUANTUM	3
+# endif
+# ifdef __aarch64__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __hppa__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __mips__
+#   define LG_QUANTUM	3
+# endif
+# ifdef __or1k__
+#   define LG_QUANTUM	3
+# endif
+# ifdef __powerpc__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __riscv__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __s390__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __SH4__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __tile__
+#   define LG_QUANTUM	4
+# endif
+# ifdef __le32__
+#   define LG_QUANTUM	4
+# endif
+# ifndef LG_QUANTUM
+#  error "Unknown minimum alignment for architecture; specify via "
+	 "--with-lg-quantum"
+# endif
+#endif
+
+#define QUANTUM		((size_t)(1U << LG_QUANTUM))
+#define QUANTUM_MASK	(QUANTUM - 1)
+
+/* Return the smallest quantum multiple that is >= a. */
+#define QUANTUM_CEILING(a) \
+	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
+
+#define LONG		((size_t)(1U << LG_SIZEOF_LONG))
+#define LONG_MASK	(LONG - 1)
+
+/* Return the smallest long multiple that is >= a. */
+#define LONG_CEILING(a) \
+	(((a) + LONG_MASK) & ~LONG_MASK)
+
+#define SIZEOF_PTR	(1U << LG_SIZEOF_PTR)
+#define PTR_MASK	(SIZEOF_PTR - 1)
+
+/* Return the smallest (void *) multiple that is >= a. */
+#define PTR_CEILING(a) \
+	(((a) + PTR_MASK) & ~PTR_MASK)
+
+/*
+ * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
+ * In addition, this controls the spacing of cacheline-spaced size classes.
+ *
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
+ * only handle raw constants.
+ */
+#define LG_CACHELINE	6
+#define CACHELINE	64
+#define CACHELINE_MASK	(CACHELINE - 1)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define CACHELINE_CEILING(s) \
+	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
+
+/* Return the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
+	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+	((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
+
+/* Declare a variable-length array. */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+#  include <malloc.h>
+#  define alloca _alloca
+# else
+#  ifdef JEMALLOC_HAS_ALLOCA_H
+#   include <alloca.h>
+#  else
+#   include <stdlib.h>
+#  endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+	type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#endif /* JEMALLOC_INTERNAL_TYPES_H */
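The flag layout documented above packs lg(alignment), a tcache index (biased by 2), and an arena index (biased by 1) into one flags word, and the *_CEILING macros round sizes up with the usual mask trick. A standalone numeric check (constants copied from the header; the flags value is just an example):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define ZU(z) ((size_t)z)
    #define MALLOCX_LG_ALIGN_MASK ((1 << 6) - 1)
    #define MALLOCX_TCACHE_SHIFT 8
    #define MALLOCX_TCACHE_MASK (((1 << 12) - 1) << MALLOCX_TCACHE_SHIFT)
    #define MALLOCX_ARENA_SHIFT 20

    int main(void) {
        /* Encode lg(align)=4, tcache 7 (bias +2), arena 3 (bias +1). */
        unsigned flags = 4 | ((7 + 2) << MALLOCX_TCACHE_SHIFT) |
            ((3 + 1) << MALLOCX_ARENA_SHIFT);

        /* Decode exactly as the MALLOCX_*_GET macros do. */
        size_t align = ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK);
        unsigned tcache = ((unsigned)((flags & MALLOCX_TCACHE_MASK) >>
            MALLOCX_TCACHE_SHIFT)) - 2;
        unsigned arena = (flags >> MALLOCX_ARENA_SHIFT) - 1;
        assert(align == 16 && tcache == 7 && arena == 3);

        /* QUANTUM_CEILING-style rounding: quantum 16 rounds 17 up to 32. */
        size_t mask = 16 - 1;
        printf("align=%zu tcache=%u arena=%u ceil=%zu\n",
            align, tcache, arena, (ZU(17) + mask) & ~mask);
        return 0;
    }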
include/jemalloc/internal/jemalloc_preamble.h.in (new file, 179 lines)
@ -0,0 +1,179 @@
+#ifndef JEMALLOC_PREAMBLE_H
+#define JEMALLOC_PREAMBLE_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# undef JEMALLOC_IS_MALLOC
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc@install_suffix@.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "../jemalloc@install_suffix@.h"
+#endif
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/*
+ * Note that the ordering matters here; the hook itself is name-mangled.  We
+ * want the inclusion of hooks to happen early, so that we hook as much as
+ * possible.
+ */
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
+# ifndef JEMALLOC_JET
+#  include "jemalloc/internal/private_namespace.h"
+# else
+#  include "jemalloc/internal/private_namespace_jet.h"
+# endif
+#endif
+#include "jemalloc/internal/hooks.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_thp =
+#ifdef JEMALLOC_THP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    true
+#else
+    false
+#endif
+    ;
+#ifdef JEMALLOC_HAVE_SCHED_GETCPU
+/* Currently percpu_arena depends on sched_getcpu. */
+#define JEMALLOC_PERCPU_ARENA
+#endif
+static const bool have_percpu_arena =
+#ifdef JEMALLOC_PERCPU_ARENA
+    true
+#else
+    false
+#endif
+    ;
+/*
+ * Undocumented, and not recommended; the application should take full
+ * responsibility for tracking provenance.
+ */
+static const bool force_ivsalloc =
+#ifdef JEMALLOC_FORCE_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool have_background_thread =
+#ifdef JEMALLOC_BACKGROUND_THREAD
+    true
+#else
+    false
+#endif
+    ;
+
+#endif /* JEMALLOC_PREAMBLE_H */
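The config_* globals above turn preprocessor state into ordinary static const booleans, so feature checks elsewhere can be plain ifs that the compiler folds away instead of scattered #ifdef blocks. A minimal sketch of the same idiom:

    #include <stdbool.h>
    #include <stdio.h>

    /* The #ifdef is confined to one definition; every use site is
     * normal, always-compiled (and always type-checked) C. */
    static const bool config_verbose =
    #ifdef MYLIB_VERBOSE
        true
    #else
        false
    #endif
        ;

    int main(void) {
        if (config_verbose) { /* dead-code-eliminated when false */
            puts("verbose build");
        }
        return 0;
    }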
26
include/jemalloc/internal/large_externs.h
Normal file
@ -0,0 +1,26 @@
#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H

void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero);
bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);

typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;

typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;

void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
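The `JET_MUTABLE` declarations make internal hooks such as large_dalloc_junk overridable in the jemalloc test build while staying ordinary immutable pointers in production. A hedged generic sketch of the same idea, with hypothetical MYLIB_TEST and HOOK_MUTABLE names (not jemalloc's actual macros):

#include <stddef.h>
#include <string.h>

#ifdef MYLIB_TEST
#  define HOOK_MUTABLE              /* writable: tests may reassign the hook */
#else
#  define HOOK_MUTABLE const        /* production: fixed at link time */
#endif

typedef void (dalloc_junk_t)(void *, size_t);

static void
dalloc_junk_default(void *ptr, size_t size) {
	memset(ptr, 0x5a, size);    /* poison freed memory */
}

/* A test built with -DMYLIB_TEST could install a counting hook here. */
dalloc_junk_t *HOOK_MUTABLE dalloc_junk = dalloc_junk_default;

This keeps the production binary free of indirection costs a mutable global would invite, while tests can still intercept the junk-fill path.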
62
include/jemalloc/internal/malloc_io.h
Normal file
@ -0,0 +1,62 @@
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H

#ifdef _WIN32
#  ifdef _WIN64
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX "ll"
#  else
#    define FMT64_PREFIX "ll"
#    define FMTPTR_PREFIX ""
#  endif
#  define FMTd32 "d"
#  define FMTu32 "u"
#  define FMTx32 "x"
#  define FMTd64 FMT64_PREFIX "d"
#  define FMTu64 FMT64_PREFIX "u"
#  define FMTx64 FMT64_PREFIX "x"
#  define FMTdPTR FMTPTR_PREFIX "d"
#  define FMTuPTR FMTPTR_PREFIX "u"
#  define FMTxPTR FMTPTR_PREFIX "x"
#else
#  include <inttypes.h>
#  define FMTd32 PRId32
#  define FMTu32 PRIu32
#  define FMTx32 PRIx32
#  define FMTd64 PRId64
#  define FMTu64 PRIu64
#  define FMTx64 PRIx64
#  define FMTdPTR PRIdPTR
#  define FMTuPTR PRIuPTR
#  define FMTxPTR PRIxPTR
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/*
 * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
 * large enough for all possible uses within jemalloc.
 */
#define MALLOC_PRINTF_BUFSIZE 4096

int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
    int base);
void malloc_write(const char *s);

/*
 * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
 * point math.
 */
size_t malloc_vsnprintf(char *str, size_t size, const char *format,
    va_list ap);
size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
    JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap);
void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
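The FMT* macros paper over the difference between MSVC's "ll" length prefixes and the C99 <inttypes.h> PRI* macros, so one format string works everywhere. A hedged standalone sketch using the portable PRI* side directly (not jemalloc's malloc_printf machinery):

#include <inttypes.h>
#include <stdio.h>

int
main(void) {
	uint64_t bytes = 1234567890123ULL;
	void *p = &bytes;

	/* Adjacent string literals concatenate, splicing the macro into
	 * the format string at compile time. */
	printf("allocated %" PRIu64 " bytes at 0x%" PRIxPTR "\n",
	    bytes, (uintptr_t)p);
	return 0;
}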
@ -1,115 +0,0 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	mb_write(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier.  Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{

#  if 0
	/* This is a true memory barrier. */
	asm volatile ("pusha;"
	    "xor %%eax,%%eax;"
	    "cpuid;"
	    "popa;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#  else
	/*
	 * This is hopefully enough to keep the compiler from reordering
	 * instructions around this one.
	 */
	asm volatile ("nop;"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
#  endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("sfence"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("eieio"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{

	asm volatile ("membar #StoreStore"
	    : /* Outputs. */
	    : /* Inputs. */
	    : "memory" /* Clobbers. */
	    );
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{

	__sync_synchronize();
}
#else
/*
 * This is much slower than a simple memory barrier, but the semantics of mutex
 * unlock make this work.
 */
JEMALLOC_INLINE void
mb_write(void)
{
	malloc_mutex_t mtx;

	malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
	malloc_mutex_lock(TSDN_NULL, &mtx);
	malloc_mutex_unlock(TSDN_NULL, &mtx);
}
#endif
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
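The deleted mb.h hand-rolled per-architecture store barriers and an "optimizer barrier" in inline asm, all of which predate C11. Removing it is viable because standard <stdatomic.h> fences now express the same two intents portably; a hedged sketch of the standard replacements (this is not jemalloc's code):

#include <stdatomic.h>

static void
store_barrier(void) {
	/* Hardware ordering, comparable in spirit to the old
	 * "sfence"/"eieio"/"membar #StoreStore" paths. */
	atomic_thread_fence(memory_order_release);
}

static void
optimizer_barrier(void) {
	/* Compiler-only barrier, like the old
	 * asm volatile ("nop;" ::: "memory") trick. */
	atomic_signal_fence(memory_order_seq_cst);
}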
@ -1,37 +1,34 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_MUTEX_H
+#define JEMALLOC_INTERNAL_MUTEX_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+
+typedef enum {
+	/* Can only acquire one mutex of a given witness rank at a time. */
+	malloc_mutex_rank_exclusive,
+	/*
+	 * Can acquire multiple mutexes of the same witness rank, but in
+	 * address-ascending order only.
+	 */
+	malloc_mutex_address_ordered
+} malloc_mutex_lock_order_t;
 
 typedef struct malloc_mutex_s malloc_mutex_t;
 
-#ifdef _WIN32
-# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_INITIALIZER \
-    {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER \
-    {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-#else
-# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
-     defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
-#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-#  define MALLOC_MUTEX_INITIALIZER \
-     {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
-      WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-# else
-#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-#  define MALLOC_MUTEX_INITIALIZER \
-     {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
-# endif
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
 struct malloc_mutex_s {
+	union {
+		struct {
+			/*
+			 * prof_data is defined first to reduce cacheline
+			 * bouncing: the data is not touched by the mutex holder
+			 * during unlocking, while it might be modified by
+			 * contenders.  Having it before the mutex itself could
+			 * avoid prefetching a modified cacheline (for the
+			 * unlocking thread).
+			 */
+			mutex_prof_data_t prof_data;
 #ifdef _WIN32
 #  if _WIN32_WINNT >= 0x0600
 			SRWLOCK lock;
@ -48,12 +45,79 @@ struct malloc_mutex_s {
 #else
 			pthread_mutex_t lock;
 #endif
+		};
+		/*
+		 * We only touch witness when configured w/ debug.  However we
+		 * keep the field in a union when !debug so that we don't have
+		 * to pollute the code base with #ifdefs, while avoid paying the
+		 * memory cost.
+		 */
+#if !defined(JEMALLOC_DEBUG)
 	witness_t witness;
+	malloc_mutex_lock_order_t lock_order;
+#endif
 };
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+#if defined(JEMALLOC_DEBUG)
+	witness_t witness;
+	malloc_mutex_lock_order_t lock_order;
+#endif
+};
+
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+#define MALLOC_MUTEX_MAX_SPIN 250
+
+#ifdef _WIN32
+#  if _WIN32_WINNT >= 0x0600
+#    define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
+#    define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock)
+#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
+#  else
+#    define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock)
+#    define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock)
+#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
+#  endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+#  define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock)
+#  define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock)
+#  define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
+#elif (defined(JEMALLOC_OSSPIN))
+#  define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock)
+#  define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock)
+#  define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock))
+#else
+#  define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock)
+#  define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock)
+#  define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
+#endif
+
+#define LOCK_PROF_DATA_INITIALIZER \
+    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \
+     ATOMIC_INIT(0), 0, NULL, 0}
+
+#ifdef _WIN32
+#  define MALLOC_MUTEX_INITIALIZER
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+#  define MALLOC_MUTEX_INITIALIZER \
+     {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#elif (defined(JEMALLOC_OSSPIN))
+#  define MALLOC_MUTEX_INITIALIZER \
+     {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+#  define MALLOC_MUTEX_INITIALIZER \
+     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#else
+#  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+#  define MALLOC_MUTEX_INITIALIZER \
+     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#endif
 
 #ifdef JEMALLOC_LAZY_LOCK
 extern bool isthreaded;
@ -63,83 +127,122 @@ extern bool isthreaded;
 #endif
 
 bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
-    witness_rank_t rank);
+    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
 void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
 void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
 bool malloc_mutex_boot(void);
+void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
+
+void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void	malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void	malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
-	witness_assert_not_owner(tsdn, &mutex->witness);
-	if (isthreaded) {
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-		AcquireSRWLockExclusive(&mutex->lock);
-#  else
-		EnterCriticalSection(&mutex->lock);
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-		os_unfair_lock_lock(&mutex->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-		OSSpinLockLock(&mutex->lock);
-#else
-		pthread_mutex_lock(&mutex->lock);
-#endif
-	}
-	witness_lock(tsdn, &mutex->witness);
+static inline void
+malloc_mutex_lock_final(malloc_mutex_t *mutex) {
+	MALLOC_MUTEX_LOCK(mutex);
 }
 
-JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
-	witness_unlock(tsdn, &mutex->witness);
-	if (isthreaded) {
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-		ReleaseSRWLockExclusive(&mutex->lock);
-#  else
-		LeaveCriticalSection(&mutex->lock);
-#  endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-		os_unfair_lock_unlock(&mutex->lock);
-#elif (defined(JEMALLOC_OSSPIN))
-		OSSpinLockUnlock(&mutex->lock);
-#else
-		pthread_mutex_unlock(&mutex->lock);
-#endif
-	}
+static inline bool
+malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
+	return MALLOC_MUTEX_TRYLOCK(mutex);
 }
 
-JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
-	witness_assert_owner(tsdn, &mutex->witness);
+static inline void
+mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	if (config_stats) {
+		mutex_prof_data_t *data = &mutex->prof_data;
+		data->n_lock_ops++;
+		if (data->prev_owner != tsdn) {
+			data->prev_owner = tsdn;
+			data->n_owner_switches++;
+		}
+	}
 }
 
-JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
-	witness_assert_not_owner(tsdn, &mutex->witness);
+/* Trylock: return false if the lock is successfully acquired. */
+static inline bool
+malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+	if (isthreaded) {
+		if (malloc_mutex_trylock_final(mutex)) {
+			return true;
+		}
+		mutex_owner_stats_update(tsdn, mutex);
+	}
+	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+
+	return false;
 }
-#endif
 
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+/* Aggregate lock prof data. */
+static inline void
+malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
+	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
+	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
+		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
+	}
+
+	sum->n_wait_times += data->n_wait_times;
+	sum->n_spin_acquired += data->n_spin_acquired;
+
+	if (sum->max_n_thds < data->max_n_thds) {
+		sum->max_n_thds = data->max_n_thds;
+	}
+	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
+	    ATOMIC_RELAXED);
+	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
+	    &data->n_waiting_thds, ATOMIC_RELAXED);
+	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
+	    ATOMIC_RELAXED);
+	sum->n_owner_switches += data->n_owner_switches;
+	sum->n_lock_ops += data->n_lock_ops;
+}
+
+static inline void
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+	if (isthreaded) {
+		if (malloc_mutex_trylock_final(mutex)) {
+			malloc_mutex_lock_slow(mutex);
+		}
+		mutex_owner_stats_update(tsdn, mutex);
+	}
+	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+static inline void
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+	if (isthreaded) {
+		MALLOC_MUTEX_UNLOCK(mutex);
+	}
+}
+
+static inline void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+static inline void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+	mutex_prof_data_t *source = &mutex->prof_data;
+	/* Can only read holding the mutex. */
+	malloc_mutex_assert_owner(tsdn, mutex);

+	/*
+	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
+	 * atomic data), but the mutex protection makes this safe, and writing
+	 * a member-for-member copy is tedious for this situation.
+	 */
+	*data = *source;
+	/* n_wait_thds is not reported (modified w/o locking). */
+	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+#endif /* JEMALLOC_INTERNAL_MUTEX_H */
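The rewritten lock path tries MALLOC_MUTEX_TRYLOCK inline and only falls back to the out-of-line malloc_mutex_lock_slow() on contention, updating owner stats once the lock is held. A hedged standalone sketch of that try-then-slow-path shape using C11 atomics (all names hypothetical; a real slow path would block rather than spin forever):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;
	unsigned long n_lock_ops;   /* stats: touched only while holding the lock */
} toy_mutex_t;

static bool
toy_trylock(toy_mutex_t *m) {
	/* true on success, mirroring an inline trylock fast path. */
	return !atomic_exchange_explicit(&m->locked, true,
	    memory_order_acquire);
}

static void
toy_lock_slow(toy_mutex_t *m) {
	while (!toy_trylock(m)) {
		/* Bounded spin (cf. MALLOC_MUTEX_MAX_SPIN) before retrying. */
		for (int i = 0; i < 250 && atomic_load_explicit(&m->locked,
		    memory_order_relaxed); i++) {
			/* spin */
		}
	}
}

static void
toy_lock(toy_mutex_t *m) {
	if (!toy_trylock(m)) {    /* fast path stays inline */
		toy_lock_slow(m);     /* contention handled out of line */
	}
	m->n_lock_ops++;              /* safe: we now own the lock */
}

Keeping the contended path out of line keeps the common uncontended acquire small enough to inline at every call site.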
94
include/jemalloc/internal/mutex_pool.h
Normal file
@ -0,0 +1,94 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
#define JEMALLOC_INTERNAL_MUTEX_POOL_H

#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/witness.h"

/* We do mod reductions by this value, so it should be kept a power of 2. */
#define MUTEX_POOL_SIZE 256

typedef struct mutex_pool_s mutex_pool_t;
struct mutex_pool_s {
	malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
};

bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);

/* Internal helper - not meant to be called outside this module. */
static inline malloc_mutex_t *
mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
	size_t hash_result[2];
	hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
	return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
}

static inline void
mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
	for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
		malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
	}
}

/*
 * Note that a mutex pool doesn't work exactly the way an embedded mutex would.
 * You're not allowed to acquire mutexes in the pool one at a time.  You have to
 * acquire all the mutexes you'll need in a single function call, and then
 * release them all in a single function call.
 */

static inline void
mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	mutex_pool_assert_not_held(tsdn, pool);

	malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
	malloc_mutex_lock(tsdn, mutex);
}

static inline void
mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
	malloc_mutex_unlock(tsdn, mutex);

	mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
	mutex_pool_assert_not_held(tsdn, pool);

	malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
	malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
	if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
		malloc_mutex_lock(tsdn, mutex1);
		malloc_mutex_lock(tsdn, mutex2);
	} else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
		malloc_mutex_lock(tsdn, mutex1);
	} else {
		malloc_mutex_lock(tsdn, mutex2);
		malloc_mutex_lock(tsdn, mutex1);
	}
}

static inline void
mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
    uintptr_t key2) {
	malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
	malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
	if (mutex1 == mutex2) {
		malloc_mutex_unlock(tsdn, mutex1);
	} else {
		malloc_mutex_unlock(tsdn, mutex1);
		malloc_mutex_unlock(tsdn, mutex2);
	}

	mutex_pool_assert_not_held(tsdn, pool);
}

static inline void
mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
	malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
}

#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
86
include/jemalloc/internal/mutex_prof.h
Normal file
@ -0,0 +1,86 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/tsd_types.h"

#define MUTEX_PROF_GLOBAL_MUTEXES \
    OP(background_thread) \
    OP(ctl) \
    OP(prof)

typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
	MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;

#define MUTEX_PROF_ARENA_MUTEXES \
    OP(large) \
    OP(extent_avail) \
    OP(extents_dirty) \
    OP(extents_muzzy) \
    OP(extents_retained) \
    OP(decay_dirty) \
    OP(decay_muzzy) \
    OP(base) \
    OP(tcache_list)

typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
	mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;

#define MUTEX_PROF_COUNTERS \
    OP(num_ops, uint64_t) \
    OP(num_wait, uint64_t) \
    OP(num_spin_acq, uint64_t) \
    OP(num_owner_switch, uint64_t) \
    OP(total_wait_time, uint64_t) \
    OP(max_wait_time, uint64_t) \
    OP(max_num_thds, uint32_t)

typedef enum {
#define OP(counter, type) mutex_counter_##counter,
	MUTEX_PROF_COUNTERS
#undef OP
	mutex_prof_num_counters
} mutex_prof_counter_ind_t;

typedef struct {
	/*
	 * Counters touched on the slow path, i.e. when there is lock
	 * contention.  We update them once we have the lock.
	 */
	/* Total time (in nanoseconds) spent waiting on this mutex. */
	nstime_t tot_wait_time;
	/* Max time (in nanoseconds) spent on a single lock operation. */
	nstime_t max_wait_time;
	/* # of times we have to wait for this mutex (after spinning). */
	uint64_t n_wait_times;
	/* # of times acquired the mutex through local spinning. */
	uint64_t n_spin_acquired;
	/* Max # of threads waiting for the mutex at the same time. */
	uint32_t max_n_thds;
	/* Current # of threads waiting on the lock.  Atomic synced. */
	atomic_u32_t n_waiting_thds;

	/*
	 * Data touched on the fast path.  These are modified right after we
	 * grab the lock, so they're placed closest to the end (i.e. right
	 * before the lock) so that we have a higher chance of them being on
	 * the same cacheline.
	 */
	/* # of times the mutex holder is different than the previous one. */
	uint64_t n_owner_switches;
	/* Previous mutex holder, to facilitate n_owner_switches. */
	tsdn_t *prev_owner;
	/* # of lock() operations in total. */
	uint64_t n_lock_ops;
} mutex_prof_data_t;

#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
@ -1,48 +1,34 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_NSTIME_H
+#define JEMALLOC_INTERNAL_NSTIME_H
 
-typedef struct nstime_s nstime_t;
-
 /* Maximum supported number of seconds (~584 years). */
 #define NSTIME_SEC_MAX KQU(18446744072)
+#define NSTIME_ZERO_INITIALIZER {0}
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct nstime_s {
+typedef struct {
 	uint64_t ns;
-};
+} nstime_t;
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
 void nstime_init(nstime_t *time, uint64_t ns);
 void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
 uint64_t nstime_ns(const nstime_t *time);
 uint64_t nstime_sec(const nstime_t *time);
+uint64_t nstime_msec(const nstime_t *time);
 uint64_t nstime_nsec(const nstime_t *time);
 void nstime_copy(nstime_t *time, const nstime_t *source);
 int nstime_compare(const nstime_t *a, const nstime_t *b);
 void nstime_add(nstime_t *time, const nstime_t *addend);
+void nstime_iadd(nstime_t *time, uint64_t addend);
 void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
+void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
 void nstime_imultiply(nstime_t *time, uint64_t multiplier);
 void nstime_idivide(nstime_t *time, uint64_t divisor);
 uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#ifdef JEMALLOC_JET
+
 typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *nstime_monotonic;
+extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
 
 typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *nstime_update;
-#else
-bool nstime_monotonic(void);
-bool nstime_update(nstime_t *time);
-#endif
+extern nstime_update_t *JET_MUTABLE nstime_update;
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_NSTIME_H */
|
|||||||
/******************************************************************************/
|
#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
|
||||||
#ifdef JEMALLOC_H_TYPES
|
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_TYPES */
|
/* Page size. LG_PAGE is determined by the configure script. */
|
||||||
/******************************************************************************/
|
#ifdef PAGE_MASK
|
||||||
#ifdef JEMALLOC_H_STRUCTS
|
# undef PAGE_MASK
|
||||||
|
#endif
|
||||||
|
#define PAGE ((size_t)(1U << LG_PAGE))
|
||||||
|
#define PAGE_MASK ((size_t)(PAGE - 1))
|
||||||
|
/* Return the page base address for the page containing address a. */
|
||||||
|
#define PAGE_ADDR2BASE(a) \
|
||||||
|
((void *)((uintptr_t)(a) & ~PAGE_MASK))
|
||||||
|
/* Return the smallest pagesize multiple that is >= s. */
|
||||||
|
#define PAGE_CEILING(s) \
|
||||||
|
(((s) + PAGE_MASK) & ~PAGE_MASK)
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_STRUCTS */
|
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
|
||||||
/******************************************************************************/
|
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
|
||||||
#ifdef JEMALLOC_H_EXTERNS
|
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
|
||||||
|
/* Return the huge page base address for the huge page containing address a. */
|
||||||
|
#define HUGEPAGE_ADDR2BASE(a) \
|
||||||
|
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
|
||||||
|
/* Return the smallest pagesize multiple that is >= s. */
|
||||||
|
#define HUGEPAGE_CEILING(s) \
|
||||||
|
(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
|
||||||
|
|
||||||
void *pages_map(void *addr, size_t size, bool *commit);
|
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
|
||||||
|
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||||
|
# define PAGES_CAN_PURGE_LAZY
|
||||||
|
#endif
|
||||||
|
/*
|
||||||
|
* PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
|
||||||
|
*
|
||||||
|
* The only supported way to hard-purge on Windows is to decommit and then
|
||||||
|
* re-commit, but doing so is racy, and if re-commit fails it's a pain to
|
||||||
|
* propagate the "poisoned" memory state. Since we typically decommit as the
|
||||||
|
* next step after purging on Windows anyway, there's no point in adding such
|
||||||
|
* complexity.
|
||||||
|
*/
|
||||||
|
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
|
||||||
|
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
|
||||||
|
defined(JEMALLOC_MAPS_COALESCE))
|
||||||
|
# define PAGES_CAN_PURGE_FORCED
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static const bool pages_can_purge_lazy =
|
||||||
|
#ifdef PAGES_CAN_PURGE_LAZY
|
||||||
|
true
|
||||||
|
#else
|
||||||
|
false
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
static const bool pages_can_purge_forced =
|
||||||
|
#ifdef PAGES_CAN_PURGE_FORCED
|
||||||
|
true
|
||||||
|
#else
|
||||||
|
false
|
||||||
|
#endif
|
||||||
|
;
|
||||||
|
|
||||||
|
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
|
||||||
void pages_unmap(void *addr, size_t size);
|
void pages_unmap(void *addr, size_t size);
|
||||||
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
|
|
||||||
size_t size, bool *commit);
|
|
||||||
bool pages_commit(void *addr, size_t size);
|
bool pages_commit(void *addr, size_t size);
|
||||||
bool pages_decommit(void *addr, size_t size);
|
bool pages_decommit(void *addr, size_t size);
|
||||||
bool pages_purge(void *addr, size_t size);
|
bool pages_purge_lazy(void *addr, size_t size);
|
||||||
|
bool pages_purge_forced(void *addr, size_t size);
|
||||||
bool pages_huge(void *addr, size_t size);
|
bool pages_huge(void *addr, size_t size);
|
||||||
bool pages_nohuge(void *addr, size_t size);
|
bool pages_nohuge(void *addr, size_t size);
|
||||||
void pages_boot(void);
|
bool pages_boot(void);
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_EXTERNS */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_INLINES
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_INLINES */
|
|
||||||
/******************************************************************************/
|
|
||||||
|
|
||||||
|
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
|
||||||
|
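PAGE_CEILING relies on PAGE being a power of two: adding PAGE_MASK and then clearing the low bits rounds any size up to the next page multiple in two instructions, with no division. A hedged worked sketch assuming 4 KiB pages (the DEMO_* names are illustrative, not jemalloc's):

#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_PAGE 12                              /* assume 4 KiB pages */
#define DEMO_PAGE ((size_t)(1U << DEMO_LG_PAGE))
#define DEMO_PAGE_MASK ((size_t)(DEMO_PAGE - 1))
#define DEMO_PAGE_CEILING(s) (((s) + DEMO_PAGE_MASK) & ~DEMO_PAGE_MASK)

int
main(void) {
	/* 5000 + 4095 = 9095; clearing the low 12 bits yields 8192. */
	printf("%zu -> %zu\n", (size_t)5000, DEMO_PAGE_CEILING((size_t)5000));
	/* Already-aligned sizes are unchanged: 8192 -> 8192. */
	printf("%zu -> %zu\n", (size_t)8192, DEMO_PAGE_CEILING((size_t)8192));
	return 0;
}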
@ -58,17 +58,18 @@ struct { \
 	phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
 	phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
 	phn_next_set(a_type, a_field, a_phn1, phn0child); \
-	if (phn0child != NULL) \
+	if (phn0child != NULL) { \
 		phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+	} \
 	phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
 } while (0)
 
 #define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
-	if (a_phn0 == NULL) \
+	if (a_phn0 == NULL) { \
 		r_phn = a_phn1; \
-	else if (a_phn1 == NULL) \
+	} else if (a_phn1 == NULL) { \
 		r_phn = a_phn0; \
-	else if (a_cmp(a_phn0, a_phn1) < 0) { \
+	} else if (a_cmp(a_phn0, a_phn1) < 0) { \
 		phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
 		    a_cmp); \
 		r_phn = a_phn0; \
@ -95,8 +96,9 @@ struct { \
 	 */ \
 	if (phn1 != NULL) { \
 		a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
-		if (phnrest != NULL) \
+		if (phnrest != NULL) { \
 			phn_prev_set(a_type, a_field, phnrest, NULL); \
+		} \
 		phn_prev_set(a_type, a_field, phn0, NULL); \
 		phn_next_set(a_type, a_field, phn0, NULL); \
 		phn_prev_set(a_type, a_field, phn1, NULL); \
@ -150,8 +152,9 @@ struct { \
 				    NULL); \
 				phn_merge(a_type, a_field, phn0, phn1, \
 				    a_cmp, phn0); \
-				if (head == NULL) \
+				if (head == NULL) { \
 					break; \
+				} \
 				phn_next_set(a_type, a_field, tail, \
 				    phn0); \
 				tail = phn0; \
@ -179,9 +182,9 @@ struct { \
 
 #define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
 	a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
-	if (lchild == NULL) \
+	if (lchild == NULL) { \
 		r_phn = NULL; \
-	else { \
+	} else { \
 		ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
 		    r_phn); \
 	} \
@ -195,8 +198,10 @@ struct { \
 a_attr void a_prefix##new(a_ph_type *ph); \
 a_attr bool a_prefix##empty(a_ph_type *ph); \
 a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr a_type *a_prefix##any(a_ph_type *ph); \
 a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
 a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
 a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
 
 /*
@ -205,30 +210,34 @@ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
 */
 #define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
 a_attr void \
-a_prefix##new(a_ph_type *ph) \
-{ \
-\
+a_prefix##new(a_ph_type *ph) { \
 	memset(ph, 0, sizeof(ph(a_type))); \
 } \
 a_attr bool \
-a_prefix##empty(a_ph_type *ph) \
-{ \
-\
+a_prefix##empty(a_ph_type *ph) { \
 	return (ph->ph_root == NULL); \
 } \
 a_attr a_type * \
-a_prefix##first(a_ph_type *ph) \
-{ \
-\
-	if (ph->ph_root == NULL) \
-		return (NULL); \
+a_prefix##first(a_ph_type *ph) { \
+	if (ph->ph_root == NULL) { \
+		return NULL; \
+	} \
 	ph_merge_aux(a_type, a_field, ph, a_cmp); \
-	return (ph->ph_root); \
+	return ph->ph_root; \
+} \
+a_attr a_type * \
+a_prefix##any(a_ph_type *ph) { \
+	if (ph->ph_root == NULL) { \
+		return NULL; \
+	} \
+	a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
+	if (aux != NULL) { \
+		return aux; \
+	} \
+	return ph->ph_root; \
 } \
 a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) \
-{ \
-\
+a_prefix##insert(a_ph_type *ph, a_type *phn) { \
 	memset(&phn->a_field, 0, sizeof(phn(a_type))); \
 \
 	/* \
@ -239,9 +248,9 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
 	 * constant-time, whereas eager merging would make insert \
 	 * O(log n). \
 	 */ \
-	if (ph->ph_root == NULL) \
+	if (ph->ph_root == NULL) { \
 		ph->ph_root = phn; \
-	else { \
+	} else { \
 		phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
 		    a_field, ph->ph_root)); \
 		if (phn_next_get(a_type, a_field, ph->ph_root) != \
@ -255,12 +264,12 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
 	} \
 } \
 a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) \
-{ \
+a_prefix##remove_first(a_ph_type *ph) { \
 	a_type *ret; \
 \
-	if (ph->ph_root == NULL) \
-		return (NULL); \
+	if (ph->ph_root == NULL) { \
+		return NULL; \
+	} \
 	ph_merge_aux(a_type, a_field, ph, a_cmp); \
 \
 	ret = ph->ph_root; \
@ -268,18 +277,54 @@ a_prefix##remove_first(a_ph_type *ph) \
 	ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
 	    ph->ph_root); \
 \
-	return (ret); \
+	return ret; \
+} \
+a_attr a_type * \
+a_prefix##remove_any(a_ph_type *ph) { \
+	/* \
+	 * Remove the most recently inserted aux list element, or the \
+	 * root if the aux list is empty.  This has the effect of \
+	 * behaving as a LIFO (and insertion/removal is therefore \
+	 * constant-time) if a_prefix##[remove_]first() are never \
+	 * called. \
+	 */ \
+	if (ph->ph_root == NULL) { \
+		return NULL; \
+	} \
+	a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
+	if (ret != NULL) { \
+		a_type *aux = phn_next_get(a_type, a_field, ret); \
+		phn_next_set(a_type, a_field, ph->ph_root, aux); \
+		if (aux != NULL) { \
+			phn_prev_set(a_type, a_field, aux, \
+			    ph->ph_root); \
+		} \
+		return ret; \
+	} \
+	ret = ph->ph_root; \
+	ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+	    ph->ph_root); \
+	return ret; \
 } \
 a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) \
-{ \
+a_prefix##remove(a_ph_type *ph, a_type *phn) { \
 	a_type *replace, *parent; \
 \
-	/* \
-	 * We can delete from aux list without merging it, but we need \
-	 * to merge if we are dealing with the root node. \
-	 */ \
 	if (ph->ph_root == phn) { \
+		/* \
+		 * We can delete from aux list without merging it, but \
+		 * we need to merge if we are dealing with the root \
+		 * node and it has children. \
+		 */ \
+		if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
+			ph->ph_root = phn_next_get(a_type, a_field, \
+			    phn); \
+			if (ph->ph_root != NULL) { \
+				phn_prev_set(a_type, a_field, \
+				    ph->ph_root, NULL); \
+			} \
+			return; \
+		} \
 		ph_merge_aux(a_type, a_field, ph, a_cmp); \
 		if (ph->ph_root == phn) { \
 			ph_merge_children(a_type, a_field, ph->ph_root, \
@ -290,9 +335,10 @@ a_prefix##remove(a_ph_type *ph, a_type *phn) \
 \
 	/* Get parent (if phn is leftmost child) before mutating. */ \
 	if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
-		if (phn_lchild_get(a_type, a_field, parent) != phn) \
+		if (phn_lchild_get(a_type, a_field, parent) != phn) { \
 			parent = NULL; \
+		} \
 	} \
 	/* Find a possible replacement node, and link to parent. */ \
 	ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
 	/* Set next/prev for sibling linked list. */ \
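ph_gen() stamps out a complete type-safe pairing-heap implementation for a given node type, with the linkage embedded in the node itself; the new remove_any() degrades gracefully to constant-time LIFO behavior when first()/remove_first() are never called. Reproducing the heap here would require the full ph.h, so the following is a hedged, deliberately simplified analogue of the same generator-macro pattern (it generates an intrusive LIFO stack, not a pairing heap; all names are hypothetical):

#include <stddef.h>
#include <stdio.h>

/* Generator macro: type-safe intrusive-container functions for any node. */
#define stk(a_type) struct { a_type *top; }
#define stk_gen(a_prefix, a_stk_type, a_type, a_field)	\
static void						\
a_prefix##push(a_stk_type *s, a_type *n) {		\
	n->a_field = s->top;				\
	s->top = n;					\
}							\
static a_type *						\
a_prefix##pop(a_stk_type *s) {				\
	a_type *n = s->top;				\
	if (n != NULL) {				\
		s->top = n->a_field;			\
	}						\
	return n;					\
}

typedef struct node_s node_t;
struct node_s {
	int value;
	node_t *link;    /* intrusive linkage, like ph.h's phn(a_type) */
};

typedef stk(node_t) node_stack_t;
stk_gen(node_, node_stack_t, node_t, link)

int
main(void) {
	node_stack_t s = {NULL};
	node_t a = {1, NULL}, b = {2, NULL};
	node_push(&s, &a);
	node_push(&s, &b);
	printf("%d %d\n", node_pop(&s)->value, node_pop(&s)->value); /* 2 1 */
	return 0;
}

The design choice mirrors ph.h: nodes carry their own links, so the container never allocates, which matters inside an allocator.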
@ -1,5 +1,5 @@
 #!/bin/sh
 
-for symbol in `cat $1` ; do
+for symbol in `cat "$@"` ; do
 	echo "#define ${symbol} JEMALLOC_N(${symbol})"
 done
51
include/jemalloc/internal/private_symbols.sh
Executable file
@ -0,0 +1,51 @@
#!/bin/sh
#
# Generate private_symbols[_jet].awk.
#
# Usage: private_symbols.sh <sym_prefix> <sym>*
#
# <sym_prefix> is typically "" or "_".

sym_prefix=$1
shift

cat <<EOF
#!/usr/bin/env awk -f

BEGIN {
  sym_prefix = "${sym_prefix}"
  split("\\
EOF

for public_sym in "$@" ; do
  cat <<EOF
       ${sym_prefix}${public_sym} \\
EOF
done

cat <<"EOF"
       ", exported_symbol_names)
  # Store exported symbol names as keys in exported_symbols.
  for (i in exported_symbol_names) {
    exported_symbols[exported_symbol_names[i]] = 1
  }
}

# Process 'nm -a <c_source.o>' output.
#
# Handle lines like:
#   0000000000000008 D opt_junk
#   0000000000007574 T malloc_initialized
(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
}

# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
#
# Handle lines like:
#   353 00008098 SECT4  notype       External     | opt_junk
#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
  print $NF
}
EOF
@ -1,639 +0,0 @@
a0dalloc
a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
arena_bitselm_get_const
arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
arena_decay_time_default_get
arena_decay_time_default_set
arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_extent_sn_next
arena_get
arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
arena_mapbitsp_get_const
arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
arena_miscelm_get_const
arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
arena_node_alloc
arena_node_dalloc
arena_nthreads_dec
arena_nthreads_get
arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
arena_tdata_get
arena_tdata_get_hard
arenas
arenas_tdata_bypass_cleanup
arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
atomic_cas_p
atomic_cas_u
atomic_cas_uint32
atomic_cas_uint64
atomic_cas_z
atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
base_postfork_parent
base_prefork
base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
bitmap_init
bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
bootstrap_calloc
bootstrap_free
bootstrap_malloc
bt_init
buferror
chunk_alloc_base
chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
chunk_dss_mergeable
chunk_dss_prec_get
chunk_dss_prec_set
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
chunk_purge_wrapper
chunk_register
chunks_rtree
chunksize
chunksize_mask
ckh_count
ckh_delete
ckh_insert
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
ctl_boot
ctl_bymib
ctl_byname
ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
extent_node_init
extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_size_quantize_ceil
extent_size_quantize_floor
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
extent_tree_ad_iter_recurse
extent_tree_ad_iter_start
extent_tree_ad_last
extent_tree_ad_new
extent_tree_ad_next
extent_tree_ad_nsearch
extent_tree_ad_prev
extent_tree_ad_psearch
extent_tree_ad_remove
extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szsnad_destroy
extent_tree_szsnad_destroy_recurse
extent_tree_szsnad_empty
extent_tree_szsnad_first
extent_tree_szsnad_insert
extent_tree_szsnad_iter
extent_tree_szsnad_iter_recurse
extent_tree_szsnad_iter_start
extent_tree_szsnad_last
extent_tree_szsnad_new
extent_tree_szsnad_next
extent_tree_szsnad_nsearch
extent_tree_szsnad_prev
extent_tree_szsnad_psearch
extent_tree_szsnad_remove
extent_tree_szsnad_reverse_iter
extent_tree_szsnad_reverse_iter_recurse
extent_tree_szsnad_reverse_iter_start
extent_tree_szsnad_search
ffs_llu
ffs_lu
ffs_u
ffs_u32
ffs_u64
ffs_zu
get_errno
hash
hash_fmix_32
hash_fmix_64
hash_get_block_32
hash_get_block_64
hash_rotl_32
hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
huge_aalloc
huge_dalloc
huge_dalloc_junk
huge_malloc
huge_palloc
huge_prof_tctx_get
huge_prof_tctx_reset
huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
ialloc
iallocztm
iarena_cleanup
idalloc
idalloctm
in_valgrind
index2size
index2size_compute
index2size_lookup
index2size_tab
ipalloc
ipalloct
ipallocztm
iqalloc
iralloc
iralloct
iralloct_realign
isalloc
isdalloct
isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_assert_not_owner
malloc_mutex_assert_owner
malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
malloc_mutex_postfork_parent
malloc_mutex_prefork
malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
malloc_tsd_boot0
malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
malloc_tsd_no_cleanup
malloc_vcprintf
malloc_vsnprintf
malloc_write
|
||||||
map_bias
|
|
||||||
map_misc_offset
|
|
||||||
mb_write
|
|
||||||
narenas_auto
|
|
||||||
narenas_tdata_cleanup
|
|
||||||
narenas_total_get
|
|
||||||
ncpus
|
|
||||||
nhbins
|
|
||||||
nhclasses
|
|
||||||
nlclasses
|
|
||||||
nstime_add
|
|
||||||
nstime_compare
|
|
||||||
nstime_copy
|
|
||||||
nstime_divide
|
|
||||||
nstime_idivide
|
|
||||||
nstime_imultiply
|
|
||||||
nstime_init
|
|
||||||
nstime_init2
|
|
||||||
nstime_monotonic
|
|
||||||
nstime_ns
|
|
||||||
nstime_nsec
|
|
||||||
nstime_sec
|
|
||||||
nstime_subtract
|
|
||||||
nstime_update
|
|
||||||
opt_abort
|
|
||||||
opt_decay_time
|
|
||||||
opt_dss
|
|
||||||
opt_junk
|
|
||||||
opt_junk_alloc
|
|
||||||
opt_junk_free
|
|
||||||
opt_lg_chunk
|
|
||||||
opt_lg_dirty_mult
|
|
||||||
opt_lg_prof_interval
|
|
||||||
opt_lg_prof_sample
|
|
||||||
opt_lg_tcache_max
|
|
||||||
opt_narenas
|
|
||||||
opt_prof
|
|
||||||
opt_prof_accum
|
|
||||||
opt_prof_active
|
|
||||||
opt_prof_final
|
|
||||||
opt_prof_gdump
|
|
||||||
opt_prof_leak
|
|
||||||
opt_prof_prefix
|
|
||||||
opt_prof_thread_active_init
|
|
||||||
opt_purge
|
|
||||||
opt_quarantine
|
|
||||||
opt_redzone
|
|
||||||
opt_stats_print
|
|
||||||
opt_tcache
|
|
||||||
opt_thp
|
|
||||||
opt_utrace
|
|
||||||
opt_xmalloc
|
|
||||||
opt_zero
|
|
||||||
p2rz
|
|
||||||
pages_boot
|
|
||||||
pages_commit
|
|
||||||
pages_decommit
|
|
||||||
pages_huge
|
|
||||||
pages_map
|
|
||||||
pages_nohuge
|
|
||||||
pages_purge
|
|
||||||
pages_trim
|
|
||||||
pages_unmap
|
|
||||||
pind2sz
|
|
||||||
pind2sz_compute
|
|
||||||
pind2sz_lookup
|
|
||||||
pind2sz_tab
|
|
||||||
pow2_ceil_u32
|
|
||||||
pow2_ceil_u64
|
|
||||||
pow2_ceil_zu
|
|
||||||
prng_lg_range_u32
|
|
||||||
prng_lg_range_u64
|
|
||||||
prng_lg_range_zu
|
|
||||||
prng_range_u32
|
|
||||||
prng_range_u64
|
|
||||||
prng_range_zu
|
|
||||||
prng_state_next_u32
|
|
||||||
prng_state_next_u64
|
|
||||||
prng_state_next_zu
|
|
||||||
prof_active
|
|
||||||
prof_active_get
|
|
||||||
prof_active_get_unlocked
|
|
||||||
prof_active_set
|
|
||||||
prof_alloc_prep
|
|
||||||
prof_alloc_rollback
|
|
||||||
prof_backtrace
|
|
||||||
prof_boot0
|
|
||||||
prof_boot1
|
|
||||||
prof_boot2
|
|
||||||
prof_bt_count
|
|
||||||
prof_dump_header
|
|
||||||
prof_dump_open
|
|
||||||
prof_free
|
|
||||||
prof_free_sampled_object
|
|
||||||
prof_gdump
|
|
||||||
prof_gdump_get
|
|
||||||
prof_gdump_get_unlocked
|
|
||||||
prof_gdump_set
|
|
||||||
prof_gdump_val
|
|
||||||
prof_idump
|
|
||||||
prof_interval
|
|
||||||
prof_lookup
|
|
||||||
prof_malloc
|
|
||||||
prof_malloc_sample_object
|
|
||||||
prof_mdump
|
|
||||||
prof_postfork_child
|
|
||||||
prof_postfork_parent
|
|
||||||
prof_prefork0
|
|
||||||
prof_prefork1
|
|
||||||
prof_realloc
|
|
||||||
prof_reset
|
|
||||||
prof_sample_accum_update
|
|
||||||
prof_sample_threshold_update
|
|
||||||
prof_tctx_get
|
|
||||||
prof_tctx_reset
|
|
||||||
prof_tctx_set
|
|
||||||
prof_tdata_cleanup
|
|
||||||
prof_tdata_count
|
|
||||||
prof_tdata_get
|
|
||||||
prof_tdata_init
|
|
||||||
prof_tdata_reinit
|
|
||||||
prof_thread_active_get
|
|
||||||
prof_thread_active_init_get
|
|
||||||
prof_thread_active_init_set
|
|
||||||
prof_thread_active_set
|
|
||||||
prof_thread_name_get
|
|
||||||
prof_thread_name_set
|
|
||||||
psz2ind
|
|
||||||
psz2u
|
|
||||||
purge_mode_names
|
|
||||||
quarantine
|
|
||||||
quarantine_alloc_hook
|
|
||||||
quarantine_alloc_hook_work
|
|
||||||
quarantine_cleanup
|
|
||||||
rtree_child_read
|
|
||||||
rtree_child_read_hard
|
|
||||||
rtree_child_tryread
|
|
||||||
rtree_delete
|
|
||||||
rtree_get
|
|
||||||
rtree_new
|
|
||||||
rtree_node_valid
|
|
||||||
rtree_set
|
|
||||||
rtree_start_level
|
|
||||||
rtree_subkey
|
|
||||||
rtree_subtree_read
|
|
||||||
rtree_subtree_read_hard
|
|
||||||
rtree_subtree_tryread
|
|
||||||
rtree_val_read
|
|
||||||
rtree_val_write
|
|
||||||
run_quantize_ceil
|
|
||||||
run_quantize_floor
|
|
||||||
s2u
|
|
||||||
s2u_compute
|
|
||||||
s2u_lookup
|
|
||||||
sa2u
|
|
||||||
set_errno
|
|
||||||
size2index
|
|
||||||
size2index_compute
|
|
||||||
size2index_lookup
|
|
||||||
size2index_tab
|
|
||||||
spin_adaptive
|
|
||||||
spin_init
|
|
||||||
stats_cactive
|
|
||||||
stats_cactive_add
|
|
||||||
stats_cactive_get
|
|
||||||
stats_cactive_sub
|
|
||||||
stats_print
|
|
||||||
tcache_alloc_easy
|
|
||||||
tcache_alloc_large
|
|
||||||
tcache_alloc_small
|
|
||||||
tcache_alloc_small_hard
|
|
||||||
tcache_arena_reassociate
|
|
||||||
tcache_bin_flush_large
|
|
||||||
tcache_bin_flush_small
|
|
||||||
tcache_bin_info
|
|
||||||
tcache_boot
|
|
||||||
tcache_cleanup
|
|
||||||
tcache_create
|
|
||||||
tcache_dalloc_large
|
|
||||||
tcache_dalloc_small
|
|
||||||
tcache_enabled_cleanup
|
|
||||||
tcache_enabled_get
|
|
||||||
tcache_enabled_set
|
|
||||||
tcache_event
|
|
||||||
tcache_event_hard
|
|
||||||
tcache_flush
|
|
||||||
tcache_get
|
|
||||||
tcache_get_hard
|
|
||||||
tcache_maxclass
|
|
||||||
tcache_postfork_child
|
|
||||||
tcache_postfork_parent
|
|
||||||
tcache_prefork
|
|
||||||
tcache_salloc
|
|
||||||
tcache_stats_merge
|
|
||||||
tcaches
|
|
||||||
tcaches_create
|
|
||||||
tcaches_destroy
|
|
||||||
tcaches_flush
|
|
||||||
tcaches_get
|
|
||||||
thread_allocated_cleanup
|
|
||||||
thread_deallocated_cleanup
|
|
||||||
ticker_copy
|
|
||||||
ticker_init
|
|
||||||
ticker_read
|
|
||||||
ticker_tick
|
|
||||||
ticker_ticks
|
|
||||||
tsd_arena_get
|
|
||||||
tsd_arena_set
|
|
||||||
tsd_arenap_get
|
|
||||||
tsd_arenas_tdata_bypass_get
|
|
||||||
tsd_arenas_tdata_bypass_set
|
|
||||||
tsd_arenas_tdata_bypassp_get
|
|
||||||
tsd_arenas_tdata_get
|
|
||||||
tsd_arenas_tdata_set
|
|
||||||
tsd_arenas_tdatap_get
|
|
||||||
tsd_boot
|
|
||||||
tsd_boot0
|
|
||||||
tsd_boot1
|
|
||||||
tsd_booted
|
|
||||||
tsd_booted_get
|
|
||||||
tsd_cleanup
|
|
||||||
tsd_cleanup_wrapper
|
|
||||||
tsd_fetch
|
|
||||||
tsd_fetch_impl
|
|
||||||
tsd_get
|
|
||||||
tsd_get_allocates
|
|
||||||
tsd_iarena_get
|
|
||||||
tsd_iarena_set
|
|
||||||
tsd_iarenap_get
|
|
||||||
tsd_initialized
|
|
||||||
tsd_init_check_recursion
|
|
||||||
tsd_init_finish
|
|
||||||
tsd_init_head
|
|
||||||
tsd_narenas_tdata_get
|
|
||||||
tsd_narenas_tdata_set
|
|
||||||
tsd_narenas_tdatap_get
|
|
||||||
tsd_wrapper_get
|
|
||||||
tsd_wrapper_set
|
|
||||||
tsd_nominal
|
|
||||||
tsd_prof_tdata_get
|
|
||||||
tsd_prof_tdata_set
|
|
||||||
tsd_prof_tdatap_get
|
|
||||||
tsd_quarantine_get
|
|
||||||
tsd_quarantine_set
|
|
||||||
tsd_quarantinep_get
|
|
||||||
tsd_set
|
|
||||||
tsd_tcache_enabled_get
|
|
||||||
tsd_tcache_enabled_set
|
|
||||||
tsd_tcache_enabledp_get
|
|
||||||
tsd_tcache_get
|
|
||||||
tsd_tcache_set
|
|
||||||
tsd_tcachep_get
|
|
||||||
tsd_thread_allocated_get
|
|
||||||
tsd_thread_allocated_set
|
|
||||||
tsd_thread_allocatedp_get
|
|
||||||
tsd_thread_deallocated_get
|
|
||||||
tsd_thread_deallocated_set
|
|
||||||
tsd_thread_deallocatedp_get
|
|
||||||
tsd_tls
|
|
||||||
tsd_tsd
|
|
||||||
tsd_tsdn
|
|
||||||
tsd_witness_fork_get
|
|
||||||
tsd_witness_fork_set
|
|
||||||
tsd_witness_forkp_get
|
|
||||||
tsd_witnesses_get
|
|
||||||
tsd_witnesses_set
|
|
||||||
tsd_witnessesp_get
|
|
||||||
tsdn_fetch
|
|
||||||
tsdn_null
|
|
||||||
tsdn_tsd
|
|
||||||
u2rz
|
|
||||||
valgrind_freelike_block
|
|
||||||
valgrind_make_mem_defined
|
|
||||||
valgrind_make_mem_noaccess
|
|
||||||
valgrind_make_mem_undefined
|
|
||||||
witness_assert_depth
|
|
||||||
witness_assert_depth_to_rank
|
|
||||||
witness_assert_lockless
|
|
||||||
witness_assert_not_owner
|
|
||||||
witness_assert_owner
|
|
||||||
witness_depth_error
|
|
||||||
witness_fork_cleanup
|
|
||||||
witness_init
|
|
||||||
witness_lock
|
|
||||||
witness_lock_error
|
|
||||||
witness_not_owner_error
|
|
||||||
witness_owner
|
|
||||||
witness_owner_error
|
|
||||||
witness_postfork_child
|
|
||||||
witness_postfork_parent
|
|
||||||
witness_prefork
|
|
||||||
witness_unlock
|
|
||||||
witnesses_cleanup
|
|
||||||
zone_register
|
|
@ -1,5 +0,0 @@
#!/bin/sh

for symbol in `cat $1` ; do
    echo "#undef ${symbol}"
done
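The five-line helper removed above generated un-namespacing headers: it reads a symbol list file and emits one #undef directive per name. As a minimal sketch (not part of the commit), its output for a hypothetical input list containing just arena_purge and bt_init, two names from the symbol list above, would be:

    /* Illustrative output: one directive per listed symbol, undoing the
     * mapping established by the corresponding namespace header. */
    #undef arena_purge
    #undef bt_init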
@ -1,5 +1,8 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_PRNG_H
+#define JEMALLOC_INTERNAL_PRNG_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bit_util.h"
 
 /*
  * Simple linear congruential pseudo-random number generator:
@ -20,95 +23,71 @@
  * bits.
  */
 
+/******************************************************************************/
+/* INTERNAL DEFINITIONS -- IGNORE */
+/******************************************************************************/
 #define PRNG_A_32 UINT32_C(1103515241)
 #define PRNG_C_32 UINT32_C(12347)
 
 #define PRNG_A_64 UINT64_C(6364136223846793005)
 #define PRNG_C_64 UINT64_C(1442695040888963407)
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t prng_state_next_u32(uint32_t state);
-uint64_t prng_state_next_u64(uint64_t state);
-size_t prng_state_next_zu(size_t state);
-
-uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
-    bool atomic);
-uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
-
-uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
-uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(size_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
 JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
-
-    return ((state * PRNG_A_32) + PRNG_C_32);
+prng_state_next_u32(uint32_t state) {
+    return (state * PRNG_A_32) + PRNG_C_32;
 }
 
 JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
-
-    return ((state * PRNG_A_64) + PRNG_C_64);
+prng_state_next_u64(uint64_t state) {
+    return (state * PRNG_A_64) + PRNG_C_64;
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
-
+prng_state_next_zu(size_t state) {
 #if LG_SIZEOF_PTR == 2
-    return ((state * PRNG_A_32) + PRNG_C_32);
+    return (state * PRNG_A_32) + PRNG_C_32;
 #elif LG_SIZEOF_PTR == 3
-    return ((state * PRNG_A_64) + PRNG_C_64);
+    return (state * PRNG_A_64) + PRNG_C_64;
 #else
 #error Unsupported pointer size
 #endif
 }
 
+/******************************************************************************/
+/* BEGIN PUBLIC API */
+/******************************************************************************/
+
+/*
+ * The prng_lg_range functions give a uniform int in the half-open range [0,
+ * 2**lg_range).  If atomic is true, they do so safely from multiple threads.
+ * Multithreaded 64-bit prngs aren't supported.
+ */
+
 JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
-    uint32_t ret, state1;
+prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
+    uint32_t ret, state0, state1;
 
    assert(lg_range > 0);
    assert(lg_range <= 32);
 
-    if (atomic) {
-        uint32_t state0;
+    state0 = atomic_load_u32(state, ATOMIC_RELAXED);
 
+    if (atomic) {
        do {
-            state0 = atomic_read_uint32(state);
            state1 = prng_state_next_u32(state0);
-        } while (atomic_cas_uint32(state, state0, state1));
+        } while (!atomic_compare_exchange_weak_u32(state, &state0,
+            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
-        state1 = prng_state_next_u32(*state);
-        *state = state1;
+        state1 = prng_state_next_u32(state0);
+        atomic_store_u32(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> (32 - lg_range);
-
-    return (ret);
+    return ret;
 }
 
+/* 64-bit atomic operations cannot be supported on all relevant platforms. */
 JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
+prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
    uint64_t ret, state1;
 
    assert(lg_range > 0);
@ -118,36 +97,39 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range)
    *state = state1;
    ret = state1 >> (64 - lg_range);
-
-    return (ret);
+    return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
-    size_t ret, state1;
+prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
+    size_t ret, state0, state1;
 
    assert(lg_range > 0);
    assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
 
-    if (atomic) {
-        size_t state0;
+    state0 = atomic_load_zu(state, ATOMIC_RELAXED);
 
+    if (atomic) {
        do {
-            state0 = atomic_read_z(state);
            state1 = prng_state_next_zu(state0);
-        } while (atomic_cas_z(state, state0, state1));
+        } while (!atomic_compare_exchange_weak_zu(state, &state0,
+            state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
    } else {
-        state1 = prng_state_next_zu(*state);
-        *state = state1;
+        state1 = prng_state_next_zu(state0);
+        atomic_store_zu(state, state1, ATOMIC_RELAXED);
    }
    ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
-    return (ret);
+    return ret;
 }
 
+/*
+ * The prng_range functions behave like the prng_lg_range, but return a result
+ * in [0, range) instead of [0, 2**lg_range).
+ */
+
 JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
+prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
    uint32_t ret;
    unsigned lg_range;
 
@ -161,12 +143,11 @@ prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
        ret = prng_lg_range_u32(state, lg_range, atomic);
    } while (ret >= range);
-
-    return (ret);
+    return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
+prng_range_u64(uint64_t *state, uint64_t range) {
    uint64_t ret;
    unsigned lg_range;
 
@ -180,12 +161,11 @@ prng_range_u64(uint64_t *state, uint64_t range)
        ret = prng_lg_range_u64(state, lg_range);
    } while (ret >= range);
-
-    return (ret);
+    return ret;
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
+prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
    size_t ret;
    unsigned lg_range;
 
@ -199,9 +179,7 @@ prng_range_zu(size_t *state, size_t range, bool atomic)
        ret = prng_lg_range_zu(state, lg_range, atomic);
    } while (ret >= range);
-
-    return (ret);
+    return ret;
 }
-#endif
 
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_PRNG_H */
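As a usage illustration (not part of the commit), the standalone sketch below mirrors the non-atomic path of prng_range_u32(): round the bound up to a power of two, take that many high bits of the LCG state (the low-order bits of a power-of-two-modulus LCG have short cycles, so the high bits are used), and rejection-sample until the draw lands in [0, range). __builtin_clz is a GCC/Clang builtin; the constants are PRNG_A_32/PRNG_C_32 from above, and the seed is arbitrary.

    #include <assert.h>
    #include <stdint.h>

    /* Standalone LCG step mirroring prng_state_next_u32(). */
    static uint32_t
    lcg_next_u32(uint32_t state) {
        return (state * UINT32_C(1103515241)) + UINT32_C(12347);
    }

    /* Uniform draw in [0, range), single-threaded, mirroring the
     * atomic == false path of prng_range_u32(). */
    static uint32_t
    draw_in_range(uint32_t *state, uint32_t range) {
        assert(range > 1);
        /* Smallest lg such that range <= 2**lg. */
        unsigned lg = 32 - __builtin_clz(range - 1);
        uint32_t ret;
        do {
            *state = lcg_next_u32(*state);
            ret = *state >> (32 - lg);  /* Keep the lg high bits. */
        } while (ret >= range);         /* Rejection keeps it uniform. */
        return ret;
    }

Rejection sampling rather than a modulo is what preserves uniformity: with range = 10 the draw is over [0, 16), and the six overshooting values are simply redrawn instead of being folded unevenly onto 0..5.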
@ -1,547 +0,0 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
#  define PROF_PREFIX_DEFAULT "jeprof"
#else
#  define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all gctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void **vec;
    unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t *bt;
    unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /* Profiling counters. */
    uint64_t curobjs;
    uint64_t curbytes;
    uint64_t accumobjs;
    uint64_t accumbytes;
};

typedef enum {
    prof_tctx_state_initializing,
    prof_tctx_state_nominal,
    prof_tctx_state_dumping,
    prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
    /* Thread data for thread that performed the allocation. */
    prof_tdata_t *tdata;

    /*
     * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
     * defunct during teardown.
     */
    uint64_t thr_uid;
    uint64_t thr_discrim;

    /* Profiling counters, protected by tdata->lock. */
    prof_cnt_t cnts;

    /* Associated global context. */
    prof_gctx_t *gctx;

    /*
     * UID that distinguishes multiple tctx's created by the same thread,
     * but coexisting in gctx->tctxs.  There are two ways that such
     * coexistence can occur:
     * - A dumper thread can cause a tctx to be retained in the purgatory
     *   state.
     * - Although a single "producer" thread must create all tctx's which
     *   share the same thr_uid, multiple "consumers" can each concurrently
     *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
     *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
     *   threshold can be hit again before the first consumer finishes
     *   executing prof_tctx_destroy().
     */
    uint64_t tctx_uid;

    /* Linkage into gctx's tctxs. */
    rb_node(prof_tctx_t) tctx_link;

    /*
     * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
     * sample vs destroy race.
     */
    bool prepared;

    /* Current dump-related state, protected by gctx->lock. */
    prof_tctx_state_t state;

    /*
     * Copy of cnts snapshotted during early dump phase, protected by
     * dump_mtx.
     */
    prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
    /* Protects nlimbo, cnt_summed, and tctxs. */
    malloc_mutex_t *lock;

    /*
     * Number of threads that currently cause this gctx to be in a state of
     * limbo due to one of:
     * - Initializing this gctx.
     * - Initializing per thread counters associated with this gctx.
     * - Preparing to destroy this gctx.
     * - Dumping a heap profile that includes this gctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * gctx.
     */
    unsigned nlimbo;

    /*
     * Tree of profile counters, one for each thread that has allocated in
     * this context.
     */
    prof_tctx_tree_t tctxs;

    /* Linkage for tree of contexts to be dumped. */
    rb_node(prof_gctx_t) dump_link;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* Associated backtrace. */
    prof_bt_t bt;

    /* Backtrace vector, variable size, referred to by bt. */
    void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
    malloc_mutex_t *lock;

    /* Monotonically increasing unique thread identifier. */
    uint64_t thr_uid;

    /*
     * Monotonically increasing discriminator among tdata structures
     * associated with the same thr_uid.
     */
    uint64_t thr_discrim;

    /* Included in heap profile dumps if non-NULL. */
    char *thread_name;

    bool attached;
    bool expired;

    rb_node(prof_tdata_t) tdata_link;

    /*
     * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
     * necessary when incrementing this field, because only one thread ever
     * does so.
     */
    uint64_t tctx_uid_next;

    /*
     * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
     * backtraces for which it has non-zero allocation/deallocation counters
     * associated with thread-specific prof_tctx_t objects.  Other threads
     * may write to prof_tctx_t contents when freeing associated objects.
     */
    ckh_t bt2tctx;

    /* Sampling state. */
    uint64_t prng_state;
    uint64_t bytes_until_sample;

    /* State used to avoid dumping while operating on prof internals. */
    bool enq;
    bool enq_idump;
    bool enq_gdump;

    /*
     * Set to true during an early dump phase for tdata's which are
     * currently being dumped.  New threads' tdata's have this initialized
     * to false so that they aren't accidentally included in later dump
     * phases.
     */
    bool dumping;

    /*
     * True if profiling is active for this tdata's thread
     * (thread.prof.active mallctl).
     */
    bool active;

    /* Temporary storage for summation during dump. */
    prof_cnt_t cnt_summed;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
    prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
    bool update);
void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
    size_t old_usize, prof_tctx_t *old_tctx);
void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{

    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return (prof_active);
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{

    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return (prof_gdump_val);
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return (tdata);
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{

    cassert(config_prof);
    assert(ptr != NULL);

    return (arena_prof_tctx_get(tsdn, ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
        tdata = NULL;

    if (tdata_out != NULL)
        *tdata_out = tdata;

    if (unlikely(tdata == NULL))
        return (true);

    if (likely(tdata->bytes_until_sample >= usize)) {
        if (update)
            tdata->bytes_until_sample -= usize;
        return (true);
    } else {
        /* Compute new sample threshold. */
        if (update)
            prof_sample_threshold_update(tdata);
        return (!tdata->active);
    }
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == s2u(usize));

    if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
        &tdata)))
        ret = (prof_tctx_t *)(uintptr_t)1U;
    else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return (ret);
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsdn, ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(tsdn, ptr, usize, tctx);
    else
        prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx)
{
    bool sampled, old_sampled;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx, true);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);

    if (unlikely(sampled))
        prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
    else
        prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);

    if (unlikely(old_sampled))
        prof_free_sampled_object(tsd, old_usize, old_tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
    prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, usize, tctx);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
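A note on the PROF_TDATA_STATE_* constants in the header deleted above (the same values survive in the split headers that follow): thread-shutdown state is encoded directly in near-NULL tdata pointer values, so callers can separate a real pointer from a sentinel with one integer comparison. A minimal standalone sketch of that check, matching the test prof_sample_accum_update() performs:

    #include <stdint.h>

    typedef struct prof_tdata_s prof_tdata_t;

    #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
    #define PROF_TDATA_STATE_PURGATORY    ((prof_tdata_t *)(uintptr_t)2)
    #define PROF_TDATA_STATE_MAX          PROF_TDATA_STATE_PURGATORY

    /* Real allocations are never placed at addresses 0..2, so any value at
     * or below the largest sentinel is shutdown state, not a tdata. */
    static int
    tdata_is_real(const prof_tdata_t *tdata) {
        return (uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX;
    }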
92 include/jemalloc/internal/prof_externs.h Normal file
@ -0,0 +1,92 @@
#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H

#include "jemalloc/internal/mutex.h"

extern malloc_mutex_t bt2gctx_mtx;

extern bool opt_prof;
extern bool opt_prof_active;
extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump;          /* High-water memory dumping. */
extern bool opt_prof_final;          /* Final profile dumping. */
extern bool opt_prof_leak;           /* Dump leak summary at exit. */
extern bool opt_prof_accum;          /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t lg_prof_sample;

void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_tdata_count(void);
size_t prof_bt_count(void);
#endif
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *JET_MUTABLE prof_dump_open;

typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
#ifdef JEMALLOC_JET
void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
    uint64_t *accumbytes);
#endif
bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool prof_thread_active_get(tsd_t *tsd);
bool prof_thread_active_set(tsd_t *tsd, bool active);
bool prof_thread_active_init_get(tsdn_t *tsdn);
bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(tsd_t *tsd);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
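The JET_MUTABLE qualifier on prof_dump_open and prof_dump_header lets the test harness swap these hooks at runtime. Its exact definition lives elsewhere in the tree; a sketch of the assumed intent:

    /* Assumed definition: hooks are const in production builds, writable
     * when building the JEMALLOC_JET test variant so tests can intercept
     * dump behavior. */
    #ifdef JEMALLOC_JET
    #  define JET_MUTABLE
    #else
    #  define JET_MUTABLE const
    #endif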
72 include/jemalloc/internal/prof_inlines_a.h Normal file
@ -0,0 +1,72 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
    cassert(config_prof);

    bool overflow;
    uint64_t a0, a1;

    /*
     * If the application allocates fast enough (and/or if idump is slow
     * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
     * idump trigger coalescing.  This is an intentional mechanism that
     * avoids rate-limiting allocation.
     */
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = a0 + accumbytes;
        assert(a1 >= a0);
        overflow = (a1 >= prof_interval);
        if (overflow) {
            a1 %= prof_interval;
        }
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = a0 + accumbytes;
    overflow = (a1 >= prof_interval);
    if (overflow) {
        a1 %= prof_interval;
    }
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
    return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
    cassert(config_prof);

    /*
     * Cancel out as much of the excessive prof_accumbytes increase as
     * possible without underflowing.  Interval-triggered dumps occur
     * slightly more often than intended as a result of incomplete
     * canceling.
     */
    uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
    a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
    do {
        a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
            usize) : 0;
    } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
        a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
    malloc_mutex_lock(tsdn, &prof_accum->mtx);
    a0 = prof_accum->accumbytes;
    a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
        0;
    prof_accum->accumbytes = a1;
    malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
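The modulo in prof_accum_add() is what coalesces interval-triggered dumps: if a burst of allocation carries the running count across several intervals at once, the count simply wraps modulo prof_interval and a single dump fires. A standalone, single-threaded sketch of that arithmetic (the interval value below is an arbitrary stand-in for prof_interval):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
        uint64_t interval = 1 << 20;      /* Stand-in for prof_interval. */
        uint64_t accum = interval - 100;  /* Just under the threshold. */
        uint64_t alloc = 3 * interval;    /* Burst spanning 3 intervals. */

        uint64_t a1 = accum + alloc;
        int overflow = (a1 >= interval);  /* Crossing triggers one dump. */
        if (overflow) {
            a1 %= interval;               /* Coalesce; keep the remainder. */
        }
        /* Prints dump=1 remainder=1048476: one dump, not four. */
        printf("dump=%d remainder=%llu\n", overflow,
            (unsigned long long)a1);
        return 0;
    }

The intentional effect, as the comment in prof_accum_add() notes, is that dumping never rate-limits allocation: fast allocators get one coalesced dump rather than a queue of them.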
217
include/jemalloc/internal/prof_inlines_b.h
Normal file
217
include/jemalloc/internal/prof_inlines_b.h
Normal file
@ -0,0 +1,217 @@
|
|||||||
|
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
|
||||||
|
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
|
||||||
|
|
||||||
|
#include "jemalloc/internal/sz.h"
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE bool
|
||||||
|
prof_active_get_unlocked(void) {
|
||||||
|
/*
|
||||||
|
* Even if opt_prof is true, sampling can be temporarily disabled by
|
||||||
|
* setting prof_active to false. No locking is used when reading
|
||||||
|
* prof_active in the fast path, so there are no guarantees regarding
|
||||||
|
* how long it will take for all threads to notice state changes.
|
||||||
|
*/
|
||||||
|
return prof_active;
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE bool
|
||||||
|
prof_gdump_get_unlocked(void) {
|
||||||
|
/*
|
||||||
|
* No locking is used when reading prof_gdump_val in the fast path, so
|
||||||
|
* there are no guarantees regarding how long it will take for all
|
||||||
|
* threads to notice state changes.
|
||||||
|
*/
|
||||||
|
return prof_gdump_val;
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
|
||||||
|
prof_tdata_get(tsd_t *tsd, bool create) {
|
||||||
|
prof_tdata_t *tdata;
|
||||||
|
|
||||||
|
cassert(config_prof);
|
||||||
|
|
||||||
|
tdata = tsd_prof_tdata_get(tsd);
|
||||||
|
if (create) {
|
||||||
|
if (unlikely(tdata == NULL)) {
|
||||||
|
if (tsd_nominal(tsd)) {
|
||||||
|
tdata = prof_tdata_init(tsd);
|
||||||
|
tsd_prof_tdata_set(tsd, tdata);
|
||||||
|
}
|
||||||
|
} else if (unlikely(tdata->expired)) {
|
||||||
|
tdata = prof_tdata_reinit(tsd, tdata);
|
||||||
|
tsd_prof_tdata_set(tsd, tdata);
|
||||||
|
}
|
||||||
|
assert(tdata == NULL || tdata->attached);
|
||||||
|
}
|
||||||
|
|
||||||
|
return tdata;
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
|
||||||
|
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
|
||||||
|
cassert(config_prof);
|
||||||
|
assert(ptr != NULL);
|
||||||
|
|
||||||
|
return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE void
|
||||||
|
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
|
||||||
|
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
|
||||||
|
cassert(config_prof);
|
||||||
|
assert(ptr != NULL);
|
||||||
|
|
||||||
|
arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE void
|
||||||
|
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
|
||||||
|
cassert(config_prof);
|
||||||
|
assert(ptr != NULL);
|
||||||
|
|
||||||
|
arena_prof_tctx_reset(tsdn, ptr, tctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE bool
|
||||||
|
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
|
||||||
|
prof_tdata_t **tdata_out) {
|
||||||
|
prof_tdata_t *tdata;
|
||||||
|
|
||||||
|
cassert(config_prof);
|
||||||
|
|
||||||
|
tdata = prof_tdata_get(tsd, true);
|
||||||
|
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	if (likely(tdata->bytes_until_sample >= usize)) {
		if (update) {
			tdata->bytes_until_sample -= usize;
		}
		return true;
	} else {
		if (tsd_reentrancy_level_get(tsd) > 0) {
			return true;
		}
		/* Compute new sample threshold. */
		if (update) {
			prof_sample_threshold_update(tdata);
		}
		return !tdata->active;
	}
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, old_usize, old_tctx);
	}
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
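For reference, the sampling fast path above only spends a per-thread byte
budget; a backtrace is captured only once the budget is exhausted, and the
sentinel (prof_tctx_t *)(uintptr_t)1U marks "allocated but not sampled".
A minimal standalone sketch of that accounting follows; the sample_state_t
type and helper name are illustrative, not jemalloc's API:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-thread sampling state, not jemalloc's actual types. */
typedef struct {
	uint64_t bytes_until_sample; /* Remaining byte budget. */
	uint64_t sample_interval;    /* E.g. 1 << opt_lg_prof_sample. */
} sample_state_t;

/*
 * Returns true when an allocation of usize bytes should be sampled (i.e. a
 * backtrace should be captured), mirroring the shape of
 * prof_sample_accum_update() above.
 */
static bool
sample_accum_update(sample_state_t *state, uint64_t usize) {
	if (state->bytes_until_sample >= usize) {
		/* Fast path: consume budget, no sample. */
		state->bytes_until_sample -= usize;
		return false;
	}
	/*
	 * Budget exhausted: reset it (jemalloc instead draws a fresh
	 * threshold from a geometric distribution via
	 * prof_sample_threshold_update()) and report a sample.
	 */
	state->bytes_until_sample = state->sample_interval;
	return true;
}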
include/jemalloc/internal/prof_structs.h (new file, 201 lines)
@@ -0,0 +1,201 @@
#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H

#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"

struct prof_bt_s {
	/* Backtrace, stored as len program counters. */
	void **vec;
	unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
	prof_bt_t *bt;
	unsigned max;
} prof_unwind_data_t;
#endif

struct prof_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t mtx;
	uint64_t accumbytes;
#else
	atomic_u64_t accumbytes;
#endif
};

struct prof_cnt_s {
	/* Profiling counters. */
	uint64_t curobjs;
	uint64_t curbytes;
	uint64_t accumobjs;
	uint64_t accumbytes;
};

typedef enum {
	prof_tctx_state_initializing,
	prof_tctx_state_nominal,
	prof_tctx_state_dumping,
	prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
	/* Thread data for thread that performed the allocation. */
	prof_tdata_t *tdata;

	/*
	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
	 * defunct during teardown.
	 */
	uint64_t thr_uid;
	uint64_t thr_discrim;

	/* Profiling counters, protected by tdata->lock. */
	prof_cnt_t cnts;

	/* Associated global context. */
	prof_gctx_t *gctx;

	/*
	 * UID that distinguishes multiple tctx's created by the same thread,
	 * but coexisting in gctx->tctxs.  There are two ways that such
	 * coexistence can occur:
	 * - A dumper thread can cause a tctx to be retained in the purgatory
	 *   state.
	 * - Although a single "producer" thread must create all tctx's which
	 *   share the same thr_uid, multiple "consumers" can each concurrently
	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
	 *   threshold can be hit again before the first consumer finishes
	 *   executing prof_tctx_destroy().
	 */
	uint64_t tctx_uid;

	/* Linkage into gctx's tctxs. */
	rb_node(prof_tctx_t) tctx_link;

	/*
	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
	 * sample vs destroy race.
	 */
	bool prepared;

	/* Current dump-related state, protected by gctx->lock. */
	prof_tctx_state_t state;

	/*
	 * Copy of cnts snapshotted during early dump phase, protected by
	 * dump_mtx.
	 */
	prof_cnt_t dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
	/* Protects nlimbo, cnt_summed, and tctxs. */
	malloc_mutex_t *lock;

	/*
	 * Number of threads that currently cause this gctx to be in a state of
	 * limbo due to one of:
	 *   - Initializing this gctx.
	 *   - Initializing per thread counters associated with this gctx.
	 *   - Preparing to destroy this gctx.
	 *   - Dumping a heap profile that includes this gctx.
	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
	 * gctx.
	 */
	unsigned nlimbo;

	/*
	 * Tree of profile counters, one for each thread that has allocated in
	 * this context.
	 */
	prof_tctx_tree_t tctxs;

	/* Linkage for tree of contexts to be dumped. */
	rb_node(prof_gctx_t) dump_link;

	/* Temporary storage for summation during dump. */
	prof_cnt_t cnt_summed;

	/* Associated backtrace. */
	prof_bt_t bt;

	/* Backtrace vector, variable size, referred to by bt. */
	void *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;

struct prof_tdata_s {
	malloc_mutex_t *lock;

	/* Monotonically increasing unique thread identifier. */
	uint64_t thr_uid;

	/*
	 * Monotonically increasing discriminator among tdata structures
	 * associated with the same thr_uid.
	 */
	uint64_t thr_discrim;

	/* Included in heap profile dumps if non-NULL. */
	char *thread_name;

	bool attached;
	bool expired;

	rb_node(prof_tdata_t) tdata_link;

	/*
	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
	 * necessary when incrementing this field, because only one thread ever
	 * does so.
	 */
	uint64_t tctx_uid_next;

	/*
	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
	 * backtraces for which it has non-zero allocation/deallocation counters
	 * associated with thread-specific prof_tctx_t objects.  Other threads
	 * may write to prof_tctx_t contents when freeing associated objects.
	 */
	ckh_t bt2tctx;

	/* Sampling state. */
	uint64_t prng_state;
	uint64_t bytes_until_sample;

	/* State used to avoid dumping while operating on prof internals. */
	bool enq;
	bool enq_idump;
	bool enq_gdump;

	/*
	 * Set to true during an early dump phase for tdata's which are
	 * currently being dumped.  New threads' tdata's have this initialized
	 * to false so that they aren't accidentally included in later dump
	 * phases.
	 */
	bool dumping;

	/*
	 * True if profiling is active for this tdata's thread
	 * (thread.prof.active mallctl).
	 */
	bool active;

	/* Temporary storage for summation during dump. */
	prof_cnt_t cnt_summed;

	/* Backtrace vector, used for calls to prof_backtrace(). */
	void *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;

#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
include/jemalloc/internal/prof_types.h (new file, 56 lines)
@@ -0,0 +1,56 @@
#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
#define JEMALLOC_INTERNAL_PROF_TYPES_H

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
#  define PROF_PREFIX_DEFAULT "jeprof"
#else
#  define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all gctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * Number of mutexes shared among all tdata's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS 256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
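LG_PROF_SAMPLE_DEFAULT is the log2 of the average sampled-byte interval, so the
default of 19 corresponds to roughly one sample per 512 KiB allocated. A quick
standalone check (plain C, illustrative only):

#include <stdint.h>
#include <stdio.h>

#define LG_PROF_SAMPLE_DEFAULT 19

int
main(void) {
	/* Average bytes between samples implied by the default. */
	uint64_t interval = (uint64_t)1 << LG_PROF_SAMPLE_DEFAULT;
	printf("average sample interval: %llu bytes (512 KiB)\n",
	    (unsigned long long)interval);
	return 0;
}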
include/jemalloc/internal/ql.h:
@@ -1,3 +1,8 @@
+#ifndef JEMALLOC_INTERNAL_QL_H
+#define JEMALLOC_INTERNAL_QL_H
+
+#include "jemalloc/internal/qr.h"
+
 /* List definitions. */
 #define ql_head(a_type) \
 struct { \
@@ -79,3 +84,5 @@ struct { \
 
 #define ql_reverse_foreach(a_var, a_head, a_field) \
 	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
+
+#endif /* JEMALLOC_INTERNAL_QL_H */
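With the new include guard, ql.h can now be included on its own. A hedged usage
sketch, assuming the ql_elm/ql_elm_new/ql_new/ql_tail_insert/ql_foreach macros
defined elsewhere in this header; the widget type is illustrative:

#include <stdio.h>
#include "jemalloc/internal/ql.h"

/* A node carrying its own embedded list linkage. */
typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link;
};

int
main(void) {
	ql_head(widget_t) head;
	widget_t a, b;
	a.id = 1;
	b.id = 2;

	ql_new(&head);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link);

	widget_t *w;
	ql_foreach(w, &head, link) {
		printf("widget %d\n", w->id); /* Prints 1, then 2. */
	}
	return 0;
}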
include/jemalloc/internal/qr.h:
@@ -1,3 +1,6 @@
+#ifndef JEMALLOC_INTERNAL_QR_H
+#define JEMALLOC_INTERNAL_QR_H
+
 /* Ring definitions. */
 #define qr(a_type) \
 struct { \
@@ -22,17 +25,15 @@ struct { \
 	(a_qrelm)->a_field.qre_prev = (a_qr); \
 } while (0)
 
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
-    do \
-    { \
+#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
 	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
 	(a_qr)->a_field.qre_prev = (a_qrelm); \
 	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
 	(a_qrelm)->a_field.qre_next = (a_qr); \
 } while (0)
 
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
-	void *t; \
+#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
+	a_type *t; \
 	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
 	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
 	t = (a_qr_a)->a_field.qre_prev; \
@@ -44,8 +45,8 @@ struct { \
  * qr_meld() and qr_split() are functionally equivalent, so there's no need to
  * have two copies of the code.
  */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
-	qr_meld((a_qr_a), (a_qr_b), a_field)
+#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
+	qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
 
 #define qr_remove(a_qr, a_field) do { \
 	(a_qr)->a_field.qre_prev->a_field.qre_next \
@@ -67,3 +68,5 @@ struct { \
 	    (var) != NULL; \
 	    (var) = (((var) != (a_qr)) \
 	    ? (var)->a_field.qre_prev : NULL))
+
+#endif /* JEMALLOC_INTERNAL_QR_H */
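The qr_meld()/qr_split() change threads an explicit a_type parameter through so
the swap temporary can be declared as a_type * rather than void *, which a C++
compiler would reject without a cast. A usage sketch under the new signature;
the ring_node type is illustrative, and qr_new/qr_foreach are assumed from the
unchanged parts of this header:

#include <stdio.h>
#include "jemalloc/internal/qr.h"

typedef struct ring_node_s ring_node_t;
struct ring_node_s {
	int id;
	qr(ring_node_t) link; /* Embedded ring linkage. */
};

int
main(void) {
	ring_node_t a, b, c;
	a.id = 1;
	b.id = 2;
	c.id = 3;
	qr_new(&a, link);
	qr_new(&b, link);
	qr_new(&c, link);

	/* Build the ring a -> b -> c -> a. */
	qr_after_insert(&a, &b, link);
	qr_after_insert(&b, &c, link);

	/* Note the new a_type argument: split a's ring from b's. */
	qr_split(&a, &b, ring_node_t, link);

	ring_node_t *n;
	qr_foreach(n, &a, link) {
		printf("node %d\n", n->id); /* Prints only 1; b and c ring. */
	}
	return 0;
}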
include/jemalloc/internal/quarantine.h (deleted):
@@ -1,60 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-/* Default per thread quarantine size if valgrind is enabled. */
-#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct quarantine_obj_s {
-	void *ptr;
-	size_t usize;
-};
-
-struct quarantine_s {
-	size_t curbytes;
-	size_t curobjs;
-	size_t first;
-#define LG_MAXOBJS_INIT 10
-	size_t lg_maxobjs;
-	quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void quarantine_alloc_hook_work(tsd_t *tsd);
-void quarantine(tsd_t *tsd, void *ptr);
-void quarantine_cleanup(tsd_t *tsd);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void quarantine_alloc_hook(void);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
-JEMALLOC_ALWAYS_INLINE void
-quarantine_alloc_hook(void)
-{
-	tsd_t *tsd;
-
-	assert(config_fill && opt_quarantine);
-
-	tsd = tsd_fetch();
-	if (tsd_quarantine_get(tsd) == NULL)
-		quarantine_alloc_hook_work(tsd);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
include/jemalloc/internal/rb.h:
@@ -22,6 +22,10 @@
 #ifndef RB_H_
 #define RB_H_
 
+#ifndef __PGI
+#define RB_COMPACT
+#endif
+
 #ifdef RB_COMPACT
 /* Node structure. */
 #define rb_node(a_type) \
@@ -348,13 +352,13 @@ a_attr a_type * \
 a_prefix##first(a_rbt_type *rbtree) { \
 	a_type *ret; \
 	rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##last(a_rbt_type *rbtree) { \
 	a_type *ret; \
 	rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
@@ -379,7 +383,7 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
 			assert(tnode != NULL); \
 		} \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
@@ -404,7 +408,7 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
 			assert(tnode != NULL); \
 		} \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
@@ -419,7 +423,7 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
 			ret = rbtn_right_get(a_type, a_field, ret); \
 		} \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
@@ -438,7 +442,7 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
 			break; \
 		} \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
@@ -457,7 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
 			break; \
 		} \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr void \
 a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
@@ -550,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
 		/* Find node's successor, in preparation for swap. */ \
 		pathp->cmp = 1; \
 		nodep = pathp; \
-		for (pathp++; pathp->node != NULL; \
-		    pathp++) { \
+		for (pathp++; pathp->node != NULL; pathp++) { \
 			pathp->cmp = -1; \
 			pathp[1].node = rbtn_left_get(a_type, a_field, \
 			    pathp->node); \
@@ -873,16 +876,16 @@ a_attr a_type * \
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
     a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
 	if (node == NULL) { \
-		return (NULL); \
+		return NULL; \
 	} else { \
 		a_type *ret; \
 		if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
 		    a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
 		    arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-		    a_field, node), cb, arg)); \
+		return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+		    a_field, node), cb, arg); \
 	} \
 } \
 a_attr a_type * \
@@ -894,20 +897,20 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
 		if ((ret = a_prefix##iter_start(rbtree, start, \
 		    rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
 		    (ret = cb(rbtree, node, arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-		    a_field, node), cb, arg)); \
+		return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+		    a_field, node), cb, arg); \
 	} else if (cmp > 0) { \
-		return (a_prefix##iter_start(rbtree, start, \
-		    rbtn_right_get(a_type, a_field, node), cb, arg)); \
+		return a_prefix##iter_start(rbtree, start, \
+		    rbtn_right_get(a_type, a_field, node), cb, arg); \
 	} else { \
 		a_type *ret; \
 		if ((ret = cb(rbtree, node, arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
-		    a_field, node), cb, arg)); \
+		return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
+		    a_field, node), cb, arg); \
 	} \
 } \
 a_attr a_type * \
@@ -920,22 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
 	} else { \
 		ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr a_type * \
 a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
     a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
 	if (node == NULL) { \
-		return (NULL); \
+		return NULL; \
 	} else { \
 		a_type *ret; \
 		if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
 		    rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
 		    (ret = cb(rbtree, node, arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##reverse_iter_recurse(rbtree, \
-		    rbtn_left_get(a_type, a_field, node), cb, arg)); \
+		return a_prefix##reverse_iter_recurse(rbtree, \
+		    rbtn_left_get(a_type, a_field, node), cb, arg); \
 	} \
 } \
 a_attr a_type * \
@@ -948,20 +951,20 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
 		if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
 		    rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
 		    (ret = cb(rbtree, node, arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##reverse_iter_recurse(rbtree, \
-		    rbtn_left_get(a_type, a_field, node), cb, arg)); \
+		return a_prefix##reverse_iter_recurse(rbtree, \
+		    rbtn_left_get(a_type, a_field, node), cb, arg); \
 	} else if (cmp < 0) { \
-		return (a_prefix##reverse_iter_start(rbtree, start, \
-		    rbtn_left_get(a_type, a_field, node), cb, arg)); \
+		return a_prefix##reverse_iter_start(rbtree, start, \
+		    rbtn_left_get(a_type, a_field, node), cb, arg); \
 	} else { \
 		a_type *ret; \
 		if ((ret = cb(rbtree, node, arg)) != NULL) { \
-			return (ret); \
+			return ret; \
 		} \
-		return (a_prefix##reverse_iter_recurse(rbtree, \
-		    rbtn_left_get(a_type, a_field, node), cb, arg)); \
+		return a_prefix##reverse_iter_recurse(rbtree, \
+		    rbtn_left_get(a_type, a_field, node), cb, arg); \
 	} \
 } \
 a_attr a_type * \
@@ -975,7 +978,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
 		ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
 		    cb, arg); \
 	} \
-	return (ret); \
+	return ret; \
 } \
 a_attr void \
 a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
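The rb.h changes are mechanical style updates (return ret; instead of
return (ret);) plus defining RB_COMPACT by default for non-PGI compilers. For
orientation, a hedged sketch of how these macros are instantiated; rb_gen's
argument order is inferred from the generated function signatures visible in
the hunks above, and the int_node type is illustrative:

#include "jemalloc/internal/rb.h"

typedef struct int_node_s int_node_t;
struct int_node_s {
	int key;
	rb_node(int_node_t) link; /* Embedded red-black linkage. */
};
typedef rb_tree(int_node_t) int_tree_t;

static int
int_node_cmp(const int_node_t *a, const int_node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

/* Generates static int_tree_* functions: new, insert, remove, search, ... */
rb_gen(static, int_tree_, int_tree_t, int_node_t, link, int_node_cmp)

int
main(void) {
	int_tree_t tree;
	int_node_t n;
	n.key = 42;

	int_tree_new(&tree);
	int_tree_insert(&tree, &n);

	int_node_t key;
	key.key = 42;
	return int_tree_search(&tree, &key) == &n ? 0 : 1;
}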
include/jemalloc/internal/rtree.h:
@@ -1,75 +1,72 @@
+#ifndef JEMALLOC_INTERNAL_RTREE_H
+#define JEMALLOC_INTERNAL_RTREE_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/tsd.h"
+
 /*
  * This radix tree implementation is tailored to the singular purpose of
- * associating metadata with chunks that are currently owned by jemalloc.
+ * associating metadata with extents that are currently owned by jemalloc.
  *
  *******************************************************************************
  */
-#ifdef JEMALLOC_H_TYPES
+
+/* Number of high insignificant bits. */
+#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
+/* Number of low insignificant bits. */
+#define RTREE_NLIB LG_PAGE
+/* Number of significant bits. */
+#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
+/* Number of levels in radix tree. */
+#if RTREE_NSB <= 10
+#  define RTREE_HEIGHT 1
+#elif RTREE_NSB <= 36
+#  define RTREE_HEIGHT 2
+#elif RTREE_NSB <= 52
+#  define RTREE_HEIGHT 3
+#else
+#  error Unsupported number of significant virtual address bits
+#endif
+/* Use compact leaf representation if virtual address encoding allows. */
+#if RTREE_NHIB >= LG_CEIL_NSIZES
+#  define RTREE_LEAF_COMPACT
+#endif
+
+/* Needed for initialization only. */
+#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
 
 typedef struct rtree_node_elm_s rtree_node_elm_t;
-typedef struct rtree_level_s rtree_level_t;
-typedef struct rtree_s rtree_t;
-
-/*
- * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
- * machine address width.
- */
-#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
-/* Maximum rtree height. */
-#define RTREE_HEIGHT_MAX \
-    ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
-
-/* Used for two-stage lock-free node initialization. */
-#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
-
-/*
- * The node allocation callback function's argument is the number of contiguous
- * rtree_node_elm_t structures to allocate, and the resulting memory must be
- * zeroed.
- */
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
-typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
 struct rtree_node_elm_s {
-	union {
-		void *pun;
-		rtree_node_elm_t *child;
-		extent_node_t *val;
-	};
+	atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
 };
 
-struct rtree_level_s {
+struct rtree_leaf_elm_s {
+#ifdef RTREE_LEAF_COMPACT
 	/*
-	 * A non-NULL subtree points to a subtree rooted along the hypothetical
-	 * path to the leaf node corresponding to key 0.  Depending on what keys
-	 * have been used to store to the tree, an arbitrary combination of
-	 * subtree pointers may remain NULL.
-	 *
-	 * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
-	 * This results in a 3-level tree, and the leftmost leaf can be directly
-	 * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
-	 * 0x00000000) can be accessed via subtrees[1], and the remainder of the
-	 * tree can be accessed via subtrees[0].
-	 *
-	 * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
-	 *
-	 * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
-	 *
-	 * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
-	 *
-	 * This has practical implications on x64, which currently uses only the
-	 * lower 47 bits of virtual address space in userland, thus leaving
-	 * subtrees[0] unused and avoiding a level of tree traversal.
+	 * Single pointer-width field containing all three leaf element fields.
+	 * For example, on a 64-bit x64 system with 48 significant virtual
+	 * memory address bits, the index, extent, and slab fields are packed as
+	 * such:
+	 *
+	 * x: index
+	 * e: extent
+	 * b: slab
+	 *
+	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
 	 */
-	union {
-		void *subtree_pun;
-		rtree_node_elm_t *subtree;
-	};
+	atomic_p_t le_bits;
+#else
+	atomic_p_t le_extent; /* (extent_t *) */
+	atomic_u_t le_szind; /* (szind_t) */
+	atomic_b_t le_slab; /* (bool) */
+#endif
 };
 
+typedef struct rtree_level_s rtree_level_t;
+struct rtree_level_s {
 	/* Number of key bits distinguished by this level. */
 	unsigned bits;
 	/*
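The le_bits packing described in the struct comment above can be exercised in
isolation. A sketch with an assumed 48-bit significant address width; LG_VADDR
and NHIB below are placeholder constants standing in for the configure-derived
values, and the helper names are illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define LG_VADDR 48 /* Assumed significant virtual address bits. */
#define NHIB (64 - LG_VADDR) /* High insignificant bits. */

/* Pack (szind, extent pointer, slab flag) into one pointer-width word. */
static uintptr_t
leaf_bits_encode(uintptr_t extent, unsigned szind, bool slab) {
	return ((uintptr_t)szind << LG_VADDR) |
	    (extent & (((uintptr_t)1 << LG_VADDR) - 1)) |
	    (uintptr_t)slab;
}

static unsigned
leaf_bits_szind(uintptr_t bits) {
	return (unsigned)(bits >> LG_VADDR);
}

static uintptr_t
leaf_bits_extent(uintptr_t bits) {
	/* Restore sign-extended high bits, mask off the low slab bit. */
	return (uintptr_t)((intptr_t)(bits << NHIB) >> NHIB) &
	    ~(uintptr_t)0x1;
}

static bool
leaf_bits_slab(uintptr_t bits) {
	return (bool)(bits & 0x1);
}

int
main(void) {
	/* Extents are at least pointer-aligned, so bit 0 is free. */
	uintptr_t extent = (uintptr_t)0x7f1234560000;
	uintptr_t bits = leaf_bits_encode(extent, 17, true);
	assert(leaf_bits_szind(bits) == 17);
	assert(leaf_bits_extent(bits) == extent);
	assert(leaf_bits_slab(bits));
	return 0;
}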
@ -79,288 +76,399 @@ struct rtree_level_s {
|
|||||||
unsigned cumbits;
|
unsigned cumbits;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
typedef struct rtree_s rtree_t;
|
||||||
struct rtree_s {
|
struct rtree_s {
|
||||||
rtree_node_alloc_t *alloc;
|
malloc_mutex_t init_lock;
|
||||||
rtree_node_dalloc_t *dalloc;
|
/* Number of elements based on rtree_levels[0].bits. */
|
||||||
unsigned height;
|
#if RTREE_HEIGHT > 1
|
||||||
/*
|
rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||||
* Precomputed table used to convert from the number of leading 0 key
|
#else
|
||||||
* bits to which subtree level to start at.
|
rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)];
|
||||||
*/
|
#endif
|
||||||
unsigned start_level[RTREE_HEIGHT_MAX];
|
|
||||||
rtree_level_t levels[RTREE_HEIGHT_MAX];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_STRUCTS */
|
/*
|
||||||
/******************************************************************************/
|
* Split the bits into one to three partitions depending on number of
|
||||||
#ifdef JEMALLOC_H_EXTERNS
|
* significant bits. It the number of bits does not divide evenly into the
|
||||||
|
* number of levels, place one remainder bit per level starting at the leaf
|
||||||
bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
|
* level.
|
||||||
rtree_node_dalloc_t *dalloc);
|
*/
|
||||||
void rtree_delete(rtree_t *rtree);
|
static const rtree_level_t rtree_levels[] = {
|
||||||
rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
|
#if RTREE_HEIGHT == 1
|
||||||
unsigned level);
|
{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
|
||||||
rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
|
#elif RTREE_HEIGHT == 2
|
||||||
rtree_node_elm_t *elm, unsigned level);
|
{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
|
||||||
|
{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
|
||||||
#endif /* JEMALLOC_H_EXTERNS */
|
#elif RTREE_HEIGHT == 3
|
||||||
/******************************************************************************/
|
{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
|
||||||
#ifdef JEMALLOC_H_INLINES
|
{RTREE_NSB/3 + RTREE_NSB%3/2,
|
||||||
|
RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
|
||||||
#ifndef JEMALLOC_ENABLE_INLINE
|
{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
|
||||||
unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
|
#else
|
||||||
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
|
# error Unsupported rtree height
|
||||||
|
|
||||||
bool rtree_node_valid(rtree_node_elm_t *node);
|
|
||||||
rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
|
|
||||||
bool dependent);
|
|
||||||
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
|
|
||||||
unsigned level, bool dependent);
|
|
||||||
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
|
|
||||||
bool dependent);
|
|
||||||
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
|
|
||||||
const extent_node_t *val);
|
|
||||||
rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
|
|
||||||
bool dependent);
|
|
||||||
rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
|
|
||||||
bool dependent);
|
|
||||||
|
|
||||||
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
|
|
||||||
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
|
|
||||||
#endif
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
|
bool rtree_new(rtree_t *rtree, bool zeroed);
|
||||||
JEMALLOC_ALWAYS_INLINE unsigned
|
|
||||||
rtree_start_level(rtree_t *rtree, uintptr_t key)
|
|
||||||
{
|
|
||||||
unsigned start_level;
|
|
||||||
|
|
||||||
if (unlikely(key == 0))
|
typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||||
return (rtree->height - 1);
|
extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
|
||||||
|
|
||||||
start_level = rtree->start_level[lg_floor(key) >>
|
typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
|
||||||
LG_RTREE_BITS_PER_LEVEL];
|
extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
|
||||||
assert(start_level < rtree->height);
|
|
||||||
return (start_level);
|
typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
|
||||||
|
extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
|
||||||
|
|
||||||
|
typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
|
||||||
|
extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
|
||||||
|
#ifdef JEMALLOC_JET
|
||||||
|
void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
|
||||||
|
#endif
|
||||||
|
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
|
||||||
|
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||||
|
rtree_leafkey(uintptr_t key) {
|
||||||
|
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||||
|
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||||
|
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||||
|
unsigned maskbits = ptrbits - cumbits;
|
||||||
|
uintptr_t mask = ~((ZU(1) << maskbits) - 1);
|
||||||
|
return (key & mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE size_t
|
||||||
|
rtree_cache_direct_map(uintptr_t key) {
|
||||||
|
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||||
|
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
|
||||||
|
rtree_levels[RTREE_HEIGHT-1].bits);
|
||||||
|
unsigned maskbits = ptrbits - cumbits;
|
||||||
|
return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE uintptr_t
|
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||||
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
|
rtree_subkey(uintptr_t key, unsigned level) {
|
||||||
{
|
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
|
||||||
|
unsigned cumbits = rtree_levels[level].cumbits;
|
||||||
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
|
unsigned shiftbits = ptrbits - cumbits;
|
||||||
rtree->levels[level].cumbits)) & ((ZU(1) <<
|
unsigned maskbits = rtree_levels[level].bits;
|
||||||
rtree->levels[level].bits) - 1));
|
uintptr_t mask = (ZU(1) << maskbits) - 1;
|
||||||
|
return ((key >> shiftbits) & mask);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE bool
|
|
||||||
rtree_node_valid(rtree_node_elm_t *node)
|
|
||||||
{
|
|
||||||
|
|
||||||
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
|
|
||||||
rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
|
|
||||||
{
|
|
||||||
rtree_node_elm_t *child;
|
|
||||||
|
|
||||||
/* Double-checked read (first read may be stale. */
|
|
||||||
child = elm->child;
|
|
||||||
if (!dependent && !rtree_node_valid(child))
|
|
||||||
child = atomic_read_p(&elm->pun);
|
|
||||||
assert(!dependent || child != NULL);
|
|
||||||
return (child);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
|
|
||||||
rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
|
|
||||||
bool dependent)
|
|
||||||
{
|
|
||||||
rtree_node_elm_t *child;
|
|
||||||
|
|
||||||
child = rtree_child_tryread(elm, dependent);
|
|
||||||
if (!dependent && unlikely(!rtree_node_valid(child)))
|
|
||||||
child = rtree_child_read_hard(rtree, elm, level);
|
|
||||||
assert(!dependent || child != NULL);
|
|
||||||
return (child);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE extent_node_t *
|
|
||||||
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
|
|
||||||
{
|
|
||||||
|
|
||||||
if (dependent) {
|
|
||||||
/*
|
/*
|
||||||
* Reading a val on behalf of a pointer to a valid allocation is
|
* Atomic getters.
|
||||||
* guaranteed to be a clean read even without synchronization,
|
*
|
||||||
|
* dependent: Reading a value on behalf of a pointer to a valid allocation
|
||||||
|
* is guaranteed to be a clean read even without synchronization,
|
||||||
* because the rtree update became visible in memory before the
|
* because the rtree update became visible in memory before the
|
||||||
* pointer came into existence.
|
* pointer came into existence.
|
||||||
*/
|
* !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
|
||||||
return (elm->val);
|
|
||||||
} else {
|
|
||||||
/*
|
|
||||||
* An arbitrary read, e.g. on behalf of ivsalloc(), may not be
|
|
||||||
* dependent on a previous rtree write, which means a stale read
|
* dependent on a previous rtree write, which means a stale read
|
||||||
* could result if synchronization were omitted here.
|
* could result if synchronization were omitted here.
|
||||||
*/
|
*/
|
||||||
return (atomic_read_p(&elm->pun));
|
# ifdef RTREE_LEAF_COMPACT
|
||||||
}
|
JEMALLOC_ALWAYS_INLINE uintptr_t
|
||||||
|
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
|
bool dependent) {
|
||||||
|
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
|
||||||
|
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||||
rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
|
rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
|
||||||
{
|
/* Restore sign-extended high bits, mask slab bit. */
|
||||||
|
return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
|
||||||
atomic_write_p(&elm->pun, val);
|
RTREE_NHIB) & ~((uintptr_t)0x1));
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
|
JEMALLOC_ALWAYS_INLINE szind_t
|
||||||
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
|
rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
|
||||||
{
|
return (szind_t)(bits >> LG_VADDR);
|
||||||
rtree_node_elm_t *subtree;
|
|
||||||
|
|
||||||
/* Double-checked read (first read may be stale. */
|
|
||||||
subtree = rtree->levels[level].subtree;
|
|
||||||
if (!dependent && unlikely(!rtree_node_valid(subtree)))
|
|
||||||
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
|
|
||||||
assert(!dependent || subtree != NULL);
|
|
||||||
return (subtree);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
|
JEMALLOC_ALWAYS_INLINE bool
|
||||||
rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
|
rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
|
||||||
{
|
return (bool)(bits & (uintptr_t)0x1);
|
||||||
rtree_node_elm_t *subtree;
|
|
||||||
|
|
||||||
subtree = rtree_subtree_tryread(rtree, level, dependent);
|
|
||||||
if (!dependent && unlikely(!rtree_node_valid(subtree)))
|
|
||||||
subtree = rtree_subtree_read_hard(rtree, level);
|
|
||||||
assert(!dependent || subtree != NULL);
|
|
||||||
return (subtree);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE extent_node_t *
|
# endif
|
||||||
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
|
|
||||||
{
|
|
||||||
uintptr_t subkey;
|
|
||||||
unsigned start_level;
|
|
||||||
rtree_node_elm_t *node;
|
|
||||||
|
|
||||||
start_level = rtree_start_level(rtree, key);
|
JEMALLOC_ALWAYS_INLINE extent_t *
|
||||||
|
rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
node = rtree_subtree_tryread(rtree, start_level, dependent);
|
bool dependent) {
|
||||||
#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
switch (start_level + RTREE_GET_BIAS) {
|
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||||
#define RTREE_GET_SUBTREE(level) \
|
return rtree_leaf_elm_bits_extent_get(bits);
|
||||||
case level: \
|
#else
|
||||||
assert(level < (RTREE_HEIGHT_MAX-1)); \
|
extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
|
||||||
if (!dependent && unlikely(!rtree_node_valid(node))) \
|
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
|
||||||
return (NULL); \
|
return extent;
|
||||||
subkey = rtree_subkey(rtree, key, level - \
|
|
||||||
RTREE_GET_BIAS); \
|
|
||||||
node = rtree_child_tryread(&node[subkey], dependent); \
|
|
||||||
/* Fall through. */
|
|
||||||
#define RTREE_GET_LEAF(level) \
|
|
||||||
case level: \
|
|
||||||
assert(level == (RTREE_HEIGHT_MAX-1)); \
|
|
||||||
if (!dependent && unlikely(!rtree_node_valid(node))) \
|
|
||||||
return (NULL); \
|
|
||||||
subkey = rtree_subkey(rtree, key, level - \
|
|
||||||
RTREE_GET_BIAS); \
|
|
||||||
/* \
|
|
||||||
* node is a leaf, so it contains values rather than \
|
|
||||||
* child pointers. \
|
|
||||||
*/ \
|
|
||||||
return (rtree_val_read(rtree, &node[subkey], \
|
|
||||||
dependent));
|
|
||||||
#if RTREE_HEIGHT_MAX > 1
|
|
||||||
RTREE_GET_SUBTREE(0)
|
|
||||||
#endif
|
#endif
|
||||||
#if RTREE_HEIGHT_MAX > 2
|
|
||||||
RTREE_GET_SUBTREE(1)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 3
|
|
||||||
RTREE_GET_SUBTREE(2)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 4
|
|
||||||
RTREE_GET_SUBTREE(3)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 5
|
|
||||||
RTREE_GET_SUBTREE(4)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 6
|
|
||||||
RTREE_GET_SUBTREE(5)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 7
|
|
||||||
RTREE_GET_SUBTREE(6)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 8
|
|
||||||
RTREE_GET_SUBTREE(7)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 9
|
|
||||||
RTREE_GET_SUBTREE(8)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 10
|
|
||||||
RTREE_GET_SUBTREE(9)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 11
|
|
||||||
RTREE_GET_SUBTREE(10)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 12
|
|
||||||
RTREE_GET_SUBTREE(11)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 13
|
|
||||||
RTREE_GET_SUBTREE(12)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 14
|
|
||||||
RTREE_GET_SUBTREE(13)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 15
|
|
||||||
RTREE_GET_SUBTREE(14)
|
|
||||||
#endif
|
|
||||||
#if RTREE_HEIGHT_MAX > 16
|
|
||||||
# error Unsupported RTREE_HEIGHT_MAX
|
|
||||||
#endif
|
|
||||||
RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
|
|
||||||
#undef RTREE_GET_SUBTREE
|
|
||||||
#undef RTREE_GET_LEAF
|
|
||||||
default: not_reached();
|
|
||||||
}
|
|
||||||
#undef RTREE_GET_BIAS
|
|
||||||
not_reached();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE bool
|
JEMALLOC_ALWAYS_INLINE szind_t
|
||||||
rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
|
rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
{
|
bool dependent) {
|
||||||
uintptr_t subkey;
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
unsigned i, start_level;
|
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||||
rtree_node_elm_t *node, *child;
|
return rtree_leaf_elm_bits_szind_get(bits);
|
||||||
|
#else
|
||||||
|
return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
|
||||||
|
: ATOMIC_ACQUIRE);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
start_level = rtree_start_level(rtree, key);
|
JEMALLOC_ALWAYS_INLINE bool
|
||||||
|
rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
|
bool dependent) {
|
||||||
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
|
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
|
||||||
|
return rtree_leaf_elm_bits_slab_get(bits);
|
||||||
|
#else
|
||||||
|
return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
|
||||||
|
ATOMIC_ACQUIRE);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
node = rtree_subtree_read(rtree, start_level, false);
|
static inline void
|
||||||
if (node == NULL)
|
rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
return (true);
|
extent_t *extent) {
|
||||||
for (i = start_level; /**/; i++, node = child) {
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
subkey = rtree_subkey(rtree, key, i);
|
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
|
||||||
if (i == rtree->height - 1) {
|
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||||
|
LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
|
||||||
|
| ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||||
|
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||||
|
#else
|
||||||
|
atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
|
szind_t szind) {
|
||||||
|
assert(szind <= NSIZES);
|
||||||
|
|
||||||
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
|
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||||
|
true);
|
||||||
|
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||||
|
((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||||
|
(((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||||
|
((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
|
||||||
|
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||||
|
#else
|
||||||
|
atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
|
bool slab) {
|
||||||
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
|
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
|
||||||
|
true);
|
||||||
|
uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
|
||||||
|
LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
|
||||||
|
(((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
|
||||||
|
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||||
|
#else
|
||||||
|
atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void
|
||||||
|
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
|
||||||
|
extent_t *extent, szind_t szind, bool slab) {
|
||||||
|
#ifdef RTREE_LEAF_COMPACT
|
||||||
|
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
|
||||||
|
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
|
||||||
|
((uintptr_t)slab);
|
||||||
|
atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
|
||||||
|
#else
|
||||||
|
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||||
|
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||||
/*
|
/*
|
||||||
* node is a leaf, so it contains values rather than
|
* Write extent last, since the element is atomically considered valid
|
||||||
* child pointers.
|
* as soon as the extent field is non-NULL.
|
||||||
*/
|
*/
|
||||||
rtree_val_write(rtree, &node[subkey], val);
|
rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
|
||||||
return (false);
|
|
||||||
}
|
|
||||||
assert(i + 1 < rtree->height);
|
|
||||||
child = rtree_child_read(rtree, &node[subkey], i, false);
|
|
||||||
if (child == NULL)
|
|
||||||
return (true);
|
|
||||||
}
|
|
||||||
not_reached();
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_INLINES */
|
static inline void
|
||||||
/******************************************************************************/
|
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
|
||||||
|
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
|
||||||
|
assert(!slab || szind < NBINS);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The caller implicitly assures that it is the only writer to the szind
|
||||||
|
* and slab fields, and that the extent field cannot currently change.
|
||||||
|
*/
|
||||||
|
rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
|
||||||
|
rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
|
||||||
|
rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
|
||||||
|
uintptr_t key, bool dependent, bool init_missing) {
|
||||||
|
assert(key != 0);
|
||||||
|
assert(!dependent || !init_missing);
|
||||||
|
|
||||||
|
size_t slot = rtree_cache_direct_map(key);
|
||||||
|
uintptr_t leafkey = rtree_leafkey(key);
|
||||||
|
assert(leafkey != RTREE_LEAFKEY_INVALID);
|
||||||
|
|
||||||
|
/* Fast path: L1 direct mapped cache. */
|
||||||
|
if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
|
||||||
|
rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
|
||||||
|
assert(leaf != NULL);
|
||||||
|
uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
|
||||||
|
return &leaf[subkey];
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
 * Search the L2 LRU cache.  On hit, swap the matching element into the
 * slot in L1 cache, and move the position in L2 up by 1.
 */
#define RTREE_CACHE_CHECK_L2(i) do {					\
    if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {		\
        rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;		\
        assert(leaf != NULL);						\
        if (i > 0) {							\
            /* Bubble up by one. */					\
            rtree_ctx->l2_cache[i].leafkey =				\
                rtree_ctx->l2_cache[i - 1].leafkey;			\
            rtree_ctx->l2_cache[i].leaf =				\
                rtree_ctx->l2_cache[i - 1].leaf;			\
            rtree_ctx->l2_cache[i - 1].leafkey =			\
                rtree_ctx->cache[slot].leafkey;				\
            rtree_ctx->l2_cache[i - 1].leaf =				\
                rtree_ctx->cache[slot].leaf;				\
        } else {							\
            rtree_ctx->l2_cache[0].leafkey =				\
                rtree_ctx->cache[slot].leafkey;				\
            rtree_ctx->l2_cache[0].leaf =				\
                rtree_ctx->cache[slot].leaf;				\
        }								\
        rtree_ctx->cache[slot].leafkey = leafkey;			\
        rtree_ctx->cache[slot].leaf = leaf;				\
        uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);		\
        return &leaf[subkey];						\
    }									\
} while (0)
    /* Check the first cache entry. */
    RTREE_CACHE_CHECK_L2(0);
    /* Search the remaining cache elements. */
    for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
        RTREE_CACHE_CHECK_L2(i);
    }
#undef RTREE_CACHE_CHECK_L2

    return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
        dependent, init_missing);
}

static inline bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    extent_t *extent, szind_t szind, bool slab) {
    /* Use rtree_clear() to set the extent to NULL. */
    assert(extent != NULL);

    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, false, true);
    if (elm == NULL) {
        return true;
    }

    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);

    return false;
}

JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent) {
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
        key, dependent, false);
    if (!dependent && elm == NULL) {
        return NULL;
    }
    assert(elm != NULL);
    return elm;
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return NULL;
    }
    return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
}

JEMALLOC_ALWAYS_INLINE szind_t
rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return NSIZES;
    }
    return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}

/*
 * rtree_slab_read() is intentionally omitted because slab is always read in
 * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
 */

JEMALLOC_ALWAYS_INLINE bool
rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return true;
    }
    *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    return false;
}

JEMALLOC_ALWAYS_INLINE bool
rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
        dependent);
    if (!dependent && elm == NULL) {
        return true;
    }
    *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
    *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
    return false;
}

static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, szind_t szind, bool slab) {
    assert(!slab || szind < NBINS);

    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
}

static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
    rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
    assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
        NULL);
    rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
}

#endif /* JEMALLOC_INTERNAL_RTREE_H */
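A minimal usage sketch of the API above, not part of the diff: it exercises rtree_write(), rtree_szind_read(), and rtree_clear() together. The wrapper name track_extent is hypothetical, and tsdn/rtree/rtree_ctx are assumed to have been obtained and initialized elsewhere.

static bool
track_extent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, extent_t *extent, szind_t szind, bool slab) {
    /* Insert; rtree_write() returns true on failure to create nodes. */
    if (rtree_write(tsdn, rtree, rtree_ctx, key, extent, szind, slab)) {
        return true;
    }
    /* Non-dependent read: allowed to miss, so check before trusting it. */
    szind_t ind = rtree_szind_read(tsdn, rtree, rtree_ctx, key, false);
    assert(ind == szind);
    /* Remove the mapping; the slot must currently hold a non-NULL extent. */
    rtree_clear(tsdn, rtree, rtree_ctx, key);
    return false;
}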
50
include/jemalloc/internal/rtree_tsd.h
Normal file
@@ -0,0 +1,50 @@
#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
#define JEMALLOC_INTERNAL_RTREE_CTX_H

/*
 * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively.  Each
 * entry supports an entire leaf, so the cache hit rate is typically high even
 * with a small number of entries.  In rare cases extent activity will straddle
 * the boundary between two leaf nodes.  Furthermore, an arena may use a
 * combination of dss and mmap.  Note that as memory usage grows past the amount
 * that this cache can directly cover, the cache will become less effective if
 * locality of reference is low, but the consequence is merely cache misses
 * while traversing the tree nodes.
 *
 * The L1 direct mapped cache offers consistent and low cost on cache hit.
 * However collision could affect hit rate negatively.  This is resolved by
 * combining with a L2 LRU cache, which requires linear search and re-ordering
 * on access but suffers no collision.  Note that, the cache will itself suffer
 * cache misses if made overly large, plus the cost of linear search in the LRU
 * cache.
 */
#define RTREE_CTX_LG_NCACHE 4
#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
#define RTREE_CTX_NCACHE_L2 8

/*
 * Zero initializer required for tsd initialization only.  Proper initialization
 * done via rtree_ctx_data_init().
 */
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}


typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
struct rtree_ctx_cache_elm_s {
    uintptr_t leafkey;
    rtree_leaf_elm_t *leaf;
};

typedef struct rtree_ctx_s rtree_ctx_t;
struct rtree_ctx_s {
    /* Direct mapped cache. */
    rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE];
    /* L2 LRU cache. */
    rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2];
};

void rtree_ctx_data_init(rtree_ctx_t *ctx);

#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
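The two-level design described in the header comment above can be shown standalone. The sketch below is not part of the diff; names (celm_t, cache_lookup) are hypothetical, and a trivial modulo hash stands in for the real leafkey computation. It demonstrates the same policy as RTREE_CACHE_CHECK_L2: an O(1) direct-mapped L1, and an L2 searched linearly whose hits bubble up by one and swap into the L1 slot.

#include <stddef.h>
#include <stdint.h>

#define NCACHE 16    /* L1, direct mapped */
#define NCACHE_L2 8  /* L2, LRU-ish */

typedef struct { uintptr_t key; void *val; } celm_t;

static celm_t l1[NCACHE];
static celm_t l2[NCACHE_L2];

/* Returns val on hit, NULL on miss; promotes L2 hits toward the front. */
static void *
cache_lookup(uintptr_t key) {
    size_t slot = key % NCACHE; /* stand-in for the leafkey hash */
    if (l1[slot].key == key) {
        return l1[slot].val; /* L1 hit: constant cost, no reordering */
    }
    for (size_t i = 0; i < NCACHE_L2; i++) {
        if (l2[i].key != key) {
            continue;
        }
        celm_t hit = l2[i];
        if (i > 0) {
            /* Bubble up by one, as in RTREE_CACHE_CHECK_L2. */
            l2[i] = l2[i - 1];
            l2[i - 1] = l1[slot]; /* the evicted L1 entry lands here */
        } else {
            l2[0] = l1[slot];
        }
        l1[slot] = hit; /* matching element moves into the L1 slot */
        return hit.val;
    }
    return NULL;
}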
@@ -40,6 +40,54 @@ lg() {
   done
 }
 
+lg_ceil() {
+  y=$1
+  lg ${y}; lg_floor=${lg_result}
+  pow2 ${lg_floor}; pow2_floor=${pow2_result}
+  if [ ${pow2_floor} -lt ${y} ] ; then
+    lg_ceil_result=$((${lg_floor} + 1))
+  else
+    lg_ceil_result=${lg_floor}
+  fi
+}
+
+reg_size_compute() {
+  lg_grp=$1
+  lg_delta=$2
+  ndelta=$3
+
+  pow2 ${lg_grp}; grp=${pow2_result}
+  pow2 ${lg_delta}; delta=${pow2_result}
+  reg_size=$((${grp} + ${delta}*${ndelta}))
+}
+
+slab_size() {
+  lg_p=$1
+  lg_grp=$2
+  lg_delta=$3
+  ndelta=$4
+
+  pow2 ${lg_p}; p=${pow2_result}
+  reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}
+
+  # Compute smallest slab size that is an integer multiple of reg_size.
+  try_slab_size=${p}
+  try_nregs=$((${try_slab_size} / ${reg_size}))
+  perfect=0
+  while [ ${perfect} -eq 0 ] ; do
+    perfect_slab_size=${try_slab_size}
+    perfect_nregs=${try_nregs}
+
+    try_slab_size=$((${try_slab_size} + ${p}))
+    try_nregs=$((${try_slab_size} / ${reg_size}))
+    if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
+      perfect=1
+    fi
+  done
+
+  slab_size_pgs=$((${perfect_slab_size} / ${p}))
+}
+
 size_class() {
   index=$1
   lg_grp=$2
@@ -80,8 +128,10 @@ size_class() {
 
   if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
     bin="yes"
+    slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
   else
     bin="no"
+    pgs=0
   fi
   if [ ${lg_size} -lt ${lg_kmax} \
       -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
@@ -89,10 +139,11 @@ size_class() {
   else
     lg_delta_lookup="no"
   fi
-  printf '    SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
+  printf '    SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup}
   # Defined upon return:
   #   - psz ("yes" or "no")
   #   - bin ("yes" or "no")
+  #   - pgs
   #   - lg_delta_lookup (${lg_delta} or "no")
 }
 
@@ -111,7 +162,7 @@ size_classes() {
   pow2 ${lg_g}; g=${pow2_result}
 
   echo "#define SIZE_CLASSES \\"
-  echo "  /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
+  echo "  /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\"
 
   ntbins=0
   nlbins=0
@@ -197,7 +248,7 @@ size_classes() {
         fi
       fi
       # Final written value is correct:
-      huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+      large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
       index=$((${index} + 1))
       ndelta=$((${ndelta} + 1))
     done
@@ -206,53 +257,61 @@ size_classes() {
   done
   echo
   nsizes=${index}
+  lg_ceil ${nsizes}; lg_ceil_nsizes=${lg_ceil_result}
 
   # Defined upon completion:
   #   - ntbins
   #   - nlbins
   #   - nbins
   #   - nsizes
+  #   - lg_ceil_nsizes
   #   - npsizes
   #   - lg_tiny_maxclass
   #   - lookup_maxclass
   #   - small_maxclass
   #   - lg_large_minclass
-  #   - huge_maxclass
+  #   - large_maxclass
 }
 
 cat <<EOF
+#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H
+#define JEMALLOC_INTERNAL_SIZE_CLASSES_H
+
 /* This file was automatically generated by size_classes.sh. */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#include "jemalloc/internal/jemalloc_internal_types.h"
 
 /*
- * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
- * be defined prior to inclusion, and it in turn defines:
+ * This header file defines:
 *
 *   LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
+ *   LG_TINY_MIN: Lg of minimum size class to support.
 *   SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
-                  bin, lg_delta_lookup) tuples.
+                  bin, pgs, lg_delta_lookup) tuples.
 *     index: Size class index.
 *     lg_grp: Lg group base size (no deltas added).
 *     lg_delta: Lg delta to previous size class.
 *     ndelta: Delta multiplier.  size == 1<<lg_grp + ndelta<<lg_delta
 *     psz: 'yes' if a multiple of the page size, 'no' otherwise.
 *     bin: 'yes' if a small bin size class, 'no' otherwise.
+ *     pgs: Slab page count if a small bin size class, 0 otherwise.
 *     lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
 *                      otherwise.
 *   NTBINS: Number of tiny bins.
 *   NLBINS: Number of bins supported by the lookup table.
 *   NBINS: Number of small size class bins.
 *   NSIZES: Number of size classes.
+ *   LG_CEIL_NSIZES: Number of bits required to store NSIZES.
 *   NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
 *   LG_TINY_MAXCLASS: Lg of maximum tiny size class.
 *   LOOKUP_MAXCLASS: Maximum size class included in lookup table.
 *   SMALL_MAXCLASS: Maximum small size class.
 *   LG_LARGE_MINCLASS: Lg of minimum large size class.
-*   HUGE_MAXCLASS: Maximum (huge) size class.
+*   LARGE_MAXCLASS: Maximum (large) size class.
 */
 
 #define LG_SIZE_CLASS_GROUP ${lg_g}
+#define LG_TINY_MIN ${lg_tmin}
 
 EOF
 
@@ -269,12 +328,14 @@ for lg_z in ${lg_zarr} ; do
   echo "#define NLBINS ${nlbins}"
   echo "#define NBINS ${nbins}"
   echo "#define NSIZES ${nsizes}"
+  echo "#define LG_CEIL_NSIZES ${lg_ceil_nsizes}"
   echo "#define NPSIZES ${npsizes}"
   echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
   echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
   echo "#define SMALL_MAXCLASS ${small_maxclass}"
   echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
-  echo "#define HUGE_MAXCLASS ${huge_maxclass}"
+  echo "#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
+  echo "#define LARGE_MAXCLASS ${large_maxclass}"
   echo "#endif"
   echo
 done
@@ -290,29 +351,11 @@ cat <<EOF
 #undef SIZE_CLASSES_DEFINED
 /*
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
- * cannot support more than 256 small size classes.  Further constrain NBINS to
- * 255 since all small size classes, plus a "not small" size class must be
- * stored in 8 bits of arena_chunk_map_bits_t's bits field.
+ * cannot support more than 256 small size classes.
 */
-#if (NBINS > 255)
+#if (NBINS > 256)
 #  error "Too many small size classes"
 #endif
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */
 EOF
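The slab_size() addition above finds the smallest page multiple that holds a whole number of regions. A worked instance in C, not part of the diff (the function name slab_size_pgs and the main() harness are illustrative): for an 80-byte region and 4 KiB pages the loop settles on 5 pages, since 20480 = 256 * 80 is the first page multiple divisible by 80. The second assert checks the SC tuple identity size == (1 << lg_grp) + (ndelta << lg_delta) for that class.

#include <assert.h>
#include <stddef.h>

/* Smallest slab (in pages) holding an integer number of reg_size regions. */
static size_t
slab_size_pgs(size_t page, size_t reg_size) {
    size_t try_slab = page;
    while (try_slab % reg_size != 0) {
        try_slab += page;
    }
    return try_slab / page;
}

int
main(void) {
    assert(slab_size_pgs(4096, 80) == 5); /* 20480 == 256 * 80 */
    /* 80 == (1 << 6) + (1 << 4): lg_grp=6, ndelta=1, lg_delta=4. */
    assert(((size_t)1 << 6) + ((size_t)1 << 4) == 80);
    return 0;
}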
@@ -1,9 +1,11 @@
+#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
+#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
+
 /*
 * This file was generated by the following command:
 *   sh smoothstep.sh smoother 200 24 3 15
 */
 /******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
 
 /*
 * This header defines a precomputed table based on the smoothstep family of
@@ -227,20 +229,4 @@
     STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
     STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
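The generation command "smoother 200 24 3 15" and the STEP entries imply 200 steps of the "smoother" polynomial 6x^5 - 15x^4 + 10x^3 in 24-bit fixed point. A check in C, not part of the diff, assuming the generator truncates to 24 fractional bits (for these two entries truncation and rounding agree):

#include <assert.h>
#include <stdint.h>

/* The "smoother" polynomial the table precomputes. */
static double
smoother(double x) {
    return x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
}

int
main(void) {
    /* STEP(199, ...): smoother(199/200) scaled by 2^24. */
    uint64_t fixed = (uint64_t)(smoother(199.0 / 200.0) * (1 << 24));
    assert(fixed == UINT64_C(0x0000000000ffffeb));
    /* STEP(200, ...): x = 1 maps to exactly 2^24. */
    assert((uint64_t)(smoother(1.0) * (1 << 24)) == UINT64_C(0x1000000));
    return 0;
}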
@@ -54,12 +54,14 @@ smoothest() {
 }
 
 cat <<EOF
+#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
+#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
+
 /*
 * This file was generated by the following command:
 *   $cmd
 */
 /******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
 
 /*
 * This header defines a precomputed table based on the smoothstep family of
@@ -95,21 +97,5 @@ done
 echo
 
 cat <<EOF
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
 EOF
@@ -1,51 +1,36 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_SPIN_H
+#define JEMALLOC_INTERNAL_SPIN_H
 
-typedef struct spin_s spin_t;
+#ifdef JEMALLOC_SPIN_C_
+#  define SPIN_INLINE extern inline
+#else
+#  define SPIN_INLINE inline
+#endif
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+#define SPIN_INITIALIZER {0U}
 
-struct spin_s {
+typedef struct {
     unsigned iteration;
-};
+} spin_t;
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
+SPIN_INLINE void
+spin_adaptive(spin_t *spin) {
+    volatile uint32_t i;
 
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void spin_init(spin_t *spin);
-void spin_adaptive(spin_t *spin);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
-JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
-
-    spin->iteration = 0;
-}
-
-JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
-    volatile uint64_t i;
-
-    for (i = 0; i < (KQU(1) << spin->iteration); i++)
-        CPU_SPINWAIT;
-
-    if (spin->iteration < 63)
+    if (spin->iteration < 5) {
+        for (i = 0; i < (1U << spin->iteration); i++) {
+            CPU_SPINWAIT;
+        }
         spin->iteration++;
+    } else {
+#ifdef _WIN32
+        SwitchToThread();
+#else
+        sched_yield();
+#endif
+    }
 }
 
-#endif
+#undef SPIN_INLINE
 
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_SPIN_H */
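Typical use of the new spin_adaptive(), sketched rather than taken from the diff: bounded exponential busy-waiting (1, 2, 4, 8, 16 pause iterations) that falls back to yielding the CPU once spin->iteration reaches 5. The lock_held flag and the wrapper name are hypothetical.

static void
wait_for_unlock(volatile int *lock_held) {
    spin_t spinner = SPIN_INITIALIZER;

    while (*lock_held) {
        /* Short CPU_SPINWAIT bursts first, then sched_yield()/SwitchToThread(). */
        spin_adaptive(&spinner);
    }
}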
@@ -1,26 +1,51 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_STATS_H
+#define JEMALLOC_INTERNAL_STATS_H
 
-typedef struct tcache_bin_stats_s tcache_bin_stats_t;
-typedef struct malloc_bin_stats_s malloc_bin_stats_t;
-typedef struct malloc_large_stats_s malloc_large_stats_t;
-typedef struct malloc_huge_stats_s malloc_huge_stats_t;
-typedef struct arena_stats_s arena_stats_t;
-typedef struct chunk_stats_s chunk_stats_t;
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/stats_tsd.h"
 
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
+/* OPTION(opt, var_name, default, set_value_to) */
+#define STATS_PRINT_OPTIONS \
+    OPTION('J', json, false, true) \
+    OPTION('g', general, true, false) \
+    OPTION('m', merged, config_stats, false) \
+    OPTION('d', destroyed, config_stats, false) \
+    OPTION('a', unmerged, config_stats, false) \
+    OPTION('b', bins, true, false) \
+    OPTION('l', large, true, false) \
+    OPTION('x', mutex, true, false)
 
-struct tcache_bin_stats_s {
-    /*
-     * Number of allocation requests that corresponded to the size of this
-     * bin.
-     */
-    uint64_t nrequests;
+enum {
+#define OPTION(o, v, d, s) stats_print_option_num_##v,
+    STATS_PRINT_OPTIONS
+#undef OPTION
+    stats_print_tot_num_options
 };
 
-struct malloc_bin_stats_s {
+/* Options for stats_print. */
+extern bool opt_stats_print;
+extern char opt_stats_print_opts[stats_print_tot_num_options+1];
+
+/* Implements je_malloc_stats_print. */
+void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+    const char *opts);
+
+/*
+ * In those architectures that support 64-bit atomics, we use atomic updates for
+ * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
+ * externally.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+typedef atomic_u64_t arena_stats_u64_t;
+#else
+/* Must hold the arena stats mutex while reading atomically. */
+typedef uint64_t arena_stats_u64_t;
+#endif
+
+typedef struct malloc_bin_stats_s {
     /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
@@ -49,149 +74,91 @@ struct malloc_bin_stats_s {
     /* Number of tcache flushes to this bin. */
     uint64_t nflushes;
 
-    /* Total number of runs created for this bin's size class. */
-    uint64_t nruns;
+    /* Total number of slabs created for this bin's size class. */
+    uint64_t nslabs;
 
     /*
-     * Total number of runs reused by extracting them from the runs tree for
-     * this bin's size class.
+     * Total number of slabs reused by extracting them from the slabs heap
+     * for this bin's size class.
     */
-    uint64_t reruns;
+    uint64_t reslabs;
 
-    /* Current number of runs in this bin. */
-    size_t curruns;
-};
+    /* Current number of slabs in this bin. */
+    size_t curslabs;
 
-struct malloc_large_stats_s {
+    mutex_prof_data_t mutex_data;
+} malloc_bin_stats_t;
+
+typedef struct malloc_large_stats_s {
     /*
     * Total number of allocation/deallocation requests served directly by
-     * the arena.  Note that tcache may allocate an object, then recycle it
-     * many times, resulting many increments to nrequests, but only one
-     * each to nmalloc and ndalloc.
+     * the arena.
     */
-    uint64_t nmalloc;
-    uint64_t ndalloc;
+    arena_stats_u64_t nmalloc;
+    arena_stats_u64_t ndalloc;
 
     /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
-    uint64_t nrequests;
+    arena_stats_u64_t nrequests; /* Partially derived. */
+
+    /* Current number of allocations of this size class. */
+    size_t curlextents; /* Derived. */
+} malloc_large_stats_t;
+
+typedef struct decay_stats_s {
+    /* Total number of purge sweeps. */
+    arena_stats_u64_t npurge;
+    /* Total number of madvise calls made. */
+    arena_stats_u64_t nmadvise;
+    /* Total number of pages purged. */
+    arena_stats_u64_t purged;
+} decay_stats_t;
 
-    /*
-     * Current number of runs of this size class, including runs currently
-     * cached by tcache.
-     */
-    size_t curruns;
-};
-
-struct malloc_huge_stats_s {
-    /*
-     * Total number of allocation/deallocation requests served directly by
-     * the arena.
-     */
-    uint64_t nmalloc;
-    uint64_t ndalloc;
-
-    /* Current number of (multi-)chunk allocations of this size class. */
-    size_t curhchunks;
-};
-
-struct arena_stats_s {
-    /* Number of bytes currently mapped. */
-    size_t mapped;
+/*
+ * Arena stats.  Note that fields marked "derived" are not directly maintained
+ * within the arena code; rather their values are derived during stats merge
+ * requests.
+ */
+typedef struct arena_stats_s {
+#ifndef JEMALLOC_ATOMIC_U64
+    malloc_mutex_t mtx;
+#endif
+
+    /* Number of bytes currently mapped, excluding retained memory. */
+    atomic_zu_t mapped; /* Partially derived. */
 
     /*
-     * Number of bytes currently retained as a side effect of munmap() being
-     * disabled/bypassed.  Retained bytes are technically mapped (though
-     * always decommitted or purged), but they are excluded from the mapped
-     * statistic (above).
+     * Number of unused virtual memory bytes currently retained.  Retained
+     * bytes are technically mapped (though always decommitted or purged),
+     * but they are excluded from the mapped statistic (above).
     */
-    size_t retained;
+    atomic_zu_t retained; /* Derived. */
 
-    /*
-     * Total number of purge sweeps, total number of madvise calls made,
-     * and total pages purged in order to keep dirty unused memory under
-     * control.
-     */
-    uint64_t npurge;
-    uint64_t nmadvise;
-    uint64_t purged;
+    decay_stats_t decay_dirty;
+    decay_stats_t decay_muzzy;
 
-    /*
-     * Number of bytes currently mapped purely for metadata purposes, and
-     * number of bytes currently allocated for internal metadata.
-     */
-    size_t metadata_mapped;
-    size_t metadata_allocated; /* Protected via atomic_*_z(). */
+    atomic_zu_t base; /* Derived. */
+    atomic_zu_t internal;
+    atomic_zu_t resident; /* Derived. */
 
-    /* Per-size-category statistics. */
-    size_t allocated_large;
-    uint64_t nmalloc_large;
-    uint64_t ndalloc_large;
-    uint64_t nrequests_large;
+    atomic_zu_t allocated_large; /* Derived. */
+    arena_stats_u64_t nmalloc_large; /* Derived. */
+    arena_stats_u64_t ndalloc_large; /* Derived. */
+    arena_stats_u64_t nrequests_large; /* Derived. */
 
-    size_t allocated_huge;
-    uint64_t nmalloc_huge;
-    uint64_t ndalloc_huge;
+    /* Number of bytes cached in tcache associated with this arena. */
+    atomic_zu_t tcache_bytes; /* Derived. */
+
+    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
 
     /* One element for each large size class. */
-    malloc_large_stats_t *lstats;
+    malloc_large_stats_t lstats[NSIZES - NBINS];
 
-    /* One element for each huge size class. */
-    malloc_huge_stats_t *hstats;
-};
+    /* Arena uptime. */
+    nstime_t uptime;
+} arena_stats_t;
 
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-extern bool opt_stats_print;
-
-extern size_t stats_cactive;
-
-void stats_print(void (*write)(void *, const char *), void *cbopaque,
-    const char *opts);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-size_t stats_cactive_get(void);
-void stats_cactive_add(size_t size);
-void stats_cactive_sub(size_t size);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
-JEMALLOC_INLINE size_t
-stats_cactive_get(void)
-{
-
-    return (atomic_read_z(&stats_cactive));
-}
-
-JEMALLOC_INLINE void
-stats_cactive_add(size_t size)
-{
-
-    assert(size > 0);
-    assert((size & chunksize_mask) == 0);
-
-    atomic_add_z(&stats_cactive, size);
-}
-
-JEMALLOC_INLINE void
-stats_cactive_sub(size_t size)
-{
-
-    assert(size > 0);
-    assert((size & chunksize_mask) == 0);
-
-    atomic_sub_z(&stats_cactive, size);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_STATS_H */
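The STATS_PRINT_OPTIONS change above uses the X-macro idiom: one option list expands once into enum constants and once into parsing logic, so the two can never drift apart. A self-contained sketch, not part of the diff, with hypothetical names (DEMO_OPTIONS, parse_opts) and config_stats replaced by plain constants:

#include <stdbool.h>
#include <string.h>

#define DEMO_OPTIONS \
    OPTION('J', json, false) \
    OPTION('g', general, true)

enum {
#define OPTION(o, v, d) opt_num_##v,
    DEMO_OPTIONS
#undef OPTION
    opt_num_total /* == 2: one constant generated per OPTION entry */
};

static void
parse_opts(const char *opts, bool *json, bool *general) {
    *json = false;
    *general = true;
    /* Each flag present in opts flips its option away from the default. */
#define OPTION(o, v, d) if (strchr(opts, o) != NULL) { *v = !(d); }
    DEMO_OPTIONS
#undef OPTION
}

int
main(void) {
    bool json, general;
    parse_opts("J", &json, &general);
    return (json && general) ? 0 : 1;
}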
12
include/jemalloc/internal/stats_tsd.h
Normal file
@@ -0,0 +1,12 @@
#ifndef JEMALLOC_INTERNAL_STATS_TSD_H
#define JEMALLOC_INTERNAL_STATS_TSD_H

typedef struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t nrequests;
} tcache_bin_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */
317
include/jemalloc/internal/sz.h
Normal file
@@ -0,0 +1,317 @@
#ifndef JEMALLOC_INTERNAL_SIZE_H
#define JEMALLOC_INTERNAL_SIZE_H

#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/*
 * sz module: Size computations.
 *
 * Some abbreviations used here:
 *   p: Page
 *   ind: Index
 *   s, sz: Size
 *   u: Usable size
 *   a: Aligned
 *
 * These are not always used completely consistently, but should be enough to
 * interpret function names.  E.g. sz_psz2ind converts page size to page size
 * index; sz_sa2u converts a (size, alignment) allocation request to the usable
 * size that would result from such an allocation.
 */

/*
 * sz_pind2sz_tab encodes the same information as could be computed by
 * sz_pind2sz_compute().
 */
extern size_t const sz_pind2sz_tab[NPSIZES+1];
/*
 * sz_index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by sz_index2size_compute().
 */
extern size_t const sz_index2size_tab[NSIZES];
/*
 * sz_size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via sz_size2index().
 */
extern uint8_t const sz_size2index_tab[];

static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    PAGE
#else
    0
#endif
    ;

JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
    if (unlikely(psz > LARGE_MAXCLASS)) {
        return NPSIZES;
    }
    {
        pszind_t x = lg_floor((psz<<1)-1);
        pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
            (LG_SIZE_CLASS_GROUP + LG_PAGE);
        pszind_t grp = shift << LG_SIZE_CLASS_GROUP;

        pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZD(-1) << lg_delta;
        pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        pszind_t ind = grp + mod;
        return ind;
    }
}

static inline size_t
sz_pind2sz_compute(pszind_t pind) {
    if (unlikely(pind == NPSIZES)) {
        return LARGE_MAXCLASS + PAGE;
    }
    {
        size_t grp = pind >> LG_SIZE_CLASS_GROUP;
        size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_PAGE +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_PAGE-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t sz = grp_size + mod_size;
        return sz;
    }
}

static inline size_t
sz_pind2sz_lookup(pszind_t pind) {
    size_t ret = (size_t)sz_pind2sz_tab[pind];
    assert(ret == sz_pind2sz_compute(pind));
    return ret;
}

static inline size_t
sz_pind2sz(pszind_t pind) {
    assert(pind < NPSIZES+1);
    return sz_pind2sz_lookup(pind);
}

static inline size_t
sz_psz2u(size_t psz) {
    if (unlikely(psz > LARGE_MAXCLASS)) {
        return LARGE_MAXCLASS + PAGE;
    }
    {
        size_t x = lg_floor((psz<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
            LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (psz + delta_mask) & ~delta_mask;
        return usize;
    }
}

static inline szind_t
sz_size2index_compute(size_t size) {
    if (unlikely(size > LARGE_MAXCLASS)) {
        return NSIZES;
    }
#if (NTBINS != 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
    }
#endif
    {
        szind_t x = lg_floor((size<<1)-1);
        szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
            x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
        szind_t grp = shift << LG_SIZE_CLASS_GROUP;

        szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
            ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

        size_t delta_inverse_mask = ZD(-1) << lg_delta;
        szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
            ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

        szind_t index = NTBINS + grp + mod;
        return index;
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
    assert(size <= LOOKUP_MAXCLASS);
    {
        szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
        assert(ret == sz_size2index_compute(size));
        return ret;
    }
}

JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index(size_t size) {
    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS)) {
        return sz_size2index_lookup(size);
    }
    return sz_size2index_compute(size);
}

static inline size_t
sz_index2size_compute(szind_t index) {
#if (NTBINS > 0)
    if (index < NTBINS) {
        return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
    }
#endif
    {
        size_t reduced_index = index - NTBINS;
        size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
        size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
            1);

        size_t grp_size_mask = ~((!!grp)-1);
        size_t grp_size = ((ZU(1) << (LG_QUANTUM +
            (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

        size_t shift = (grp == 0) ? 1 : grp;
        size_t lg_delta = shift + (LG_QUANTUM-1);
        size_t mod_size = (mod+1) << lg_delta;

        size_t usize = grp_size + mod_size;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
    size_t ret = (size_t)sz_index2size_tab[index];
    assert(ret == sz_index2size_compute(index));
    return ret;
}

JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
    assert(index < NSIZES);
    return sz_index2size_lookup(index);
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
    if (unlikely(size > LARGE_MAXCLASS)) {
        return 0;
    }
#if (NTBINS > 0)
    if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
        size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
        size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
        return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
            (ZU(1) << lg_ceil));
    }
#endif
    {
        size_t x = lg_floor((size<<1)-1);
        size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
            ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
        size_t delta = ZU(1) << lg_delta;
        size_t delta_mask = delta - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        return usize;
    }
}

JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_lookup(size_t size) {
    size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));

    assert(ret == sz_s2u_compute(size));
    return ret;
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u(size_t size) {
    assert(size > 0);
    if (likely(size <= LOOKUP_MAXCLASS)) {
        return sz_s2u_lookup(size);
    }
    return sz_s2u_compute(size);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sz_sa2u(size_t size, size_t alignment) {
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /* Try for a small size class. */
    if (size <= SMALL_MAXCLASS && alignment < PAGE) {
        /*
         * Round size up to the nearest multiple of alignment.
         *
         * This done, we can take advantage of the fact that for each
         * small size class, every object is aligned at the smallest
         * power of two that is non-zero in the base two representation
         * of the size.  For example:
         *
         *   Size |   Base 2 | Minimum alignment
         *   -----+----------+------------------
         *     96 |  1100000 |                32
         *    144 | 10100000 |                32
         *    192 | 11000000 |                64
         */
        usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
        if (usize < LARGE_MINCLASS) {
            return usize;
        }
    }

    /* Large size class.  Beware of overflow. */

    if (unlikely(alignment > LARGE_MAXCLASS)) {
        return 0;
    }

    /* Make sure result is a large size class. */
    if (size <= LARGE_MINCLASS) {
        usize = LARGE_MINCLASS;
    } else {
        usize = sz_s2u(size);
        if (usize < size) {
            /* size_t overflow. */
            return 0;
        }
    }

    /*
     * Calculate the multi-page mapping that large_palloc() would need in
     * order to guarantee the alignment.
     */
    if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
        /* size_t overflow. */
        return 0;
    }
    return usize;
}

#endif /* JEMALLOC_INTERNAL_SIZE_H */
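A worked instance of the rounding at the heart of sz_s2u_compute() above, not part of the diff (round_to_class and main are illustrative). With LG_SIZE_CLASS_GROUP == 2 there are 4 classes per size doubling; a 100-byte request falls in the (64, 128] group, where x = lg_floor((100<<1)-1) = 7 gives lg_delta = 7 - 2 - 1 = 4, i.e. 16-byte spacing, so 100 rounds up to the 112-byte class.

#include <assert.h>
#include <stddef.h>

/* (size + delta_mask) & ~delta_mask, extracted from sz_s2u_compute(). */
static size_t
round_to_class(size_t size, unsigned lg_delta) {
    size_t delta_mask = ((size_t)1 << lg_delta) - 1;
    return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
    assert(round_to_class(100, 4) == 112);
    assert(round_to_class(112, 4) == 112); /* exact class sizes map to themselves */
    return 0;
}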
@ -1,472 +0,0 @@
|
|||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_TYPES
|
|
||||||
|
|
||||||
typedef struct tcache_bin_info_s tcache_bin_info_t;
|
|
||||||
typedef struct tcache_bin_s tcache_bin_t;
|
|
||||||
typedef struct tcache_s tcache_t;
|
|
||||||
typedef struct tcaches_s tcaches_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* tcache pointers close to NULL are used to encode state information that is
|
|
||||||
* used for two purposes: preventing thread caching on a per thread basis and
|
|
||||||
* cleaning up during thread shutdown.
|
|
||||||
*/
|
|
||||||
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
|
|
||||||
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
|
|
||||||
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
|
|
||||||
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Absolute minimum number of cache slots for each small bin.
|
|
||||||
*/
|
|
||||||
#define TCACHE_NSLOTS_SMALL_MIN 20
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Absolute maximum number of cache slots for each small bin in the thread
|
|
||||||
* cache. This is an additional constraint beyond that imposed as: twice the
|
|
||||||
* number of regions per run for this size class.
|
|
||||||
*
|
|
||||||
* This constant must be an even number.
|
|
||||||
*/
|
|
||||||
#define TCACHE_NSLOTS_SMALL_MAX 200
|
|
||||||
|
|
||||||
/* Number of cache slots for large size classes. */
|
|
||||||
#define TCACHE_NSLOTS_LARGE 20
|
|
||||||
|
|
||||||
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
|
|
||||||
#define LG_TCACHE_MAXCLASS_DEFAULT 15
|
|
||||||
|
|
||||||
/*
|
|
||||||
* TCACHE_GC_SWEEP is the approximate number of allocation events between
|
|
||||||
* full GC sweeps. Integer rounding may cause the actual number to be
|
|
||||||
* slightly higher, since GC is performed incrementally.
|
|
||||||
*/
|
|
||||||
#define TCACHE_GC_SWEEP 8192
|
|
||||||
|
|
||||||
/* Number of tcache allocation/deallocation events between incremental GCs. */
|
|
||||||
#define TCACHE_GC_INCR \
|
|
||||||
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_TYPES */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_STRUCTS
|
|
||||||
|
|
||||||
typedef enum {
|
|
||||||
tcache_enabled_false = 0, /* Enable cast to/from bool. */
|
|
||||||
tcache_enabled_true = 1,
|
|
||||||
tcache_enabled_default = 2
|
|
||||||
} tcache_enabled_t;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Read-only information associated with each element of tcache_t's tbins array
|
|
||||||
* is stored separately, mainly to reduce memory usage.
|
|
||||||
*/
|
|
||||||
struct tcache_bin_info_s {
|
|
||||||
unsigned ncached_max; /* Upper limit on ncached. */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct tcache_bin_s {
|
|
||||||
tcache_bin_stats_t tstats;
|
|
||||||
int low_water; /* Min # cached since last GC. */
|
|
||||||
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
|
|
||||||
unsigned ncached; /* # of cached objects. */
|
|
||||||
/*
|
|
||||||
* To make use of adjacent cacheline prefetch, the items in the avail
|
|
||||||
* stack goes to higher address for newer allocations. avail points
|
|
||||||
* just above the available space, which means that
|
|
||||||
* avail[-ncached, ... -1] are available items and the lowest item will
|
|
||||||
* be allocated first.
|
|
||||||
*/
|
|
||||||
void **avail; /* Stack of available objects. */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct tcache_s {
|
|
||||||
ql_elm(tcache_t) link; /* Used for aggregating stats. */
|
|
||||||
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
|
|
||||||
ticker_t gc_ticker; /* Drives incremental GC. */
|
|
||||||
szind_t next_gc_bin; /* Next bin to GC. */
|
|
||||||
tcache_bin_t tbins[1]; /* Dynamically sized. */
|
|
||||||
/*
|
|
||||||
* The pointer stacks associated with tbins follow as a contiguous
|
|
||||||
* array. During tcache initialization, the avail pointer in each
|
|
||||||
* element of tbins is initialized to point to the proper offset within
|
|
||||||
* this array.
|
|
||||||
*/
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Linkage for list of available (previously used) explicit tcache IDs. */
|
|
||||||
struct tcaches_s {
|
|
||||||
union {
|
|
||||||
tcache_t *tcache;
|
|
||||||
tcaches_t *next;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_STRUCTS */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_EXTERNS
|
|
||||||
|
|
||||||
extern bool opt_tcache;
|
|
||||||
extern ssize_t opt_lg_tcache_max;
|
|
||||||
|
|
||||||
extern tcache_bin_info_t *tcache_bin_info;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
|
|
||||||
* large-object bins.
|
|
||||||
*/
|
|
||||||
extern unsigned nhbins;
|
|
||||||
|
|
||||||
/* Maximum cached size class. */
|
|
||||||
extern size_t tcache_maxclass;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
|
|
||||||
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
|
|
||||||
* completely disjoint from this data structure. tcaches starts off as a sparse
|
|
||||||
* array, so it has no physical memory footprint until individual pages are
|
|
||||||
* touched. This allows the entire array to be allocated the first time an
|
|
||||||
* explicit tcache is created without a disproportionate impact on memory usage.
|
|
||||||
*/
|
|
||||||
extern tcaches_t *tcaches;
|
|
||||||
|
|
||||||
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
|
|
||||||
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
|
|
||||||
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
|
||||||
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
|
|
||||||
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
|
|
||||||
szind_t binind, unsigned rem);
|
|
||||||
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
|
|
||||||
unsigned rem, tcache_t *tcache);
|
|
||||||
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
|
|
||||||
arena_t *oldarena, arena_t *newarena);
|
|
||||||
tcache_t *tcache_get_hard(tsd_t *tsd);
|
|
||||||
tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
|
|
||||||
void tcache_cleanup(tsd_t *tsd);
|
|
||||||
void tcache_enabled_cleanup(tsd_t *tsd);
|
|
||||||
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
|
|
||||||
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
|
|
||||||
void tcaches_flush(tsd_t *tsd, unsigned ind);
|
|
||||||
void tcaches_destroy(tsd_t *tsd, unsigned ind);
|
|
||||||
bool tcache_boot(tsdn_t *tsdn);
|
|
||||||
void tcache_prefork(tsdn_t *tsdn);
|
|
||||||
void tcache_postfork_parent(tsdn_t *tsdn);
|
|
||||||
void tcache_postfork_child(tsdn_t *tsdn);
|
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_EXTERNS */
|
|
||||||
/******************************************************************************/
|
|
||||||
#ifdef JEMALLOC_H_INLINES
|
|
||||||
|
|
||||||
#ifndef JEMALLOC_ENABLE_INLINE
|
|
||||||
void tcache_event(tsd_t *tsd, tcache_t *tcache);
|
|
||||||
void tcache_flush(void);
|
|
||||||
bool tcache_enabled_get(void);
|
|
||||||
tcache_t *tcache_get(tsd_t *tsd, bool create);
|
|
||||||
void tcache_enabled_set(bool enabled);
|
|
||||||
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
|
|
||||||
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
|
|
||||||
size_t size, szind_t ind, bool zero, bool slow_path);
|
|
||||||
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
|
|
||||||
size_t size, szind_t ind, bool zero, bool slow_path);
|
|
||||||
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
|
|
||||||
szind_t binind, bool slow_path);
|
|
||||||
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
|
|
||||||
size_t size, bool slow_path);
|
|
||||||
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
|
|
||||||
JEMALLOC_INLINE void
|
|
||||||
tcache_flush(void)
|
|
||||||
{
|
|
||||||
tsd_t *tsd;
|
|
||||||
|
|
||||||
cassert(config_tcache);
|
|
||||||
|
|
||||||
tsd = tsd_fetch();
|
|
||||||
tcache_cleanup(tsd);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_INLINE bool
|
|
||||||
tcache_enabled_get(void)
|
|
||||||
{
|
|
||||||
tsd_t *tsd;
|
|
||||||
tcache_enabled_t tcache_enabled;
|
|
||||||
|
|
||||||
cassert(config_tcache);
|
|
||||||
|
|
||||||
tsd = tsd_fetch();
|
|
||||||
tcache_enabled = tsd_tcache_enabled_get(tsd);
|
|
||||||
if (tcache_enabled == tcache_enabled_default) {
|
|
||||||
tcache_enabled = (tcache_enabled_t)opt_tcache;
|
|
||||||
tsd_tcache_enabled_set(tsd, tcache_enabled);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ((bool)tcache_enabled);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
|
||||||
tcache_enabled_set(bool enabled)
|
|
||||||
{
|
|
||||||
tsd_t *tsd;
|
|
||||||
tcache_enabled_t tcache_enabled;
|
|
||||||
|
|
||||||
cassert(config_tcache);
|
|
||||||
|
|
||||||
tsd = tsd_fetch();
|
|
||||||
|
|
||||||
tcache_enabled = (tcache_enabled_t)enabled;
|
|
||||||
tsd_tcache_enabled_set(tsd, tcache_enabled);
|
|
||||||
|
|
||||||
if (!enabled)
|
|
||||||
tcache_cleanup(tsd);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE tcache_t *
|
|
||||||
tcache_get(tsd_t *tsd, bool create)
|
|
||||||
{
|
|
||||||
tcache_t *tcache;
|
|
||||||
|
|
||||||
if (!config_tcache)
|
|
||||||
return (NULL);
|
|
||||||
|
|
||||||
tcache = tsd_tcache_get(tsd);
|
|
||||||
if (!create)
|
|
||||||
return (tcache);
|
|
||||||
if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
|
|
||||||
tcache = tcache_get_hard(tsd);
|
|
||||||
tsd_tcache_set(tsd, tcache);
|
|
||||||
}
|
|
||||||
|
|
||||||
return (tcache);
|
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	if (unlikely(ticker_tick(&tcache->gc_ticker)))
		tcache_event_hard(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return (NULL);
	}
	/*
	 * tcache_success (instead of ret) should be checked upon the return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (which is unknown to the
	 * compiler), and eagerly checking ret would cause pipeline stall
	 * (waiting for the cacheline).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((int)tbin->ncached < tbin->low_water))
		tbin->low_water = tbin->ncached;

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (tcache_hard_success == false)
			return (NULL);
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += usize;
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
			return (NULL);

		ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (config_prof && usize == LARGE_MINCLASS) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero))
					memset(ret, 0, usize);
			}
		} else
			memset(ret, 0, usize);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += usize;
	}

	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
	szind_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	binind = size2index(size);

	if (slow_path && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_large(ptr, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
		    NULL));
	}
	return (elm->tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
55
include/jemalloc/internal/tcache_externs.h
Normal file
@ -0,0 +1,55 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H

#include "jemalloc/internal/size_classes.h"

extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;

extern tcache_bin_info_t *tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern unsigned nhbins;

/* Maximum cached size class. */
extern size_t tcache_maxclass;

/*
 * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
 * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
 * completely disjoint from this data structure.  tcaches starts off as a sparse
 * array, so it has no physical memory footprint until individual pages are
 * touched.  This allows the entire array to be allocated the first time an
 * explicit tcache is created without a disproportionate impact on memory usage.
 */
extern tcaches_t *tcaches;
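Since these mallctls are the public route into the tcaches array, a minimal usage sketch may help; it relies only on the documented tcache.create mallctl and MALLOCX_TCACHE() flag, with error handling reduced to the essentials:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Sketch: allocate from an explicit tcache rather than the automatic
 * per-thread one.  "tcache.create" touches one slot of the sparse
 * tcaches array described above. */
static void *
alloc_from_explicit_tcache(size_t size) {
	unsigned tc;
	size_t sz = sizeof(tc);

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return mallocx(size, MALLOCX_TCACHE(tc));
}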
size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
    arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
void tcache_cleanup(tsd_t *tsd);
void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
void tcache_flush(void);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);

#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
250
include/jemalloc/internal/tcache_inlines.h
Normal file
@ -0,0 +1,250 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}
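Application code reaches this function through jemalloc's documented thread.tcache.enabled mallctl; a minimal sketch (error handling elided):

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Sketch: turn the calling thread's cache off and back on. */
static void
toggle_thread_tcache(void) {
	bool off = false, on = true;

	mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
	/* ... phase in which per-thread caching is unwanted ... */
	mallctl("thread.tcache.enabled", NULL, NULL, &on, sizeof(on));
}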
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}
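The ticker is a cheap countdown: ticker_tick() returns true once every TCACHE_GC_INCR events, so tcache_event_hard() runs on only a small fraction of calls. A self-contained sketch of the same pattern, using a hypothetical stand-in rather than jemalloc's ticker.h:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;   /* counts down to zero */
	int32_t nticks; /* reload value */
} toy_ticker_t;

/* Mirrors the ticker_tick() contract: fires once every nticks calls. */
static bool
toy_ticker_tick(toy_ticker_t *t) {
	if (--t->tick == 0) {
		t->tick = t->nticks;
		return true;
	}
	return false;
}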
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return NULL;
	}
	/*
	 * tcache_success (instead of ret) should be checked upon the return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (which is unknown to the
	 * compiler), and eagerly checking ret would cause pipeline stall
	 * (waiting for the cacheline).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
		tbin->low_water = tbin->ncached;
	}

	return ret;
}
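Per the comment above, callers branch on the success flag rather than on the returned pointer; a hedged sketch of that convention (hypothetical wrapper, not in this header):

/* Hypothetical wrapper: test tcache_success, not ret, so the branch
 * does not have to wait for the load from the avail stack. */
JEMALLOC_ALWAYS_INLINE void *
tcache_try_alloc(tcache_bin_t *tbin) {
	bool success;
	void *ret = tcache_alloc_easy(tbin, &success);

	if (unlikely(!success)) {
		return NULL;	/* caller falls through to a *_hard() path */
	}
	return ret;
}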
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	tbin = tcache_small_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (tcache_hard_success == false) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		tbin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}
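Callers derive binind from the request size before reaching this function. A hedged sketch of that mapping (hypothetical helper; sz_size2index() is the inverse of the sz_index2size() used above, and the 112-byte figure assumes a typical size-class configuration):

/* Hypothetical helper: route a small request into the fast path.
 * E.g. a 100-byte request commonly maps to the 112-byte class. */
JEMALLOC_ALWAYS_INLINE void *
small_alloc_sketch(tsd_t *tsd, tcache_t *tcache, size_t size) {
	szind_t binind = sz_size2index(size);

	if (binind >= NBINS) {
		return NULL;	/* not a small size class */
	}
	/* NULL arena lets arena_choose() pick; zero=false, slow_path=true. */
	return tcache_alloc_small(tsd, NULL, tcache, sz_index2size(binind),
	    binind, false, true);
}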
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	tbin = tcache_large_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			tbin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}
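Note the asymmetry with the small path: a large-bin miss allocates exactly one object and performs no batch refill, and per the TCACHE_NSLOTS_LARGE constant defined in tcache_types.h, each large size class caches at most 20 objects.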
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
	}

	tbin = tcache_small_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}
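The final argument to tcache_bin_flush_small() is the number of objects to retain. Worked through with the TCACHE_NSLOTS_SMALL_MAX value from tcache_types.h: if ncached_max is 200, a full bin is flushed down to 200 >> 1 = 100 cached objects, so the push that follows is guaranteed to find room.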
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	tbin = tcache_large_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
64
include/jemalloc/internal/tcache_structs.h
Normal file
@ -0,0 +1,64 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H

#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats_tsd.h"
#include "jemalloc/internal/ticker.h"

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	low_water_t low_water;	/* Min # cached since last GC. */
	uint32_t ncached;	/* # of cached objects. */
	/*
	 * ncached and stats are both modified frequently.  Let's keep them
	 * close so that they have a higher chance of being on the same
	 * cacheline, thus less write-backs.
	 */
	tcache_bin_stats_t tstats;
	/*
	 * To make use of adjacent cacheline prefetch, the items in the avail
	 * stack goes to higher address for newer allocations.  avail points
	 * just above the available space, which means that
	 * avail[-ncached, ... -1] are available items and the lowest item will
	 * be allocated first.
	 */
	void **avail;		/* Stack of available objects. */
};
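A worked illustration of that layout (commentary only, not part of the header):

/*
 * With ncached == 3:
 *
 *   avail[-3]   avail[-2]   avail[-1]   avail
 *   ^ allocated first                   ^ points just past the items
 *
 * tcache_alloc_easy() reads avail[-ncached] and then decrements ncached,
 * so successive allocations walk toward higher addresses (friendly to
 * adjacent-cacheline prefetch), while tcache_dalloc_*() increments
 * ncached and stores to the new avail[-ncached].
 */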
struct tcache_s {
	/* Data accessed frequently first: prof, ticker and small bins. */
	uint64_t prof_accumbytes;	/* Cleared after arena_prof_accum(). */
	ticker_t gc_ticker;		/* Drives incremental GC. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
	tcache_bin_t tbins_small[NBINS];
	/* Data accessed less often below. */
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	arena_t *arena;			/* Associated arena. */
	szind_t next_gc_bin;		/* Next bin to GC. */
	/* For small bins, fill (ncached_max >> lg_fill_div). */
	uint8_t lg_fill_div[NBINS];
	tcache_bin_t tbins_large[NSIZES-NBINS];
};

/* Linkage for list of available (previously used) explicit tcache IDs. */
struct tcaches_s {
	union {
		tcache_t *tcache;
		tcaches_t *next;
	};
};

#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
61
include/jemalloc/internal/tcache_types.h
Normal file
@ -0,0 +1,61 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H

#include "jemalloc/internal/size_classes.h"

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;

/* ncached is cast to this type for comparison. */
typedef int32_t low_water_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per thread basis and
 * cleaning up during thread shutdown.
 */
#define TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
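Code holding such a pointer must distinguish a live tcache from these sentinels; a minimal sketch (hypothetical helper, not part of this header):

#include <stdbool.h>
#include <stdint.h>

/* NULL and the three states above all encode "no usable tcache",
 * since they compare <= TCACHE_STATE_MAX (i.e. <= 3). */
static inline bool
tcache_ptr_is_live(tcache_t *tcache) {
	return (uintptr_t)tcache > (uintptr_t)TCACHE_STATE_MAX;
}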
/*
 * Absolute minimum number of cache slots for each small bin.
 */
#define TCACHE_NSLOTS_SMALL_MIN		20

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond that imposed as: twice the
 * number of regions per slab for this size class.
 *
 * This constant must be an even number.
 */
#define TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT	15
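Worked out: 1U << 15 is 32768, so with the default opt_lg_tcache_max the thread cache covers size classes up to 32 KiB.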
/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
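For example, with a hypothetical NBINS of 36: TCACHE_GC_INCR = 8192/36 + 1 = 228, and a full sweep over all 36 bins then takes 36 * 228 = 8208 events, slightly above TCACHE_GC_SWEEP, exactly as the comment predicts.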
/* Used in TSD static initializer only.  Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER		{0}

/* Used in TSD static initializer only.  Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER	false

#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
Some files were not shown because too many files have changed in this diff.