commit 498856f44a (parent d28e5a6696)

    Move slabs out of chunks.
@@ -509,26 +509,20 @@ for (i = 0; i < nbins; i++) {

 <para>In addition to multiple arenas, unless
 <option>--disable-tcache</option> is specified during configuration, this
-allocator supports thread-specific caching for small and large objects, in
-order to make it possible to completely avoid synchronization for most
-allocation requests. Such caching allows very fast allocation in the
-common case, but it increases memory usage and fragmentation, since a
-bounded number of objects can remain allocated in each thread cache.</para>
+allocator supports thread-specific caching, in order to make it possible to
+completely avoid synchronization for most allocation requests. Such caching
+allows very fast allocation in the common case, but it increases memory
+usage and fragmentation, since a bounded number of objects can remain
+allocated in each thread cache.</para>

-<para>Memory is conceptually broken into equal-sized chunks, where the chunk
-size is a power of two that is greater than the page size. Chunks are
-always aligned to multiples of the chunk size. This alignment makes it
-possible to find metadata for user objects very quickly. User objects are
-broken into three categories according to size: small, large, and huge.
-Multiple small and large objects can reside within a single chunk, whereas
-huge objects each have one or more chunks backing them. Each chunk that
-contains small and/or large objects tracks its contents as runs of
-contiguous pages (unused, backing a set of small objects, or backing one
-large object). The combination of chunk alignment and chunk page maps makes
-it possible to determine all metadata regarding small and large allocations
-in constant time.</para>
+<para>Memory is conceptually broken into extents. Extents are always
+aligned to multiples of the page size. This alignment makes it possible to
+find metadata for user objects quickly. User objects are broken into two
+categories according to size: small and large. Contiguous small objects
+comprise a slab, which resides within a single extent, whereas large objects
+each have their own extents backing them.</para>

-<para>Small objects are managed in groups by page runs. Each run maintains
+<para>Small objects are managed in groups by slabs. Each slab maintains
 a bitmap to track which regions are in use. Allocation requests that are no
 more than half the quantum (8 or 16, depending on architecture) are rounded
 up to the nearest power of two that is at least <code
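Aside (not part of the patch): a minimal sketch of why the page alignment described above makes metadata lookup cheap. The mask arithmetic and the idea of a page-keyed lookup table (e.g. a radix tree) are illustrative assumptions here, not the allocator's actual lookup code.

#include <stdint.h>

#define LG_PAGE 12                        /* assume 4 KiB pages */
#define PAGE    ((uintptr_t)1 << LG_PAGE)

/*
 * Every extent starts on a page boundary, so the page containing an
 * arbitrary user pointer is found with a single mask; a per-page table
 * can then map that page address to the owning extent's metadata.
 */
static inline uintptr_t
page_addr(const void *ptr)
{
    return ((uintptr_t)ptr & ~(PAGE - 1));
}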
@@ -536,11 +530,9 @@ for (i = 0; i < nbins; i++) {
 classes are multiples of the quantum, spaced such that there are four size
 classes for each doubling in size, which limits internal fragmentation to
 approximately 20% for all but the smallest size classes. Small size classes
-are smaller than four times the page size, large size classes are smaller
-than the chunk size (see the <link
-linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and
-huge size classes extend from the chunk size up to the largest size class
-that does not exceed <constant>PTRDIFF_MAX</constant>.</para>
+are smaller than four times the page size, and large size classes extend
+from four times the page size up to the largest size class that does not
+exceed <constant>PTRDIFF_MAX</constant>.</para>

 <para>Allocations are packed tightly together, which can be an issue for
 multi-threaded applications. If you need to assure that allocations do not
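Aside (not part of the patch): a worked instance of the "approximately 20%" bound above, assuming the 16-byte quantum used in the size-class table later on this page. With four size classes per doubling, the classes between 256 and 512 bytes are 320, 384, 448, and 512; the worst case in that range is a 321-byte request served from the 384-byte class:

    wasted space  = 384 - 321 = 63 bytes
    fragmentation = 63 / 321 ≈ 19.6%, i.e. roughly 20%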
@@ -560,18 +552,16 @@ for (i = 0; i < nbins; i++) {
 trivially succeeds in place as long as the pre-size and post-size both round
 up to the same size class. No other API guarantees are made regarding
 in-place resizing, but the current implementation also tries to resize large
-and huge allocations in place, as long as the pre-size and post-size are
-both large or both huge. In such cases shrinkage always succeeds for large
-size classes, but for huge size classes the chunk allocator must support
-splitting (see <link
+allocations in place, as long as the pre-size and post-size are both large.
+For shrinkage to succeed, the extent allocator must support splitting (see
+<link
 linkend="arena.i.chunk_hooks"><mallctl>arena.<i>.chunk_hooks</mallctl></link>).
-Growth only succeeds if the trailing memory is currently available, and
-additionally for huge size classes the chunk allocator must support
-merging.</para>
+Growth only succeeds if the trailing memory is currently available, and the
+extent allocator supports merging.</para>

-<para>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
-64-bit system, the size classes in each category are as shown in <xref
-linkend="size_classes" xrefstyle="template:Table %n"/>.</para>
+<para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
+size classes in each category are as shown in <xref linkend="size_classes"
+xrefstyle="template:Table %n"/>.</para>

 <table xml:id="size_classes" frame="all">
 <title>Size classes</title>
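Aside (not part of the patch): a hedged sketch of how an application can probe for the in-place resizing described above, using jemalloc's non-standard xallocx() interface. xallocx() never moves an allocation; it returns the allocation's resulting real size, so the attempt succeeded in place only if the return value reaches the requested size. The specific sizes are arbitrary.

#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    void *p = mallocx(16384, 0);             /* 16 KiB: a large size class */
    size_t newsize = xallocx(p, 32768, 0, 0);
    if (newsize >= 32768) {
        /* Grew in place: trailing memory was available and merged. */
    }
    dallocx(p, 0);
    return (0);
}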
@@ -625,7 +615,7 @@ for (i = 0; i < nbins; i++) {
 <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
 </row>
 <row>
-<entry morerows="7">Large</entry>
+<entry morerows="15">Large</entry>
 <entry>2 KiB</entry>
 <entry>[16 KiB]</entry>
 </row>
@@ -655,12 +645,7 @@ for (i = 0; i < nbins; i++) {
 </row>
 <row>
 <entry>256 KiB</entry>
-<entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry>
-</row>
-<row>
-<entry morerows="8">Huge</entry>
-<entry>256 KiB</entry>
-<entry>[2 MiB]</entry>
+<entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
 </row>
 <row>
 <entry>512 KiB</entry>
@@ -1875,16 +1860,16 @@ typedef struct {
 (<type>uint32_t</type>)
 <literal>r-</literal>
 </term>
-<listitem><para>Number of regions per page run.</para></listitem>
+<listitem><para>Number of regions per slab.</para></listitem>
 </varlistentry>

-<varlistentry id="arenas.bin.i.run_size">
+<varlistentry id="arenas.bin.i.slab_size">
 <term>
-<mallctl>arenas.bin.<i>.run_size</mallctl>
+<mallctl>arenas.bin.<i>.slab_size</mallctl>
 (<type>size_t</type>)
 <literal>r-</literal>
 </term>
-<listitem><para>Number of bytes per page run.</para></listitem>
+<listitem><para>Number of bytes per slab.</para></listitem>
 </varlistentry>

 <varlistentry id="arenas.nhchunks">
@@ -2185,7 +2170,7 @@ typedef struct {
 (<type>size_t</type>)
 <literal>r-</literal>
 </term>
-<listitem><para>Number of pages in active runs.</para></listitem>
+<listitem><para>Number of pages in active extents.</para></listitem>
 </varlistentry>

 <varlistentry id="stats.arenas.i.pdirty">
@@ -2194,8 +2179,9 @@ typedef struct {
 (<type>size_t</type>)
 <literal>r-</literal>
 </term>
-<listitem><para>Number of pages within unused runs that are potentially
-dirty, and for which <function>madvise<parameter>...</parameter>
+<listitem><para>Number of pages within unused extents that are
+potentially dirty, and for which
+<function>madvise<parameter>...</parameter>
 <parameter><constant>MADV_DONTNEED</constant></parameter></function> or
 similar has not been called.</para></listitem>
 </varlistentry>
@@ -2483,35 +2469,35 @@ typedef struct {
 <listitem><para>Cumulative number of tcache flushes.</para></listitem>
 </varlistentry>

-<varlistentry id="stats.arenas.i.bins.j.nruns">
+<varlistentry id="stats.arenas.i.bins.j.nslabs">
 <term>
-<mallctl>stats.arenas.<i>.bins.<j>.nruns</mallctl>
+<mallctl>stats.arenas.<i>.bins.<j>.nslabs</mallctl>
 (<type>uint64_t</type>)
 <literal>r-</literal>
 [<option>--enable-stats</option>]
 </term>
-<listitem><para>Cumulative number of runs created.</para></listitem>
+<listitem><para>Cumulative number of slabs created.</para></listitem>
 </varlistentry>

-<varlistentry id="stats.arenas.i.bins.j.nreruns">
+<varlistentry id="stats.arenas.i.bins.j.nreslabs">
 <term>
-<mallctl>stats.arenas.<i>.bins.<j>.nreruns</mallctl>
+<mallctl>stats.arenas.<i>.bins.<j>.nreslabs</mallctl>
 (<type>uint64_t</type>)
 <literal>r-</literal>
 [<option>--enable-stats</option>]
 </term>
-<listitem><para>Cumulative number of times the current run from which
+<listitem><para>Cumulative number of times the current slab from which
 to allocate changed.</para></listitem>
 </varlistentry>

-<varlistentry id="stats.arenas.i.bins.j.curruns">
+<varlistentry id="stats.arenas.i.bins.j.curslabs">
 <term>
-<mallctl>stats.arenas.<i>.bins.<j>.curruns</mallctl>
+<mallctl>stats.arenas.<i>.bins.<j>.curslabs</mallctl>
 (<type>size_t</type>)
 <literal>r-</literal>
 [<option>--enable-stats</option>]
 </term>
-<listitem><para>Current number of runs.</para></listitem>
+<listitem><para>Current number of slabs.</para></listitem>
 </varlistentry>

 <varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
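Aside (not part of the patch): a short example of reading one of the renamed per-bin statistics through the mallctl() interface documented here. It assumes jemalloc was built with --enable-stats; arena index 0 and bin index 0 are arbitrary choices.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    size_t curslabs;

    /* Refresh the cached statistics before reading them. */
    mallctl("epoch", &epoch, &sz, &epoch, sz);

    sz = sizeof(curslabs);
    if (mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz,
        NULL, 0) == 0)
        printf("bin 0: %zu current slabs\n", curslabs);
    return (0);
}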
@@ -3,9 +3,9 @@

 #define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

-/* Maximum number of regions in one run. */
-#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
-#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
+/* Maximum number of regions in one slab. */
+#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
+#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)

 /*
 * The minimum ratio of active:dirty pages per arena is computed as:
@@ -29,12 +29,7 @@ typedef enum {
 /* Number of event ticks between time checks. */
 #define DECAY_NTICKS_PER_UPDATE 1000

-typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
-typedef struct arena_avail_links_s arena_avail_links_t;
-typedef struct arena_run_s arena_run_t;
-typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
-typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
-typedef struct arena_chunk_s arena_chunk_t;
+typedef struct arena_slab_data_s arena_slab_data_t;
 typedef struct arena_bin_info_s arena_bin_info_t;
 typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
@@ -45,152 +40,25 @@ typedef struct arena_tdata_s arena_tdata_t;
 #ifdef JEMALLOC_H_STRUCTS

 #ifdef JEMALLOC_ARENA_STRUCTS_A
-struct arena_run_s {
-/* Index of bin this run is associated with. */
+struct arena_slab_data_s {
+/* Index of bin this slab is associated with. */
 szind_t binind;

-/* Number of free regions in run. */
+/* Number of free regions in slab. */
 unsigned nfree;

 /* Per region allocated/deallocated bitmap. */
 bitmap_t bitmap[BITMAP_GROUPS_MAX];
 };

-/* Each element of the chunk map corresponds to one page within the chunk. */
-struct arena_chunk_map_bits_s {
-/*
-* Run address (or size) and various flags are stored together. The bit
-* layout looks like (assuming 32-bit system):
-*
-* ???????? ???????? ???nnnnn nnndumla
-*
-* ? : Unallocated: Run address for first/last pages, unset for internal
-* pages.
-* Small: Run page offset.
-* Large: Run page count for first page, unset for trailing pages.
-* n : binind for small size class, BININD_INVALID for large size class.
-* d : dirty?
-* u : unzeroed?
-* m : decommitted?
-* l : large?
-* a : allocated?
-*
-* Following are example bit patterns for the three types of runs.
-*
-* p : run page offset
-* s : run size
-* n : binind for size class; large objects set these to BININD_INVALID
-* x : don't care
-* - : 0
-* + : 1
-* [DUMLA] : bit set
-* [dumla] : bit unset
-*
-* Unallocated (clean):
-* ssssssss ssssssss sss+++++ +++dum-a
-* xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
-* ssssssss ssssssss sss+++++ +++dUm-a
-*
-* Unallocated (dirty):
-* ssssssss ssssssss sss+++++ +++D-m-a
-* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-* ssssssss ssssssss sss+++++ +++D-m-a
-*
-* Small:
-* pppppppp pppppppp pppnnnnn nnnd---A
-* pppppppp pppppppp pppnnnnn nnn----A
-* pppppppp pppppppp pppnnnnn nnnd---A
-*
-* Large:
-* ssssssss ssssssss sss+++++ +++D--LA
-* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-* -------- -------- ---+++++ +++D--LA
-*
-* Large (sampled, size <= LARGE_MINCLASS):
-* ssssssss ssssssss sssnnnnn nnnD--LA
-* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-* -------- -------- ---+++++ +++D--LA
-*
-* Large (not sampled, size == LARGE_MINCLASS):
-* ssssssss ssssssss sss+++++ +++D--LA
-* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
-* -------- -------- ---+++++ +++D--LA
-*/
-size_t bits;
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
-
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
-#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
-#define CHUNK_MAP_DIRTY ((size_t)0x10U)
-#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
-
-#define CHUNK_MAP_BININD_SHIFT 5
-#define BININD_INVALID ((size_t)0xffU)
-#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
-#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
-
-#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
-#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
-#define CHUNK_MAP_SIZE_MASK \
-(~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
-};
-
-struct arena_runs_dirty_link_s {
-qr(arena_runs_dirty_link_t) rd_link;
-};
-
-/*
-* Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
-* like arena_chunk_map_bits_t. Two separate arrays are stored within each
-* chunk header in order to improve cache locality.
-*/
-struct arena_chunk_map_misc_s {
-/*
-* Linkage for run heaps. There are two disjoint uses:
-*
-* 1) arena_t's runs_avail heaps.
-* 2) arena_run_t conceptually uses this linkage for in-use non-full
-* runs, rather than directly embedding linkage.
-*/
-phn(arena_chunk_map_misc_t) ph_link;
-
-union {
-/* Linkage for list of dirty runs. */
-arena_runs_dirty_link_t rd;
-
-/* Profile counters, used for large object runs. */
-union {
-void *prof_tctx_pun;
-prof_tctx_t *prof_tctx;
-};
-
-/* Small region run metadata. */
-arena_run_t run;
-};
-};
-typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
 #endif /* JEMALLOC_ARENA_STRUCTS_A */

 #ifdef JEMALLOC_ARENA_STRUCTS_B
-/* Arena chunk header. */
-struct arena_chunk_s {
-/*
-* Map of pages within chunk that keeps track of free/large/small. The
-* first map_bias entries are omitted, since the chunk header does not
-* need to be tracked in the map. This omission saves a header page
-* for common chunk sizes (e.g. 4 MiB).
-*/
-arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
-};
-
 /*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
-* Each run has the following layout:
+* Each slab has the following layout:
 *
 * /--------------------\
 * | region 0 |
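Aside (not part of the patch): a toy sketch of the bookkeeping that arena_slab_data_s implies — one bit per region plus a free count — not jemalloc's actual bitmap implementation. The 512-region cap matches SLAB_MAXREGS under the common assumption LG_PAGE = 12 and LG_TINY_MIN = 3 (4 KiB pages, 8-byte minimum regions).

#include <stddef.h>
#include <stdint.h>

#define TOY_MAXREGS 512                   /* 1 << (LG_PAGE - LG_TINY_MIN) */

struct toy_slab_data {
    unsigned nfree;                       /* free regions remaining */
    uint64_t bitmap[TOY_MAXREGS / 64];    /* 1 bit per region; 1 = in use */
};

/* Allocate one region: find a clear bit, set it, return its index. */
static size_t
toy_slab_alloc(struct toy_slab_data *slab, size_t nregs)
{
    size_t i;

    if (slab->nfree == 0)
        return ((size_t)-1);              /* slab is full */
    for (i = 0; i < nregs; i++) {
        uint64_t mask = (uint64_t)1 << (i % 64);
        if ((slab->bitmap[i / 64] & mask) == 0) {
            slab->bitmap[i / 64] |= mask;
            slab->nfree--;
            return (i);
        }
    }
    return ((size_t)-1);
}

The returned index maps to a region address of slab_base + i * reg_size; deallocation clears the bit and increments nfree.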
@@ -205,45 +73,42 @@ struct arena_chunk_s {
 * \--------------------/
 */
 struct arena_bin_info_s {
-/* Size of regions in a run for this bin's size class. */
+/* Size of regions in a slab for this bin's size class. */
 size_t reg_size;

-/* Total size of a run for this bin's size class. */
-size_t run_size;
+/* Total size of a slab for this bin's size class. */
+size_t slab_size;

-/* Total number of regions in a run for this bin's size class. */
+/* Total number of regions in a slab for this bin's size class. */
 uint32_t nregs;

 /*
-* Metadata used to manipulate bitmaps for runs associated with this
+* Metadata used to manipulate bitmaps for slabs associated with this
 * bin.
 */
 bitmap_info_t bitmap_info;
 };

 struct arena_bin_s {
-/*
-* All operations on runcur, runs, and stats require that lock be
-* locked. Run allocation/deallocation are protected by the arena lock,
-* which may be acquired while holding one or more bin locks, but not
-* vise versa.
-*/
+/* All operations on arena_bin_t fields require lock ownership. */
 malloc_mutex_t lock;

 /*
-* Current run being used to service allocations of this bin's size
-* class.
+* Current slab being used to service allocations of this bin's size
+* class. slabcur is independent of slabs_{nonfull,full}; whenever
+* slabcur is reassigned, the previous slab must be deallocated or
+* inserted into slabs_{nonfull,full}.
 */
-arena_run_t *runcur;
+extent_t *slabcur;

 /*
-* Heap of non-full runs. This heap is used when looking for an
-* existing run when runcur is no longer usable. We choose the
-* non-full run that is lowest in memory; this policy tends to keep
-* objects packed well, and it can also help reduce the number of
-* almost-empty chunks.
+* Heap of non-full slabs. This heap is used to assure that new
+* allocations come from the non-full slab that is lowest in memory.
 */
-arena_run_heap_t runs;
+extent_heap_t slabs_nonfull;

+/* Ring sentinel used to track full slabs. */
+extent_t slabs_full;
+
 /* Bin statistics. */
 malloc_bin_stats_t stats;
@@ -272,7 +137,7 @@ struct arena_s {
 * perspective:
 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
 * 2) Bin-related operations are protected by bin locks.
-* 3) Chunk- and run-related operations are protected by this mutex.
+* 3) Chunk-related operations are protected by this mutex.
 */
 malloc_mutex_t lock;

@@ -294,32 +159,17 @@ struct arena_s {

 dss_prec_t dss_prec;

-/* Extant arena chunks. */
-ql_head(extent_t) achunks;
-
-/*
-* In order to avoid rapid chunk allocation/deallocation when an arena
-* oscillates right on the cusp of needing a new chunk, cache the most
-* recently freed chunk. The spare is left in the arena's chunk trees
-* until it is deleted.
-*
-* There is one spare chunk per arena, rather than one spare total, in
-* order to avoid interactions between multiple threads that could make
-* a single spare inadequate.
-*/
-extent_t *spare;
-
 /* Minimum ratio (log base 2) of nactive:ndirty. */
 ssize_t lg_dirty_mult;

 /* True if a thread is currently executing arena_purge_to_limit(). */
 bool purging;

-/* Number of pages in active runs and huge regions. */
+/* Number of pages in active extents. */
 size_t nactive;

 /*
-* Current count of pages within unused runs that are potentially
+* Current count of pages within unused extents that are potentially
 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
 * By tracking this, we can institute a limit on how much dirty unused
 * memory is mapped for each arena.
@@ -327,35 +177,10 @@ struct arena_s {
 size_t ndirty;

 /*
-* Unused dirty memory this arena manages. Dirty memory is conceptually
-* tracked as an arbitrarily interleaved LRU of dirty runs and cached
-* chunks, but the list linkage is actually semi-duplicated in order to
-* avoid extra arena_chunk_map_misc_t space overhead.
-*
-* LRU-----------------------------------------------------------MRU
-*
-* /-- arena ---\
-* | |
-* | |
-* |------------| /-- chunk --\
-* ...->|chunks_cache|<--------------------------->| /------\ |<--...
-* |------------| | |extent| |
-* | | | | | |
-* | | /- run -\ /- run -\ | | | |
-* | | | | | | | | | |
-* | | | | | | | | | |
-* |------------| |-------| |-------| | |------| |
-* ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
-* |------------| |-------| |-------| | |------| |
-* | | | | | | | | | |
-* | | | | | | | \------/ |
-* | | \-------/ \-------/ | |
-* | | | |
-* | | | |
-* \------------/ \-----------/
+* Ring sentinel used to track unused dirty memory. Dirty memory is
+* managed as an LRU of cached extents.
 */
-arena_runs_dirty_link_t runs_dirty;
-extent_t chunks_cache;
+extent_t extents_dirty;

 /*
 * Approximate time in seconds from the creation of a set of unused
@@ -424,16 +249,8 @@ struct arena_s {
 /* User-configurable chunk hook functions. */
 chunk_hooks_t chunk_hooks;

-/* bins is used to store trees of free regions. */
+/* bins is used to store heaps of free regions. */
 arena_bin_t bins[NBINS];
-
-/*
-* Size-segregated address-ordered heaps of this arena's available runs,
-* used for first-best-fit run allocation. Runs are quantized, i.e.
-* they reside in the last heap which corresponds to a size class less
-* than or equal to the run size.
-*/
-arena_run_heap_t runs_avail[NPSIZES];
 };

 /* Used in conjunction with tsd for fast arena-related context lookup. */
@@ -461,15 +278,6 @@ extern ssize_t opt_decay_time;

 extern const arena_bin_info_t arena_bin_info[NBINS];

-extern size_t map_bias; /* Number of arena chunk header pages. */
-extern size_t map_misc_offset;
-extern size_t arena_maxrun; /* Max run size for arenas. */
-
-#ifdef JEMALLOC_JET
-typedef size_t (run_quantize_t)(size_t);
-extern run_quantize_t *run_quantize_floor;
-extern run_quantize_t *run_quantize_ceil;
-#endif
 extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
 chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
 bool *zero);
@@ -514,10 +322,9 @@ void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
 tcache_t *tcache, bool slow_path);
 void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
-arena_chunk_t *chunk, extent_t *extent, void *ptr,
-arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-extent_t *extent, void *ptr, size_t pageind);
+extent_t *extent, void *ptr);
+void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+void *ptr);
 bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
 size_t oldsize, size_t size, size_t extra, bool zero);
 void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
@@ -552,70 +359,19 @@ void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
 #ifdef JEMALLOC_H_INLINES

 #ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
-size_t pageind);
-const arena_chunk_map_bits_t *arena_bitselm_get_const(
-const arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
-size_t pageind);
-const arena_chunk_map_misc_t *arena_miscelm_get_const(
-const arena_chunk_t *chunk, size_t pageind);
-size_t arena_miscelm_to_pageind(const extent_t *extent,
-const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(const extent_t *extent,
-const arena_chunk_map_misc_t *miscelm);
-arena_chunk_map_misc_t *arena_rd_to_miscelm(const extent_t *extent,
-arena_runs_dirty_link_t *rd);
-arena_chunk_map_misc_t *arena_run_to_miscelm(const extent_t *extent,
-arena_run_t *run);
-size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
-const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
-size_t pageind);
-size_t arena_mapbitsp_read(const size_t *mapbitsp);
-size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
-size_t pageind);
-size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
-size_t pageind);
-size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
-size_t pageind);
-szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
-size_t pageind);
-size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
-void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
-size_t arena_mapbits_size_encode(size_t size);
-void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
-size_t size, size_t flags);
-void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
-size_t size);
-void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
-size_t flags);
-void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
-size_t size, size_t flags);
-void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-szind_t binind);
-void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
-size_t runind, szind_t binind, size_t flags);
 void arena_metadata_allocated_add(arena_t *arena, size_t size);
 void arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t arena_metadata_allocated_get(arena_t *arena);
 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
-szind_t arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr,
-size_t mapbits);
 szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
 const void *ptr);
 void arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 size_t usize, prof_tctx_t *tctx);
 void arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-size_t usize, const void *old_ptr, prof_tctx_t *old_tctx);
+prof_tctx_t *tctx);
 void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
 void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
 void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
@@ -630,330 +386,6 @@ void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 # ifdef JEMALLOC_ARENA_INLINE_A
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
-assert(pageind >= map_bias);
-assert(pageind < chunk_npages);
-
-return (&chunk->map_bits[pageind-map_bias]);
-}
-
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
-arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
-assert(pageind >= map_bias);
-assert(pageind < chunk_npages);
-
-return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
-(uintptr_t)map_misc_offset) + pageind-map_bias);
-}
-
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
-arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(const extent_t *extent,
-const arena_chunk_map_misc_t *miscelm)
-{
-arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
-map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
-
-assert(pageind >= map_bias);
-assert(pageind < chunk_npages);
-
-return (pageind);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(const extent_t *extent,
-const arena_chunk_map_misc_t *miscelm)
-{
-arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-size_t pageind = arena_miscelm_to_pageind(extent, miscelm);
-
-return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_rd_to_miscelm(const extent_t *extent, arena_runs_dirty_link_t *rd)
-{
-arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
-*)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
-
-assert(arena_miscelm_to_pageind(extent, miscelm) >= map_bias);
-assert(arena_miscelm_to_pageind(extent, miscelm) < chunk_npages);
-
-return (miscelm);
-}
-
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_run_to_miscelm(const extent_t *extent, arena_run_t *run)
-{
-arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
-*)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
-
-assert(arena_miscelm_to_pageind(extent, miscelm) >= map_bias);
-assert(arena_miscelm_to_pageind(extent, miscelm) < chunk_npages);
-
-return (miscelm);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
-{
-
-return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
-}
-
-JEMALLOC_ALWAYS_INLINE const size_t *
-arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(const size_t *mapbitsp)
-{
-
-return (*mapbitsp);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
-{
-
-return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_size_decode(size_t mapbits)
-{
-size_t size;
-
-#if CHUNK_MAP_SIZE_SHIFT > 0
-size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
-#elif CHUNK_MAP_SIZE_SHIFT == 0
-size = mapbits & CHUNK_MAP_SIZE_MASK;
-#else
-size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
-#endif
-
-return (size);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
-(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
-return (arena_mapbits_size_decode(mapbits));
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
-CHUNK_MAP_ALLOCATED);
-return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-szind_t binind;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
-assert(binind < NBINS || binind == BININD_INVALID);
-return (binind);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
-(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-return (mapbits & CHUNK_MAP_DIRTY);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
-(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-return (mapbits & CHUNK_MAP_UNZEROED);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
-(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-return (mapbits & CHUNK_MAP_DECOMMITTED);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-return (mapbits & CHUNK_MAP_LARGE);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
-{
-size_t mapbits;
-
-mapbits = arena_mapbits_get(chunk, pageind);
-return (mapbits & CHUNK_MAP_ALLOCATED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
-{
-
-*mapbitsp = mapbits;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_size_encode(size_t size)
-{
-size_t mapbits;
-
-#if CHUNK_MAP_SIZE_SHIFT > 0
-mapbits = size << CHUNK_MAP_SIZE_SHIFT;
-#elif CHUNK_MAP_SIZE_SHIFT == 0
-mapbits = size;
-#else
-mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
-#endif
-
-assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
-return (mapbits);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
-size_t flags)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
-assert((size & PAGE_MASK) == 0);
-assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
-assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
-(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
-CHUNK_MAP_BININD_INVALID | flags);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
-size_t size)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-size_t mapbits = arena_mapbitsp_read(mapbitsp);
-
-assert((size & PAGE_MASK) == 0);
-assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
-(mapbits & ~CHUNK_MAP_SIZE_MASK));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
-assert((flags & CHUNK_MAP_UNZEROED) == flags);
-arena_mapbitsp_write(mapbitsp, flags);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
-size_t flags)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
-assert((size & PAGE_MASK) == 0);
-assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
-assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
-(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
-CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
-CHUNK_MAP_ALLOCATED);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-szind_t binind)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-size_t mapbits = arena_mapbitsp_read(mapbitsp);
-
-assert(binind <= BININD_INVALID);
-assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
-large_pad);
-arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
-(binind << CHUNK_MAP_BININD_SHIFT));
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
-szind_t binind, size_t flags)
-{
-size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
-
-assert(binind < BININD_INVALID);
-assert(pageind - runind >= map_bias);
-assert((flags & CHUNK_MAP_UNZEROED) == flags);
-arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
-(binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
-}
-
 JEMALLOC_INLINE void
 arena_metadata_allocated_add(arena_t *arena, size_t size)
 {
@@ -1022,54 +454,6 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
 # endif /* JEMALLOC_ARENA_INLINE_A */

 # ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_ALWAYS_INLINE szind_t
-arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, size_t mapbits)
-{
-szind_t binind;
-
-binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
-
-if (config_debug) {
-const extent_t *extent;
-arena_chunk_t *chunk;
-arena_t *arena;
-size_t pageind;
-size_t actual_mapbits;
-size_t rpages_ind;
-const arena_run_t *run;
-arena_bin_t *bin;
-szind_t run_binind, actual_binind;
-const arena_bin_info_t *bin_info;
-const arena_chunk_map_misc_t *miscelm;
-const void *rpages;
-
-assert(binind != BININD_INVALID);
-assert(binind < NBINS);
-extent = iealloc(tsdn, ptr);
-chunk = (arena_chunk_t *)extent_base_get(extent);
-arena = extent_arena_get(extent);
-pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-actual_mapbits = arena_mapbits_get(chunk, pageind);
-assert(mapbits == actual_mapbits);
-assert(arena_mapbits_large_get(chunk, pageind) == 0);
-assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
-pageind);
-miscelm = arena_miscelm_get_const(chunk, rpages_ind);
-run = &miscelm->run;
-run_binind = run->binind;
-bin = &arena->bins[run_binind];
-actual_binind = (szind_t)(bin - arena->bins);
-assert(run_binind == actual_binind);
-bin_info = &arena_bin_info[actual_binind];
-rpages = arena_miscelm_to_rpages(extent, miscelm);
-assert(((uintptr_t)ptr - (uintptr_t)rpages) % bin_info->reg_size
-== 0);
-}
-
-return (binind);
-}
-
 JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
@@ -1081,27 +465,13 @@ arena_bin_index(arena_t *arena, arena_bin_t *bin)
 JEMALLOC_INLINE prof_tctx_t *
 arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 {
-prof_tctx_t *ret;
-
 cassert(config_prof);
 assert(ptr != NULL);

-if (likely(extent_slab_get(extent))) {
-arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-size_t mapbits = arena_mapbits_get(chunk, pageind);
-assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
-ret = (prof_tctx_t *)(uintptr_t)1U;
-else {
-arena_chunk_map_misc_t *elm =
-arena_miscelm_get_mutable(chunk, pageind);
-ret = atomic_read_p(&elm->prof_tctx_pun);
-}
-} else
-ret = huge_prof_tctx_get(tsdn, extent);
-
-return (ret);
+if (unlikely(!extent_slab_get(extent)))
+return (huge_prof_tctx_get(tsdn, extent));
+return ((prof_tctx_t *)(uintptr_t)1U);
 }

 JEMALLOC_INLINE void
@@ -1112,61 +482,20 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 cassert(config_prof);
 assert(ptr != NULL);

-if (likely(extent_slab_get(extent))) {
-arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-
-assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-
-if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
-(uintptr_t)1U)) {
-arena_chunk_map_misc_t *elm;
-
-assert(arena_mapbits_large_get(chunk, pageind) != 0);
-
-elm = arena_miscelm_get_mutable(chunk, pageind);
-atomic_write_p(&elm->prof_tctx_pun, tctx);
-} else {
-/*
-* tctx must always be initialized for large runs.
-* Assert that the surrounding conditional logic is
-* equivalent to checking whether ptr refers to a large
-* run.
-*/
-assert(arena_mapbits_large_get(chunk, pageind) == 0);
-}
-} else
+if (unlikely(!extent_slab_get(extent)))
 huge_prof_tctx_set(tsdn, extent, tctx);
 }

 JEMALLOC_INLINE void
 arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-size_t usize, const void *old_ptr, prof_tctx_t *old_tctx)
+prof_tctx_t *tctx)
 {

 cassert(config_prof);
 assert(ptr != NULL);
+assert(!extent_slab_get(extent));

-if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
-(uintptr_t)old_tctx > (uintptr_t)1U))) {
-if (likely(extent_slab_get(extent))) {
-arena_chunk_t *chunk =
-(arena_chunk_t *)extent_base_get(extent);
-size_t pageind;
-arena_chunk_map_misc_t *elm;
-
-pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
-LG_PAGE;
-assert(arena_mapbits_allocated_get(chunk, pageind) !=
-0);
-assert(arena_mapbits_large_get(chunk, pageind) != 0);
-
-elm = arena_miscelm_get_mutable(chunk, pageind);
-atomic_write_p(&elm->prof_tctx_pun,
-(prof_tctx_t *)(uintptr_t)1U);
-} else
-huge_prof_tctx_reset(tsdn, extent);
-}
+huge_prof_tctx_reset(tsdn, extent);
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -1231,20 +560,9 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)

 assert(ptr != NULL);

-if (likely(extent_slab_get(extent))) {
-const arena_chunk_t *chunk =
-(const arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-szind_t binind;
-
-assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-binind = arena_mapbits_binind_get(chunk, pageind);
-/* Small allocation. */
-assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
-arena_ptr_small_binind_get(tsdn, ptr,
-arena_mapbits_get(chunk, pageind)) == binind);
-ret = index2size(binind);
-} else
+if (likely(extent_slab_get(extent)))
+ret = index2size(extent_slab_data_get_const(extent)->binind);
+else
 ret = huge_salloc(tsdn, extent);

 return (ret);
@@ -1260,19 +578,13 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,

 if (likely(extent_slab_get(extent))) {
 /* Small allocation. */
-arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-size_t mapbits = arena_mapbits_get(chunk, pageind);
-assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-assert((mapbits & CHUNK_MAP_LARGE) == 0);
 if (likely(tcache != NULL)) {
-szind_t binind = arena_ptr_small_binind_get(tsdn, ptr,
-mapbits);
+szind_t binind = extent_slab_data_get(extent)->binind;
 tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
 slow_path);
 } else {
 arena_dalloc_small(tsdn, extent_arena_get(extent),
-chunk, extent, ptr, pageind);
+extent, ptr);
 }
 } else {
 size_t usize = extent_usize_get(extent);
@ -1282,8 +594,8 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
 arena_dalloc_promoted(tsdn, extent, ptr,
 tcache, slow_path);
 } else {
-tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
-usize, slow_path);
+tcache_dalloc_huge(tsdn_tsd(tsdn), tcache,
+ptr, usize, slow_path);
 }
 } else
 huge_dalloc(tsdn, extent);

@ -1302,15 +614,12 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
 /* Small allocation. */
 if (likely(tcache != NULL)) {
 szind_t binind = size2index(size);
+assert(binind == extent_slab_data_get(extent)->binind);
 tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
 slow_path);
 } else {
-arena_chunk_t *chunk =
-(arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr -
-(uintptr_t)chunk) >> LG_PAGE;
 arena_dalloc_small(tsdn, extent_arena_get(extent),
-chunk, extent, ptr, pageind);
+extent, ptr);
 }
 } else {
 if (likely(tcache != NULL) && size <= tcache_maxclass) {
@ -2,7 +2,7 @@
 #ifdef JEMALLOC_H_TYPES

 /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
+#define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
 #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)

 typedef struct bitmap_level_s bitmap_level_t;

@ -27,9 +27,6 @@ struct extent_s {
 /* True if extent is active (in use). */
 bool e_active;

-/* True if extent is dirty (touched). */
-bool e_dirty;
-
 /*
 * The zeroed flag is used by chunk recycling code to track whether
 * memory is zero-filled.
@ -50,21 +47,27 @@ struct extent_s {
 */
 bool e_slab;

-/* Profile counters, used for huge objects. */
 union {
-void *e_prof_tctx_pun;
-prof_tctx_t *e_prof_tctx;
+/* Small region slab metadata. */
+arena_slab_data_t e_slab_data;
+
+/* Profile counters, used for huge objects. */
+union {
+void *e_prof_tctx_pun;
+prof_tctx_t *e_prof_tctx;
+};
 };

-/* Linkage for arena's runs_dirty and chunks_cache rings. */
-arena_runs_dirty_link_t rd;
-qr(extent_t) cc_link;
+/*
+* Linkage for arena's extents_dirty and arena_bin_t's slabs_full rings.
+*/
+qr(extent_t) qr_link;

 union {
 /* Linkage for per size class address-ordered heaps. */
 phn(extent_t) ph_link;

-/* Linkage for arena's achunks, huge, and node_cache lists. */
+/* Linkage for arena's huge and extent_cache lists. */
 ql_elm(extent_t) ql_link;
 };
 };
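The e_slab flag added above now discriminates which member of the new union is live: slab metadata for small-region extents, profile counters for huge ones. A minimal standalone C sketch of that pattern; the type and field names below are simplified stand-ins, not jemalloc's actual declarations.

/*
 * Illustrative sketch only: a slab flag selects which union member is
 * live, and the accessor asserts the flag before returning slab data.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    unsigned binind;    /* size class of the slab's regions */
    unsigned nfree;     /* free regions remaining in the slab */
} slab_data_sketch_t;

typedef struct {
    bool slab;          /* discriminates the union below */
    union {
        slab_data_sketch_t slab_data;   /* small-region slab metadata */
        void *prof_tctx;                /* profile counters (huge objects) */
    };
} extent_sketch_t;

static slab_data_sketch_t *
slab_data_of(extent_sketch_t *e)
{
    assert(e->slab);    /* only meaningful for slab-backed extents */
    return (&e->slab_data);
}

int
main(void)
{
    extent_sketch_t e = {.slab = true, .slab_data = {.binind = 3, .nfree = 8}};

    printf("binind=%u nfree=%u\n", slab_data_of(&e)->binind,
        slab_data_of(&e)->nfree);
    return (0);
}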
@ -102,11 +105,12 @@ void *extent_before_get(const extent_t *extent);
 void *extent_last_get(const extent_t *extent);
 void *extent_past_get(const extent_t *extent);
 bool extent_active_get(const extent_t *extent);
-bool extent_dirty_get(const extent_t *extent);
 bool extent_retained_get(const extent_t *extent);
 bool extent_zeroed_get(const extent_t *extent);
 bool extent_committed_get(const extent_t *extent);
 bool extent_slab_get(const extent_t *extent);
+arena_slab_data_t *extent_slab_data_get(extent_t *extent);
+const arena_slab_data_t *extent_slab_data_get_const(const extent_t *extent);
 prof_tctx_t *extent_prof_tctx_get(const extent_t *extent);
 void extent_arena_set(extent_t *extent, arena_t *arena);
 void extent_addr_set(extent_t *extent, void *addr);

@ -114,17 +118,15 @@ void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
 void extent_size_set(extent_t *extent, size_t size);
 void extent_usize_set(extent_t *extent, size_t usize);
 void extent_active_set(extent_t *extent, bool active);
-void extent_dirty_set(extent_t *extent, bool dirty);
 void extent_zeroed_set(extent_t *extent, bool zeroed);
 void extent_committed_set(extent_t *extent, bool committed);
 void extent_slab_set(extent_t *extent, bool slab);
 void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
 void extent_init(extent_t *extent, arena_t *arena, void *addr,
-size_t size, size_t usize, bool active, bool dirty, bool zeroed,
-bool committed, bool slab);
-void extent_dirty_insert(extent_t *extent,
-arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
-void extent_dirty_remove(extent_t *extent);
+size_t size, size_t usize, bool active, bool zeroed, bool committed,
+bool slab);
+void extent_ring_insert(extent_t *sentinel, extent_t *extent);
+void extent_ring_remove(extent_t *extent);
 #endif

 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@ -197,18 +199,11 @@ extent_active_get(const extent_t *extent)
 return (extent->e_active);
 }

-JEMALLOC_INLINE bool
-extent_dirty_get(const extent_t *extent)
-{
-
-return (extent->e_dirty);
-}
-
 JEMALLOC_INLINE bool
 extent_retained_get(const extent_t *extent)
 {

-return (qr_next(&extent->rd, rd_link) == &extent->rd);
+return (qr_next(extent, qr_link) == extent);
 }

 JEMALLOC_INLINE bool

@ -232,6 +227,22 @@ extent_slab_get(const extent_t *extent)
 return (extent->e_slab);
 }

+JEMALLOC_INLINE arena_slab_data_t *
+extent_slab_data_get(extent_t *extent)
+{
+
+assert(extent->e_slab);
+return (&extent->e_slab_data);
+}
+
+JEMALLOC_INLINE const arena_slab_data_t *
+extent_slab_data_get_const(const extent_t *extent)
+{
+
+assert(extent->e_slab);
+return (&extent->e_slab_data);
+}
+
 JEMALLOC_INLINE prof_tctx_t *
 extent_prof_tctx_get(const extent_t *extent)
 {
@ -296,13 +307,6 @@ extent_active_set(extent_t *extent, bool active)
 extent->e_active = active;
 }

-JEMALLOC_INLINE void
-extent_dirty_set(extent_t *extent, bool dirty)
-{
-
-extent->e_dirty = dirty;
-}
-
 JEMALLOC_INLINE void
 extent_zeroed_set(extent_t *extent, bool zeroed)
 {

@ -333,8 +337,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)

 JEMALLOC_INLINE void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
-size_t usize, bool active, bool dirty, bool zeroed, bool committed,
-bool slab)
+size_t usize, bool active, bool zeroed, bool committed, bool slab)
 {

 assert(addr == PAGE_ADDR2BASE(addr) || !slab);
@ -344,31 +347,26 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 extent_size_set(extent, size);
 extent_usize_set(extent, usize);
 extent_active_set(extent, active);
-extent_dirty_set(extent, dirty);
 extent_zeroed_set(extent, zeroed);
 extent_committed_set(extent, committed);
 extent_slab_set(extent, slab);
 if (config_prof)
 extent_prof_tctx_set(extent, NULL);
-qr_new(&extent->rd, rd_link);
-qr_new(extent, cc_link);
+qr_new(extent, qr_link);
 }

 JEMALLOC_INLINE void
-extent_dirty_insert(extent_t *extent,
-arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty)
+extent_ring_insert(extent_t *sentinel, extent_t *extent)
 {

-qr_meld(runs_dirty, &extent->rd, rd_link);
-qr_meld(chunks_dirty, extent, cc_link);
+qr_meld(sentinel, extent, qr_link);
 }

 JEMALLOC_INLINE void
-extent_dirty_remove(extent_t *extent)
+extent_ring_remove(extent_t *extent)
 {

-qr_remove(&extent->rd, rd_link);
-qr_remove(extent, cc_link);
+qr_remove(extent, qr_link);
 }
 #endif

@ -5,8 +5,6 @@ arena_alloc_junk_small
 arena_basic_stats_merge
 arena_bin_index
 arena_bin_info
-arena_bitselm_get_const
-arena_bitselm_get_mutable
 arena_boot
 arena_choose
 arena_choose_hard

@ -43,38 +41,11 @@ arena_lg_dirty_mult_get
 arena_lg_dirty_mult_set
 arena_malloc
 arena_malloc_hard
-arena_mapbits_allocated_get
-arena_mapbits_binind_get
-arena_mapbits_decommitted_get
-arena_mapbits_dirty_get
-arena_mapbits_get
-arena_mapbits_internal_set
-arena_mapbits_large_binind_set
-arena_mapbits_large_get
-arena_mapbits_large_set
-arena_mapbits_large_size_get
-arena_mapbits_size_decode
-arena_mapbits_size_encode
-arena_mapbits_small_runind_get
-arena_mapbits_small_set
-arena_mapbits_unallocated_set
-arena_mapbits_unallocated_size_get
-arena_mapbits_unallocated_size_set
-arena_mapbits_unzeroed_get
-arena_mapbitsp_get_const
-arena_mapbitsp_get_mutable
-arena_mapbitsp_read
-arena_mapbitsp_write
-arena_maxrun
 arena_maybe_purge
 arena_metadata_allocated_add
 arena_metadata_allocated_get
 arena_metadata_allocated_sub
 arena_migrate
-arena_miscelm_get_const
-arena_miscelm_get_mutable
-arena_miscelm_to_pageind
-arena_miscelm_to_rpages
 arena_new
 arena_nthreads_dec
 arena_nthreads_get
@ -93,14 +64,11 @@ arena_prof_promote
 arena_prof_tctx_get
 arena_prof_tctx_reset
 arena_prof_tctx_set
-arena_ptr_small_binind_get
 arena_purge
 arena_ralloc
 arena_ralloc_junk_large
 arena_ralloc_no_move
-arena_rd_to_miscelm
 arena_reset
-arena_run_to_miscelm
 arena_salloc
 arena_sdalloc
 arena_stats_merge

@ -213,22 +181,23 @@ extent_before_get
 extent_committed_get
 extent_committed_set
 extent_dalloc
-extent_dirty_get
-extent_dirty_insert
-extent_dirty_remove
-extent_dirty_set
 extent_init
 extent_last_get
 extent_past_get
 extent_prof_tctx_get
 extent_prof_tctx_set
 extent_retained_get
+extent_ring_insert
+extent_ring_remove
 extent_size_get
 extent_size_set
 extent_size_quantize_ceil
 extent_size_quantize_floor
+extent_slab_data_get
+extent_slab_data_get_const
 extent_slab_get
 extent_slab_set
+extent_slab_data_get
 extent_usize_get
 extent_zeroed_get
 extent_zeroed_set
@ -309,8 +278,6 @@ malloc_tsd_no_cleanup
 malloc_vcprintf
 malloc_vsnprintf
 malloc_write
-map_bias
-map_misc_offset
 mb_write
 narenas_auto
 narenas_tdata_cleanup

@ -451,8 +418,6 @@ rtree_subtree_read
 rtree_subtree_read_hard
 rtree_subtree_tryread
 rtree_write
-run_quantize_ceil
-run_quantize_floor
 s2u
 s2u_compute
 s2u_lookup
@ -335,8 +335,8 @@ prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
 void prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 size_t usize, prof_tctx_t *tctx);
 void prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-size_t usize, const void *old_ptr, prof_tctx_t *tctx);
-bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
+prof_tctx_t *tctx);
+bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
 prof_tdata_t **tdata_out);
 prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
 bool update);

@ -344,7 +344,8 @@ void prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 size_t usize, prof_tctx_t *tctx);
 void prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr,
 size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated,
-const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx);
+extent_t *old_extent, const void *old_ptr, size_t old_usize,
+prof_tctx_t *old_tctx);
 void prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr,
 size_t usize);
 #endif
@ -421,14 +422,14 @@ prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
 }

 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
-const void *old_ptr, prof_tctx_t *old_tctx)
+prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
+prof_tctx_t *tctx)
 {

 cassert(config_prof);
 assert(ptr != NULL);

-arena_prof_tctx_reset(tsdn, extent, ptr, usize, old_ptr, old_tctx);
+arena_prof_tctx_reset(tsdn, extent, ptr, tctx);
 }

 JEMALLOC_ALWAYS_INLINE bool

@ -501,10 +502,10 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,

 JEMALLOC_ALWAYS_INLINE void
 prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
-prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
-size_t old_usize, prof_tctx_t *old_tctx)
+prof_tctx_t *tctx, bool prof_active, bool updated, extent_t *old_extent,
+const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx)
 {
-bool sampled, old_sampled;
+bool sampled, old_sampled, moved;

 cassert(config_prof);
 assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
@ -523,19 +524,30 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
 }
 }

+/*
+* The following code must differentiate among eight possible cases,
+* based on three boolean conditions.
+*/
 sampled = ((uintptr_t)tctx > (uintptr_t)1U);
 old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+moved = (ptr != old_ptr);
+
+/*
+* The following block must only execute if this is a non-moving
+* reallocation, because for moving reallocation the old allocation will
+* be deallocated via a separate call.
+*/
+if (unlikely(old_sampled) && !moved)
+prof_free_sampled_object(tsd, old_usize, old_tctx);

 if (unlikely(sampled)) {
 prof_malloc_sample_object(tsd_tsdn(tsd), extent, ptr, usize,
 tctx);
-} else {
-prof_tctx_reset(tsd_tsdn(tsd), extent, ptr, usize, old_ptr,
-old_tctx);
-}
-
-if (unlikely(old_sampled))
-prof_free_sampled_object(tsd, old_usize, old_tctx);
+} else if (moved) {
+prof_tctx_set(tsd_tsdn(tsd), extent, ptr, usize,
+(prof_tctx_t *)(uintptr_t)1U);
+} else if (unlikely(old_sampled))
+prof_tctx_reset(tsd_tsdn(tsd), extent, ptr, tctx);
 }

 JEMALLOC_ALWAYS_INLINE void
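The rewritten prof_realloc() above branches on three booleans: sampled, old_sampled, and moved. A minimal standalone sketch of that branch structure follows; it is an illustration only, with printf calls standing in for the real sampling hooks, and none of the names below are jemalloc's.

#include <stdbool.h>
#include <stdio.h>

static void
realloc_prof_update(bool sampled, bool old_sampled, bool moved)
{
    /*
     * The old sampled object is released here only for an in-place
     * reallocation; a moving reallocation frees it on a separate path.
     */
    if (old_sampled && !moved)
        printf("  free old sampled object\n");

    if (sampled)
        printf("  record new sampled allocation\n");
    else if (moved)
        printf("  set dummy tctx on the new extent\n");
    else if (old_sampled)
        printf("  reset tctx on the unmoved extent\n");
}

int
main(void)
{
    /* Walk all eight combinations of the three conditions. */
    for (int i = 0; i < 8; i++) {
        printf("sampled=%d old_sampled=%d moved=%d\n", i & 1,
            (i >> 1) & 1, (i >> 2) & 1);
        realloc_prof_update(i & 1, (i >> 1) & 1, (i >> 2) & 1);
    }
    return (0);
}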
@ -50,7 +50,7 @@ reg_size_compute() {
 reg_size=$((${grp} + ${delta}*${ndelta}))
 }

-run_size() {
+slab_size() {
 lg_p=$1
 lg_grp=$2
 lg_delta=$3

@ -59,22 +59,22 @@ run_size() {
 pow2 ${lg_p}; p=${pow2_result}
 reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}

-# Compute smallest run size that is an integer multiple of reg_size.
-try_run_size=${p}
-try_nregs=$((${try_run_size} / ${reg_size}))
+# Compute smallest slab size that is an integer multiple of reg_size.
+try_slab_size=${p}
+try_nregs=$((${try_slab_size} / ${reg_size}))
 perfect=0
 while [ ${perfect} -eq 0 ] ; do
-perfect_run_size=${try_run_size}
+perfect_slab_size=${try_slab_size}
 perfect_nregs=${try_nregs}

-try_run_size=$((${try_run_size} + ${p}))
-try_nregs=$((${try_run_size} / ${reg_size}))
-if [ ${perfect_run_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
+try_slab_size=$((${try_slab_size} + ${p}))
+try_nregs=$((${try_slab_size} / ${reg_size}))
+if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
 perfect=1
 fi
 done

-run_size_pgs=$((${perfect_run_size} / ${p}))
+slab_size_pgs=$((${perfect_slab_size} / ${p}))
 }

 size_class() {
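The renamed slab_size() function above searches for the smallest whole-page slab that holds an exact integer number of regions. The same search rendered in C for clarity; the page and region sizes in main() are example values, not output of the script.

#include <stddef.h>
#include <stdio.h>

static size_t
slab_size_pgs(size_t page, size_t reg_size)
{
    size_t slab_size = page;

    /* Grow one page at a time until reg_size divides the slab evenly. */
    while (slab_size % reg_size != 0)
        slab_size += page;
    return (slab_size / page);
}

int
main(void)
{
    /* e.g. an 80-byte size class on 4 KiB pages needs a 5-page slab. */
    printf("%zu\n", slab_size_pgs(4096, 80));
    return (0);
}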
@ -117,7 +117,7 @@ size_class() {

 if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
 bin="yes"
-run_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${run_size_pgs}
+slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
 else
 bin="no"
 pgs=0

@ -278,7 +278,7 @@ cat <<EOF
 * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
 * psz: 'yes' if a multiple of the page size, 'no' otherwise.
 * bin: 'yes' if a small bin size class, 'no' otherwise.
-* pgs: Run page count if a small bin size class, 0 otherwise.
+* pgs: Slab page count if a small bin size class, 0 otherwise.
 * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
 * otherwise.
 * NTBINS: Number of tiny bins.
@ -48,17 +48,17 @@ struct malloc_bin_stats_s {
 /* Number of tcache flushes to this bin. */
 uint64_t nflushes;

-/* Total number of runs created for this bin's size class. */
-uint64_t nruns;
+/* Total number of slabs created for this bin's size class. */
+uint64_t nslabs;

 /*
-* Total number of runs reused by extracting them from the runs tree for
-* this bin's size class.
+* Total number of slabs reused by extracting them from the slabs heap
+* for this bin's size class.
 */
-uint64_t reruns;
+uint64_t reslabs;

-/* Current number of runs in this bin. */
-size_t curruns;
+/* Current number of slabs in this bin. */
+size_t curslabs;
 };

 struct malloc_huge_stats_s {

@ -24,7 +24,7 @@ typedef struct tcaches_s tcaches_t;
 /*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache. This is an additional constraint beyond that imposed as: twice the
-* number of regions per run for this size class.
+* number of regions per slab for this size class.
 *
 * This constant must be an even number.
 */
src/arena.c (1633 lines changed)
File diff suppressed because it is too large
@ -74,8 +74,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 base_resident += PAGE_CEILING(nsize);
 }
 }
-extent_init(extent, NULL, addr, csize, 0, true, false, true, true,
-false);
+extent_init(extent, NULL, addr, csize, 0, true, true, true, false);
 return (extent);
 }

src/chunk.c (11 lines changed)

@ -558,8 +558,7 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
 extent_dalloc(tsdn, arena, extent);
 return (NULL);
 }
-extent_init(extent, arena, addr, size, usize, true, false, zero, commit,
-slab);
+extent_init(extent, arena, addr, size, usize, true, zero, commit, slab);
 if (pad != 0)
 extent_addr_randomize(tsdn, extent, alignment);
 if (chunk_register(tsdn, extent)) {

@ -828,8 +827,8 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 extent_init(&lead, arena, extent_addr_get(extent), size_a,
 usize_a, extent_active_get(extent),
-extent_dirty_get(extent), extent_zeroed_get(extent),
-extent_committed_get(extent), extent_slab_get(extent));
+extent_zeroed_get(extent), extent_committed_get(extent),
+extent_slab_get(extent));

 if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
 &lead_elm_b))

@ -838,8 +837,8 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
 size_a), size_b, usize_b, extent_active_get(extent),
-extent_dirty_get(extent), extent_zeroed_get(extent),
-extent_committed_get(extent), extent_slab_get(extent));
+extent_zeroed_get(extent), extent_committed_get(extent),
+extent_slab_get(extent));
 if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
 &trail_elm_b))
 goto label_error_c;
@ -121,7 +121,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
 if (pad_size != 0) {
 extent_init(pad, arena, pad_addr, pad_size,
-pad_size, false, true, false, true, false);
+pad_size, false, false, true, false);
 }
 dss_next = (void *)((uintptr_t)ret + size);
 if ((uintptr_t)ret < (uintptr_t)dss_max ||
src/ctl.c (37 lines changed)

@ -124,7 +124,7 @@ CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
+CTL_PROTO(arenas_bin_i_slab_size)
 INDEX_PROTO(arenas_bin_i)
 CTL_PROTO(arenas_hchunk_i_size)
 INDEX_PROTO(arenas_hchunk_i)

@ -160,9 +160,9 @@ CTL_PROTO(stats_arenas_i_bins_j_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_curregs)
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
 INDEX_PROTO(stats_arenas_i_bins_j)
 CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
 CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
@ -300,7 +300,7 @@ static const ctl_indexed_node_t arena_node[] = {
 static const ctl_named_node_t arenas_bin_i_node[] = {
 {NAME("size"), CTL(arenas_bin_i_size)},
 {NAME("nregs"), CTL(arenas_bin_i_nregs)},
-{NAME("run_size"), CTL(arenas_bin_i_run_size)}
+{NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
 };
 static const ctl_named_node_t super_arenas_bin_i_node[] = {
 {NAME(""), CHILD(named, arenas_bin_i)}

@ -373,9 +373,9 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
 {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
 {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
 {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
-{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
-{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
-{NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
+{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
+{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}
 };
 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
 {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
@ -549,9 +549,10 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 sstats->bstats[i].nflushes +=
 astats->bstats[i].nflushes;
 }
-sstats->bstats[i].nruns += astats->bstats[i].nruns;
-sstats->bstats[i].reruns += astats->bstats[i].reruns;
-sstats->bstats[i].curruns += astats->bstats[i].curruns;
+sstats->bstats[i].nslabs += astats->bstats[i].nslabs;
+sstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+sstats->bstats[i].curslabs +=
+astats->bstats[i].curslabs;
 }

 for (i = 0; i < NSIZES - NBINS; i++) {

@ -1801,7 +1802,7 @@ CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
 static const ctl_named_node_t *
 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {

@ -2032,12 +2033,12 @@ CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
 ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
 ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
-ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
-ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
-ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+ctl_stats.arenas[mib[2]].bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+ctl_stats.arenas[mib[2]].bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+ctl_stats.arenas[mib[2]].bstats[mib[4]].curslabs, size_t)

 static const ctl_named_node_t *
 stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
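After the renames above, the per-bin slab statistics are reachable under the new mallctl names. A small usage sketch, assuming a stats-enabled jemalloc build with the unprefixed public API; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t curslabs, sz = sizeof(curslabs);

    (void)malloc(1);    /* touch a small size class */
    /* Refresh the stats snapshot, then read the renamed statistic. */
    (void)mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
    if (mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz, NULL,
        0) == 0)
        printf("curslabs = %zu\n", curslabs);
    return (0);
}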
@ -153,8 +153,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 * Zero the trailing bytes of the original allocation's
 * last page, since they are in an indeterminate state.
 * There will always be trailing bytes, because ptr's
-* offset from the beginning of the run is a multiple of
-* CACHELINE in [0 .. PAGE).
+* offset from the beginning of the extent is a multiple
+* of CACHELINE in [0 .. PAGE).
 */
 void *zbase = (void *)
 ((uintptr_t)extent_addr_get(extent) + oldusize);
@ -1707,28 +1707,30 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
+irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
 size_t usize)
 {
 void *p;
-extent_t *e;
+extent_t *extent;
 bool prof_active;
 prof_tctx_t *old_tctx, *tctx;

 prof_active = prof_active_get_unlocked();
-old_tctx = prof_tctx_get(tsd_tsdn(tsd), extent, old_ptr);
+old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
 tctx = prof_alloc_prep(tsd, usize, prof_active, true);
 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-p = irealloc_prof_sample(tsd, extent, old_ptr, old_usize, usize,
-tctx);
-} else
-p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
+p = irealloc_prof_sample(tsd, old_extent, old_ptr, old_usize,
+usize, tctx);
+} else {
+p = iralloc(tsd, old_extent, old_ptr, old_usize, usize, 0,
+false);
+}
 if (unlikely(p == NULL)) {
 prof_alloc_rollback(tsd, tctx, true);
 return (NULL);
 }
-e = (p == old_ptr) ? extent : iealloc(tsd_tsdn(tsd), p);
-prof_realloc(tsd, e, p, usize, tctx, prof_active, true,
+extent = (p == old_ptr) ? old_extent : iealloc(tsd_tsdn(tsd), p);
+prof_realloc(tsd, extent, p, usize, tctx, prof_active, true, old_extent,
 old_ptr, old_usize, old_tctx);

 return (p);
@ -2146,24 +2148,24 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
+irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
 size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
 arena_t *arena)
 {
 void *p;
-extent_t *e;
+extent_t *extent;
 bool prof_active;
 prof_tctx_t *old_tctx, *tctx;

 prof_active = prof_active_get_unlocked();
-old_tctx = prof_tctx_get(tsd_tsdn(tsd), extent, old_ptr);
+old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
 tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-p = irallocx_prof_sample(tsd_tsdn(tsd), extent, old_ptr,
+p = irallocx_prof_sample(tsd_tsdn(tsd), old_extent, old_ptr,
 old_usize, *usize, alignment, zero, tcache, arena, tctx);
 } else {
-p = iralloct(tsd_tsdn(tsd), extent, old_ptr, old_usize, size,
-alignment, zero, tcache, arena);
+p = iralloct(tsd_tsdn(tsd), old_extent, old_ptr, old_usize,
+size, alignment, zero, tcache, arena);
 }
 if (unlikely(p == NULL)) {
 prof_alloc_rollback(tsd, tctx, true);

@ -2179,12 +2181,12 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
 * be the same as the current usize because of in-place large
 * reallocation. Therefore, query the actual value of usize.
 */
-e = extent;
-*usize = isalloc(tsd_tsdn(tsd), e, p);
+extent = old_extent;
+*usize = isalloc(tsd_tsdn(tsd), extent, p);
 } else
-e = iealloc(tsd_tsdn(tsd), p);
-prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
-old_usize, old_tctx);
+extent = iealloc(tsd_tsdn(tsd), p);
+prof_realloc(tsd, extent, p, *usize, tctx, prof_active, true,
+old_extent, old_ptr, old_usize, old_tctx);

 return (p);
 }
@ -2338,8 +2340,8 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
 prof_alloc_rollback(tsd, tctx, false);
 return (usize);
 }
-prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, ptr,
-old_usize, old_tctx);
+prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, extent,
+ptr, old_usize, old_tctx);

 return (usize);
 }
src/stats.c (45 lines changed)

@ -58,29 +58,29 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 if (config_tcache) {
 malloc_cprintf(write_cb, cbopaque,
 "bins: size ind allocated nmalloc"
-" ndalloc nrequests curregs curruns regs"
-" pgs util nfills nflushes newruns"
-" reruns\n");
+" ndalloc nrequests curregs curslabs regs"
+" pgs util nfills nflushes newslabs"
+" reslabs\n");
 } else {
 malloc_cprintf(write_cb, cbopaque,
 "bins: size ind allocated nmalloc"
-" ndalloc nrequests curregs curruns regs"
-" pgs util newruns reruns\n");
+" ndalloc nrequests curregs curslabs regs"
+" pgs util newslabs reslabs\n");
 }
 CTL_GET("arenas.nbins", &nbins, unsigned);
 for (j = 0, in_gap = false; j < nbins; j++) {
-uint64_t nruns;
+uint64_t nslabs;

-CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
 uint64_t);
-if (nruns == 0)
+if (nslabs == 0)
 in_gap = true;
 else {
-size_t reg_size, run_size, curregs, availregs, milli;
-size_t curruns;
+size_t reg_size, slab_size, curregs, availregs, milli;
+size_t curslabs;
 uint32_t nregs;
 uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
-uint64_t reruns;
+uint64_t reslabs;
 char util[6]; /* "x.yyy". */

 if (in_gap) {
@ -90,7 +90,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
 CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
-CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size,
 size_t);
 CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
 &nmalloc, uint64_t);

@ -106,12 +106,12 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
 i, j, &nflushes, uint64_t);
 }
-CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
-&reruns, uint64_t);
-CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
-&curruns, size_t);
+CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j,
+&reslabs, uint64_t);
+CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j,
+&curslabs, size_t);

-availregs = nregs * curruns;
+availregs = nregs * curslabs;
 milli = (availregs != 0) ? (1000 * curregs) / availregs
 : 1000;
 assert(milli <= 1000);
@ -134,9 +134,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 " %12zu %4u %3zu %-5s %12"FMTu64
 " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
 reg_size, j, curregs * reg_size, nmalloc,
-ndalloc, nrequests, curregs, curruns, nregs,
-run_size / page, util, nfills, nflushes,
-nruns, reruns);
+ndalloc, nrequests, curregs, curslabs,
+nregs, slab_size / page, util, nfills,
+nflushes, nslabs, reslabs);
 } else {
 malloc_cprintf(write_cb, cbopaque,
 "%20zu %3u %12zu %12"FMTu64

@ -144,8 +144,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 " %12zu %4u %3zu %-5s %12"FMTu64
 " %12"FMTu64"\n",
 reg_size, j, curregs * reg_size, nmalloc,
-ndalloc, nrequests, curregs, curruns, nregs,
-run_size / page, util, nruns, reruns);
+ndalloc, nrequests, curregs, curslabs,
+nregs, slab_size / page, util, nslabs,
+reslabs);
 }
 }
 }
@ -127,14 +127,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,

 extent = iealloc(tsd_tsdn(tsd), ptr);
 if (extent_arena_get(extent) == bin_arena) {
-arena_chunk_t *chunk =
-(arena_chunk_t *)extent_base_get(extent);
-size_t pageind = ((uintptr_t)ptr -
-(uintptr_t)chunk) >> LG_PAGE;
-arena_chunk_map_bits_t *bitselm =
-arena_bitselm_get_mutable(chunk, pageind);
 arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-bin_arena, chunk, extent, ptr, bitselm);
+bin_arena, extent, ptr);
 } else {
 /*
 * This object was allocated via a different
@ -16,7 +16,7 @@ TEST_BEGIN(test_small_extent_size)
 assert_d_eq(mallctl("arenas.nbins", &nbins, &sz, NULL, 0), 0,
 "Unexpected mallctl failure");

-assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
+assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
 "Unexpected mallctlnametomib failure");
 for (i = 0; i < nbins; i++) {
 mib[2] = i;

@ -71,12 +71,12 @@ TEST_BEGIN(test_huge_extent_size)
 ceil = extent_size_quantize_ceil(extent_size);

 assert_zu_eq(extent_size, floor,
-"Large run quantization should be a no-op for precise "
-"size (lextent_size=%zu, extent_size=%zu)", lextent_size,
+"Extent quantization should be a no-op for precise size "
+"(lextent_size=%zu, extent_size=%zu)", lextent_size,
 extent_size);
 assert_zu_eq(extent_size, ceil,
-"Large run quantization should be a no-op for precise "
-"size (lextent_size=%zu, extent_size=%zu)", lextent_size,
+"Extent quantization should be a no-op for precise size "
+"(lextent_size=%zu, extent_size=%zu)", lextent_size,
 extent_size);

 if (i > 0) {
@ -615,7 +615,8 @@ TEST_BEGIN(test_arenas_bin_constants)

 TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
 TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
-TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
+TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
+arena_bin_info[0].slab_size);

 #undef TEST_ARENAS_BIN_CONSTANT
 }
@ -229,9 +229,9 @@ TEST_BEGIN(test_stats_arenas_bins)
 {
 unsigned arena;
 void *p;
-size_t sz, curruns, curregs;
+size_t sz, curslabs, curregs;
 uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
-uint64_t nruns, nreruns;
+uint64_t nslabs, nreslabs;
 int expected = config_stats ? 0 : ENOENT;

 arena = 0;

@ -266,12 +266,12 @@ TEST_BEGIN(test_stats_arenas_bins)
 NULL, 0), config_tcache ? expected : ENOENT,
 "Unexpected mallctl() result");

-assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+assert_d_eq(mallctl("stats.arenas.0.bins.0.nslabs", &nslabs, &sz,
 NULL, 0), expected, "Unexpected mallctl() result");
-assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+assert_d_eq(mallctl("stats.arenas.0.bins.0.nreslabs", &nreslabs, &sz,
 NULL, 0), expected, "Unexpected mallctl() result");
 sz = sizeof(size_t);
-assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+assert_d_eq(mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz,
 NULL, 0), expected, "Unexpected mallctl() result");

 if (config_stats) {
@ -289,10 +289,10 @@ TEST_BEGIN(test_stats_arenas_bins)
 assert_u64_gt(nflushes, 0,
 "At least one flush should have occurred");
 }
-assert_u64_gt(nruns, 0,
-"At least one run should have been allocated");
-assert_zu_gt(curruns, 0,
-"At least one run should be currently allocated");
+assert_u64_gt(nslabs, 0,
+"At least one slab should have been allocated");
+assert_zu_gt(curslabs, 0,
+"At least one slab should be currently allocated");
 }

 dallocx(p, 0);