Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on the
"arena.<i>.chunk.{alloc,dalloc,purge}" mallctls.  The chunk hooks allow control
over chunk allocation/deallocation, decommit/commit, purging, and
splitting/merging, such that the application can rely on jemalloc's internal
chunk caching and retaining functionality, yet implement a variety of chunk
management mechanisms and policies.

Merge the chunks_[sz]ad_{mmap,dss} red-black trees into chunks_[sz]ad_retained.
This slightly reduces how hard jemalloc tries to honor the dss precedence
setting; prior to this change the precedence setting was also consulted when
recycling chunks.

Fix chunk purging.  Don't purge chunks in arena_purge_stashed(); instead
deallocate them in arena_unstash_purged(), so that the dirty memory linkage
remains valid until after the last time it is used.

This resolves #176 and #201.
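As an illustration of the new interface (not part of this change), a minimal
sketch of installing wrapper hooks through the "arena.<i>.chunk_hooks" mallctl.
It assumes an unprefixed jemalloc 4.x build; the counting_alloc wrapper and the
use of a fresh arena obtained from "arenas.extend" are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static chunk_hooks_t old_hooks;	/* hooks that were in place before ours */

/* Log chunk allocations, then defer to the replaced alloc hook. */
static void *
counting_alloc(void *chunk, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	fprintf(stderr, "chunk alloc: %zu bytes for arena %u\n", size, arena_ind);
	return (old_hooks.alloc(chunk, size, alignment, zero, arena_ind));
}

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	size_t hsz = sizeof(chunk_hooks_t);
	chunk_hooks_t new_hooks;
	char name[64];
	void *p;

	/* Create a fresh arena so that all of its chunks flow through our hooks. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);
	snprintf(name, sizeof(name), "arena.%u.chunk_hooks", arena_ind);

	/* Read the current hooks, then install wrappers that fall back to them. */
	if (mallctl(name, &old_hooks, &hsz, NULL, 0) != 0)
		return (1);
	new_hooks = old_hooks;
	new_hooks.alloc = counting_alloc;
	if (mallctl(name, NULL, NULL, &new_hooks, sizeof(new_hooks)) != 0)
		return (1);

	/* Allocations explicitly bound to arena_ind now exercise the hooks. */
	p = mallocx(4096, MALLOCX_ARENA(arena_ind));
	dallocx(p, 0);
	return (0);
}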
Parent: d059b9d6a1
Commit: b49a334a64
@@ -37,8 +37,7 @@ brevity. Much more detail can be found in the git revision history:
   "opt.prof_thread_active_init", "prof.thread_active_init", and
   "thread.prof.active" mallctls.
 - Add support for per arena application-specified chunk allocators, configured
-  via the "arena<i>.chunk.alloc", "arena<i>.chunk.dalloc", and
-  "arena.<i>.chunk.purge" mallctls.
+  via the "arena.<i>.chunk_hooks" mallctl.
 - Refactor huge allocation to be managed by arenas, so that arenas now
   function as general purpose independent allocators. This is important in
   the context of user-specified chunk allocators, aside from the scalability
@@ -82,9 +82,10 @@ C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
 	$(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \
 	$(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \
 	$(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \
-	$(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \
-	$(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \
-	$(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c
+	$(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/pages.c \
+	$(srcroot)src/prof.c $(srcroot)src/quarantine.c $(srcroot)src/rtree.c \
+	$(srcroot)src/stats.c $(srcroot)src/tcache.c $(srcroot)src/util.c \
+	$(srcroot)src/tsd.c
 ifeq ($(enable_valgrind), 1)
 C_SRCS += $(srcroot)src/valgrind.c
 endif
@@ -1518,18 +1518,48 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 for additional information.</para></listitem>
 </varlistentry>
 
-<varlistentry id="arena.i.chunk.alloc">
+<varlistentry id="arena.i.chunk_hooks">
   <term>
-    <mallctl>arena.<i>.chunk.alloc</mallctl>
-    (<type>chunk_alloc_t *</type>)
+    <mallctl>arena.<i>.chunk_hooks</mallctl>
+    (<type>chunk_hooks_t</type>)
     <literal>rw</literal>
   </term>
-  <listitem><para>Get or set the chunk allocation function for arena
-  <i>. If setting, the chunk deallocation function should
-  also be set via <link linkend="arena.i.chunk.dalloc">
-  <mallctl>arena.<i>.chunk.dalloc</mallctl></link> to a companion
-  function that knows how to deallocate the chunks.
-  <funcsynopsis><funcprototype>
+  <listitem><para>Get or set the chunk management hook functions for arena
+  <i>. The functions must be capable of operating on all extant
+  chunks associated with arena <i>, usually by passing unknown
+  chunks to the replaced functions. In practice, it is feasible to
+  control allocation for arenas created via <link
+  linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
+  that all chunks originate from an application-supplied chunk allocator
+  (by setting custom chunk hook functions just after arena creation), but
+  the automatically created arenas may have already created chunks prior
+  to the application having an opportunity to take over chunk
+  allocation.</para>
+
+  <para><programlisting language="C"><![CDATA[
+typedef struct {
+	chunk_alloc_t		*alloc;
+	chunk_dalloc_t		*dalloc;
+	chunk_commit_t		*commit;
+	chunk_decommit_t	*decommit;
+	chunk_purge_t		*purge;
+	chunk_split_t		*split;
+	chunk_merge_t		*merge;
+} chunk_hooks_t;]]></programlisting>
+  The <type>chunk_hooks_t</type> structure comprises function pointers
+  which are described individually below. jemalloc uses these
+  functions to manage chunk lifetime, which starts off with allocation of
+  mapped committed memory, in the simplest case followed by deallocation.
+  However, there are performance and platform reasons to retain chunks for
+  later reuse. Cleanup attempts cascade from deallocation to decommit to
+  purging, which gives the chunk management functions opportunities to
+  reject the most permanent cleanup operations in favor of less permanent
+  (and often less costly) operations. The chunk splitting and merging
+  operations can also be opted out of, but this is mainly intended to
+  support platforms on which virtual memory mappings provided by the
+  operating system kernel do not automatically coalesce and split.</para>
+
+  <para><funcsynopsis><funcprototype>
     <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
     <paramdef>void *<parameter>chunk</parameter></paramdef>
     <paramdef>size_t <parameter>size</parameter></paramdef>
@@ -1539,9 +1569,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 </funcprototype></funcsynopsis>
 A chunk allocation function conforms to the <type>chunk_alloc_t</type>
 type and upon success returns a pointer to <parameter>size</parameter>
-bytes of memory on behalf of arena <parameter>arena_ind</parameter> such
-that the chunk's base address is a multiple of
-<parameter>alignment</parameter>, as well as setting
+bytes of mapped committed memory on behalf of arena
+<parameter>arena_ind</parameter> such that the chunk's base address is a
+multiple of <parameter>alignment</parameter>, as well as setting
 <parameter>*zero</parameter> to indicate whether the chunk is zeroed.
 Upon error the function returns <constant>NULL</constant> and leaves
 <parameter>*zero</parameter> unmodified. The
@@ -1550,34 +1580,16 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 of two at least as large as the chunk size. Zeroing is mandatory if
 <parameter>*zero</parameter> is true upon function entry. If
 <parameter>chunk</parameter> is not <constant>NULL</constant>, the
-returned pointer must be <parameter>chunk</parameter> or
-<constant>NULL</constant> if it could not be allocated.</para>
-
-<para>Note that replacing the default chunk allocation function makes
-the arena's <link
+returned pointer must be <parameter>chunk</parameter> on success or
+<constant>NULL</constant> on error. Committed memory may be committed
+in absolute terms as on a system that does not overcommit, or in
+implicit terms as on a system that overcommits and satisfies physical
+memory needs on demand via soft page faults. Note that replacing the
+default chunk allocation function makes the arena's <link
 linkend="arena.i.dss"><mallctl>arena.<i>.dss</mallctl></link>
-setting irrelevant.</para></listitem>
-</varlistentry>
+setting irrelevant.</para>
 
-<varlistentry id="arena.i.chunk.dalloc">
-  <term>
-    <mallctl>arena.<i>.chunk.dalloc</mallctl>
-    (<type>chunk_dalloc_t *</type>)
-    <literal>rw</literal>
-  </term>
-  <listitem><para>Get or set the chunk deallocation function for arena
-  <i>. If setting, the chunk deallocation function must
-  be capable of deallocating all extant chunks associated with arena
-  <i>, usually by passing unknown chunks to the deallocation
-  function that was replaced. In practice, it is feasible to control
-  allocation for arenas created via <link
-  linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
-  that all chunks originate from an application-supplied chunk allocator
-  (by setting custom chunk allocation/deallocation/purge functions just
-  after arena creation), but the automatically created arenas may have
-  already created chunks prior to the application having an opportunity to
-  take over chunk allocation.
-  <funcsynopsis><funcprototype>
+<para><funcsynopsis><funcprototype>
   <funcdef>typedef bool <function>(chunk_dalloc_t)</function></funcdef>
   <paramdef>void *<parameter>chunk</parameter></paramdef>
   <paramdef>size_t <parameter>size</parameter></paramdef>
@@ -1587,46 +1599,99 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 <type>chunk_dalloc_t</type> type and deallocates a
 <parameter>chunk</parameter> of given <parameter>size</parameter> on
 behalf of arena <parameter>arena_ind</parameter>, returning false upon
-success.</para></listitem>
-</varlistentry>
+success. If the function returns true, this indicates opt-out from
+deallocation; the virtual memory mapping associated with the chunk
+remains mapped, committed, and available for future use, in which case
+it will be automatically retained for later reuse.</para>
 
-<varlistentry id="arena.i.chunk.purge">
-  <term>
-    <mallctl>arena.<i>.chunk.purge</mallctl>
-    (<type>chunk_purge_t *</type>)
-    <literal>rw</literal>
-  </term>
-  <listitem><para>Get or set the chunk purge function for arena <i>.
-  A chunk purge function optionally discards physical pages associated
-  with pages in the chunk's virtual memory range but leaves the virtual
-  memory mapping intact, and indicates via its return value whether pages
-  in the virtual memory range will be zero-filled the next time they are
-  accessed. If setting, the chunk purge function must be capable of
-  purging all extant chunks associated with arena <i>, usually by
-  passing unknown chunks to the purge function that was replaced. In
-  practice, it is feasible to control allocation for arenas created via
-  <link linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>
-  such that all chunks originate from an application-supplied chunk
-  allocator (by setting custom chunk allocation/deallocation/purge
-  functions just after arena creation), but the automatically created
-  arenas may have already created chunks prior to the application having
-  an opportunity to take over chunk allocation.
-  <funcsynopsis><funcprototype>
+<para><funcsynopsis><funcprototype>
+  <funcdef>typedef bool <function>(chunk_commit_t)</function></funcdef>
+  <paramdef>void *<parameter>chunk</parameter></paramdef>
+  <paramdef>size_t <parameter>size</parameter></paramdef>
+  <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+</funcprototype></funcsynopsis>
+A chunk commit function conforms to the <type>chunk_commit_t</type> type
+and commits zeroed physical memory to back a
+<parameter>chunk</parameter> of given <parameter>size</parameter> on
+behalf of arena <parameter>arena_ind</parameter>, returning false upon
+success. Committed memory may be committed in absolute terms as on a
+system that does not overcommit, or in implicit terms as on a system
+that overcommits and satisfies physical memory needs on demand via soft
+page faults. If the function returns true, this indicates insufficient
+physical memory to satisfy the request.</para>
+
+<para><funcsynopsis><funcprototype>
+  <funcdef>typedef bool <function>(chunk_decommit_t)</function></funcdef>
+  <paramdef>void *<parameter>chunk</parameter></paramdef>
+  <paramdef>size_t <parameter>size</parameter></paramdef>
+  <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+</funcprototype></funcsynopsis>
+A chunk decommit function conforms to the <type>chunk_decommit_t</type>
+type and decommits any physical memory that is backing a
+<parameter>chunk</parameter> of given <parameter>size</parameter> on
+behalf of arena <parameter>arena_ind</parameter>, returning false upon
+success, in which case the chunk will be committed via the chunk commit
+function before being reused. If the function returns true, this
+indicates opt-out from decommit; the memory remains committed and
+available for future use, in which case it will be automatically
+retained for later reuse.</para>
+
+<para><funcsynopsis><funcprototype>
   <funcdef>typedef bool <function>(chunk_purge_t)</function></funcdef>
   <paramdef>void *<parameter>chunk</parameter></paramdef>
+  <paramdef>size_t<parameter>size</parameter></paramdef>
   <paramdef>size_t <parameter>offset</parameter></paramdef>
   <paramdef>size_t <parameter>length</parameter></paramdef>
   <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
 </funcprototype></funcsynopsis>
 A chunk purge function conforms to the <type>chunk_purge_t</type> type
-and purges pages within <parameter>chunk</parameter> at
-<parameter>offset</parameter> bytes, extending for
-<parameter>length</parameter> on behalf of arena
+and optionally discards physical pages within the virtual memory mapping
+associated with <parameter>chunk</parameter> of given
+<parameter>size</parameter> at <parameter>offset</parameter> bytes,
+extending for <parameter>length</parameter> on behalf of arena
 <parameter>arena_ind</parameter>, returning false if pages within the
 purged virtual memory range will be zero-filled the next time they are
-accessed. Note that the memory range being purged may span multiple
-contiguous chunks, e.g. when purging memory that backed a huge
-allocation.</para></listitem>
+accessed.</para>
+
+<para><funcsynopsis><funcprototype>
+  <funcdef>typedef bool <function>(chunk_split_t)</function></funcdef>
+  <paramdef>void *<parameter>chunk</parameter></paramdef>
+  <paramdef>size_t <parameter>size</parameter></paramdef>
+  <paramdef>size_t <parameter>size_a</parameter></paramdef>
+  <paramdef>size_t <parameter>size_b</parameter></paramdef>
+  <paramdef>bool <parameter>committed</parameter></paramdef>
+  <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+</funcprototype></funcsynopsis>
+A chunk split function conforms to the <type>chunk_split_t</type> type
+and optionally splits <parameter>chunk</parameter> of given
+<parameter>size</parameter> into two adjacent chunks, the first of
+<parameter>size_a</parameter> bytes, and the second of
+<parameter>size_b</parameter> bytes, operating on
+<parameter>committed</parameter>/decommitted memory as indicated, on
+behalf of arena <parameter>arena_ind</parameter>, returning false upon
+success. If the function returns true, this indicates that the chunk
+remains unsplit and therefore should continue to be operated on as a
+whole.</para>
+
+<para><funcsynopsis><funcprototype>
+  <funcdef>typedef bool <function>(chunk_merge_t)</function></funcdef>
+  <paramdef>void *<parameter>chunk_a</parameter></paramdef>
+  <paramdef>size_t <parameter>size_a</parameter></paramdef>
+  <paramdef>void *<parameter>chunk_b</parameter></paramdef>
+  <paramdef>size_t <parameter>size_b</parameter></paramdef>
+  <paramdef>bool <parameter>committed</parameter></paramdef>
+  <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+</funcprototype></funcsynopsis>
+A chunk merge function conforms to the <type>chunk_merge_t</type> type
+and optionally merges adjacent chunks, <parameter>chunk_a</parameter> of
+given <parameter>size_a</parameter> and <parameter>chunk_b</parameter>
+of given <parameter>size_b</parameter> into one contiguous chunk,
+operating on <parameter>committed</parameter>/decommitted memory as
+indicated, on behalf of arena <parameter>arena_ind</parameter>,
+returning false upon success. If the function returns true, this
+indicates that the chunks remain distinct mappings and therefore should
+continue to be operated on independently.</para>
+</listitem>
 </varlistentry>
 
 <varlistentry id="arenas.narenas">
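For the commit/decommit hooks documented above, a hedged POSIX-only sketch (not
part of this change) of what an implementation might do; the my_chunk_* names
are hypothetical, and the exact mmap flags depend on the platform:

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Commit: make the range readable/writable; pages fault in zeroed. */
static bool
my_chunk_commit(void *chunk, size_t size, unsigned arena_ind)
{
	(void)arena_ind;
	/* false on success, per the chunk_commit_t convention. */
	return (mprotect(chunk, size, PROT_READ | PROT_WRITE) != 0);
}

/*
 * Decommit: replace the range with a fresh PROT_NONE mapping so the physical
 * pages can be reclaimed; a later commit then observes zeroed memory.
 */
static bool
my_chunk_decommit(void *chunk, size_t size, unsigned arena_ind)
{
	void *result;

	(void)arena_ind;
	result = mmap(chunk, size, PROT_NONE,
	    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return (result == MAP_FAILED);
}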
@@ -379,23 +379,18 @@ struct arena_s {
 	 * orderings are needed, which is why there are two trees with the same
 	 * contents.
 	 */
-	extent_tree_t		chunks_szad_cache;
-	extent_tree_t		chunks_ad_cache;
-	extent_tree_t		chunks_szad_mmap;
-	extent_tree_t		chunks_ad_mmap;
-	extent_tree_t		chunks_szad_dss;
-	extent_tree_t		chunks_ad_dss;
+	extent_tree_t		chunks_szad_cached;
+	extent_tree_t		chunks_ad_cached;
+	extent_tree_t		chunks_szad_retained;
+	extent_tree_t		chunks_ad_retained;
 	malloc_mutex_t		chunks_mtx;
 	/* Cache of nodes that were allocated via base_alloc(). */
 	ql_head(extent_node_t)	node_cache;
 	malloc_mutex_t		node_cache_mtx;
 
-	/*
-	 * User-configurable chunk allocation/deallocation/purge functions.
-	 */
-	chunk_alloc_t		*chunk_alloc;
-	chunk_dalloc_t		*chunk_dalloc;
-	chunk_purge_t		*chunk_purge;
+	/* User-configurable chunk hook functions. */
+	chunk_hooks_t		chunk_hooks;
 
 	/* bins is used to store trees of free regions. */
 	arena_bin_t		bins[NBINS];
@@ -19,6 +19,16 @@
 #define CHUNK_CEILING(s) \
 	(((s) + chunksize_mask) & ~chunksize_mask)
 
+#define CHUNK_HOOKS_INITIALIZER { \
+	NULL, \
+	NULL, \
+	NULL, \
+	NULL, \
+	NULL, \
+	NULL, \
+	NULL \
+}
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
@@ -36,30 +46,30 @@ extern size_t chunksize;
 extern size_t	chunksize_mask; /* (chunksize - 1). */
 extern size_t	chunk_npages;
 
+extern const chunk_hooks_t	chunk_hooks_default;
+
+chunk_hooks_t	chunk_hooks_get(arena_t *arena);
+chunk_hooks_t	chunk_hooks_set(arena_t *arena,
+    const chunk_hooks_t *chunk_hooks);
+
 bool	chunk_register(const void *chunk, const extent_node_t *node);
 void	chunk_deregister(const void *chunk, const extent_node_t *node);
 void	*chunk_alloc_base(size_t size);
-void	*chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool dalloc_node);
-void	*chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
-    bool *zero, unsigned arena_ind);
-void	*chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc,
+void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool dalloc_node);
+void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero);
-void	chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
-    bool zeroed);
-void	chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size);
-void	chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size,
-    bool zeroed);
-bool	chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void	chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc,
+void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size);
+void	chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed);
+void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size);
 bool	chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
     size_t length);
-bool	chunk_purge_default(void *chunk, size_t offset, size_t length,
-    unsigned arena_ind);
-bool	chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge,
-    void *chunk, size_t offset, size_t length);
+bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length);
 bool	chunk_boot(void);
 void	chunk_prefork(void);
 void	chunk_postfork_parent(void);
@@ -9,8 +9,6 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-bool	pages_purge(void *addr, size_t length);
-
 void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dalloc_mmap(void *chunk, size_t size);
 
@@ -18,6 +18,13 @@ struct extent_node_s {
 	/* Total region size. */
 	size_t			en_size;
 
+	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical mamory needs on demand via soft page faults.
+	 */
+	bool			en_committed;
+
 	/*
 	 * The zeroed flag is used by chunk recycling code to track whether
 	 * memory is zero-filled.
@@ -66,17 +73,19 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 arena_t	*extent_node_arena_get(const extent_node_t *node);
 void	*extent_node_addr_get(const extent_node_t *node);
 size_t	extent_node_size_get(const extent_node_t *node);
+bool	extent_node_committed_get(const extent_node_t *node);
 bool	extent_node_zeroed_get(const extent_node_t *node);
 bool	extent_node_achunk_get(const extent_node_t *node);
 prof_tctx_t	*extent_node_prof_tctx_get(const extent_node_t *node);
 void	extent_node_arena_set(extent_node_t *node, arena_t *arena);
 void	extent_node_addr_set(extent_node_t *node, void *addr);
 void	extent_node_size_set(extent_node_t *node, size_t size);
+void	extent_node_committed_set(extent_node_t *node, bool committed);
 void	extent_node_zeroed_set(extent_node_t *node, bool zeroed);
 void	extent_node_achunk_set(extent_node_t *node, bool achunk);
 void	extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
 void	extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
-    size_t size, bool zeroed);
+    size_t size, bool committed, bool zeroed);
 void	extent_node_dirty_linkage_init(extent_node_t *node);
 void	extent_node_dirty_insert(extent_node_t *node,
     arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -105,6 +114,13 @@ extent_node_size_get(const extent_node_t *node)
 	return (node->en_size);
 }
 
+JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+	return (node->en_committed);
+}
+
 JEMALLOC_INLINE bool
 extent_node_zeroed_get(const extent_node_t *node)
 {
@@ -147,6 +163,13 @@ extent_node_size_set(extent_node_t *node, size_t size)
 	node->en_size = size;
 }
 
+JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+	node->en_committed = committed;
+}
+
 JEMALLOC_INLINE void
 extent_node_zeroed_set(extent_node_t *node, bool zeroed)
 {
@@ -170,12 +193,13 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
 
 JEMALLOC_INLINE void
 extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
-    bool zeroed)
+    bool committed, bool zeroed)
 {
 
 	extent_node_arena_set(node, arena);
 	extent_node_addr_set(node, addr);
 	extent_node_size_set(node, size);
+	extent_node_committed_set(node, committed);
 	extent_node_zeroed_set(node, zeroed);
 	extent_node_achunk_set(node, false);
 	if (config_prof)
@@ -367,6 +367,7 @@ typedef unsigned index_t;
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -398,6 +399,7 @@ typedef unsigned index_t;
 #undef JEMALLOC_ARENA_STRUCTS_B
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -477,6 +479,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/arena.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 #include "jemalloc/internal/tcache.h"
@@ -503,6 +506,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/extent.h"
 #include "jemalloc/internal/base.h"
 #include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
include/jemalloc/internal/pages.h (new file, 26 lines)
@@ -0,0 +1,26 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void	*pages_map(void *addr, size_t size);
+void	pages_unmap(void *addr, size_t size);
+void	*pages_trim(void *addr, size_t alloc_size, size_t leadsize,
+    size_t size);
+bool	pages_commit(void *addr, size_t size);
+bool	pages_decommit(void *addr, size_t size);
+bool	pages_purge(void *addr, size_t size);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
@@ -132,14 +132,12 @@ bt_init
 buferror
 chunk_alloc_cache
 chunk_alloc_base
-chunk_alloc_default
 chunk_alloc_dss
 chunk_alloc_mmap
 chunk_alloc_wrapper
 chunk_boot
 chunk_dalloc_arena
 chunk_dalloc_cache
-chunk_dalloc_default
 chunk_dalloc_mmap
 chunk_dalloc_wrapper
 chunk_deregister
@@ -149,6 +147,9 @@ chunk_dss_postfork_parent
 chunk_dss_prec_get
 chunk_dss_prec_set
 chunk_dss_prefork
+chunk_hooks_default
+chunk_hooks_get
+chunk_hooks_set
 chunk_in_dss
 chunk_lookup
 chunk_npages
@@ -156,9 +157,7 @@ chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
 chunk_purge_arena
-chunk_purge_default
 chunk_purge_wrapper
-chunk_record
 chunk_register
 chunks_rtree
 chunksize
@@ -347,7 +346,12 @@ opt_utrace
 opt_xmalloc
 opt_zero
 p2rz
+pages_commit
+pages_decommit
+pages_map
 pages_purge
+pages_trim
+pages_unmap
 pow2_ceil
 prof_active_get
 prof_active_get_unlocked
@@ -1,3 +1,55 @@
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ *     unsigned arena_ind);
+ */
 typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, unsigned arena_ind);
+ */
 typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
-typedef bool (chunk_purge_t)(void *, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ *     unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ *     bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+
+typedef struct {
+	chunk_alloc_t		*alloc;
+	chunk_dalloc_t		*dalloc;
+	chunk_commit_t		*commit;
+	chunk_decommit_t	*decommit;
+	chunk_purge_t		*purge;
+	chunk_split_t		*split;
+	chunk_merge_t		*merge;
+} chunk_hooks_t;
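To illustrate the return-value conventions of these typedefs (not part of this
change), a minimal sketch of hooks that opt out of deallocation and decommit so
that the arena retains and reuses its chunks; the retain_* and
install_retaining_hooks names are hypothetical, and the remaining members are
copied from the hooks previously read via the "arena.<i>.chunk_hooks" mallctl:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Returning true from a dalloc hook opts out of deallocation: the mapping
 * stays intact and jemalloc retains the chunk for later reuse.
 */
static bool
retain_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
	(void)chunk; (void)size; (void)arena_ind;
	return (true);
}

/* Returning true from a decommit hook keeps the memory committed. */
static bool
retain_decommit(void *chunk, size_t size, unsigned arena_ind)
{
	(void)chunk; (void)size; (void)arena_ind;
	return (true);
}

/* Install: start from the current hooks and override only these two. */
static void
install_retaining_hooks(unsigned arena_ind)
{
	chunk_hooks_t hooks;
	size_t sz = sizeof(hooks);
	char name[64];

	snprintf(name, sizeof(name), "arena.%u.chunk_hooks", arena_ind);
	mallctl(name, &hooks, &sz, NULL, 0);		/* read current hooks */
	hooks.dalloc = retain_dalloc;
	hooks.decommit = retain_decommit;
	mallctl(name, NULL, NULL, &hooks, sizeof(hooks));	/* write back */
}

With deallocation and decommit refused, cleanup cascades to the remaining purge
hook, matching the documented fallback order.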
src/arena.c
@@ -516,23 +516,23 @@ static bool
 arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
 {
 
-	extent_node_init(&chunk->node, arena, chunk, chunksize, zero);
+	extent_node_init(&chunk->node, arena, chunk, chunksize, true, zero);
 	extent_node_achunk_set(&chunk->node, true);
 	return (chunk_register(chunk, &chunk->node));
 }
 
 static arena_chunk_t *
-arena_chunk_alloc_internal_hard(arena_t *arena, bool *zero)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    bool *zero)
 {
 	arena_chunk_t *chunk;
-	chunk_alloc_t *chunk_alloc = arena->chunk_alloc;
-	chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
 
 	malloc_mutex_unlock(&arena->lock);
-	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_alloc, NULL,
+	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
 	    chunksize, chunksize, zero);
 	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
-		chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)chunk,
+		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
 		    chunksize);
 		chunk = NULL;
 	}
@@ -545,19 +545,18 @@
 arena_chunk_alloc_internal(arena_t *arena, bool *zero)
 {
 	arena_chunk_t *chunk;
+	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
-	if (likely(arena->chunk_alloc == chunk_alloc_default)) {
-		chunk = chunk_alloc_cache(arena, NULL, chunksize, chunksize,
-		    zero, true);
-		if (chunk != NULL && arena_chunk_register(arena, chunk,
-		    *zero)) {
-			chunk_dalloc_cache(arena, chunk, chunksize);
+	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+	    chunksize, zero, true);
+	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+		chunk_dalloc_cache(arena, &chunk_hooks, chunk, chunksize);
 		return (NULL);
 	}
-	} else
-		chunk = NULL;
-	if (chunk == NULL)
-		chunk = arena_chunk_alloc_internal_hard(arena, zero);
+	if (chunk == NULL) {
+		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+		    zero);
+	}
 
 	if (config_stats && chunk != NULL) {
 		arena->stats.mapped += chunksize;
@@ -657,7 +656,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 
 	if (arena->spare != NULL) {
 		arena_chunk_t *spare = arena->spare;
-		chunk_dalloc_t *chunk_dalloc;
+		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
 		arena->spare = chunk;
 		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
@@ -667,15 +666,8 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 
 		chunk_deregister(spare, &spare->node);
 
-		chunk_dalloc = arena->chunk_dalloc;
-		if (likely(chunk_dalloc == chunk_dalloc_default))
-			chunk_dalloc_cache(arena, (void *)spare, chunksize);
-		else {
-			malloc_mutex_unlock(&arena->lock);
-			chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)spare,
-			    chunksize);
-			malloc_mutex_lock(&arena->lock);
-		}
+		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+		    chunksize);
 
 		if (config_stats) {
 			arena->stats.mapped -= chunksize;
@@ -781,12 +773,12 @@ arena_node_dalloc(arena_t *arena, extent_node_t *node)
 }
 
 static void *
-arena_chunk_alloc_huge_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
     size_t usize, size_t alignment, bool *zero, size_t csize)
 {
 	void *ret;
 
-	ret = chunk_alloc_wrapper(arena, chunk_alloc, NULL, csize, alignment,
+	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
 	    zero);
 	if (ret == NULL) {
 		/* Revert optimistic stats updates. */
@@ -807,7 +799,7 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
     bool *zero)
 {
 	void *ret;
-	chunk_alloc_t *chunk_alloc;
+	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t csize = CHUNK_CEILING(usize);
 
 	malloc_mutex_lock(&arena->lock);
@@ -819,15 +811,11 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
 	}
 	arena->nactive += (usize >> LG_PAGE);
 
-	chunk_alloc = arena->chunk_alloc;
-	if (likely(chunk_alloc == chunk_alloc_default)) {
-		ret = chunk_alloc_cache(arena, NULL, csize, alignment, zero,
-		    true);
-	} else
-		ret = NULL;
+	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+	    zero, true);
 	malloc_mutex_unlock(&arena->lock);
 	if (ret == NULL) {
-		ret = arena_chunk_alloc_huge_hard(arena, chunk_alloc, usize,
+		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
 		    alignment, zero, csize);
 	}
 
@@ -839,12 +827,11 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
 void
 arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
 {
-	chunk_dalloc_t *chunk_dalloc;
+	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t csize;
 
 	csize = CHUNK_CEILING(usize);
 	malloc_mutex_lock(&arena->lock);
-	chunk_dalloc = arena->chunk_dalloc;
 	if (config_stats) {
 		arena_huge_dalloc_stats_update(arena, usize);
 		arena->stats.mapped -= usize;
@@ -852,13 +839,8 @@ arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
 	}
 	arena->nactive -= (usize >> LG_PAGE);
 
-	if (likely(chunk_dalloc == chunk_dalloc_default)) {
-		chunk_dalloc_cache(arena, chunk, csize);
-		malloc_mutex_unlock(&arena->lock);
-	} else {
-		malloc_mutex_unlock(&arena->lock);
-		chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize);
-	}
+	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize);
+	malloc_mutex_unlock(&arena->lock);
 }
 
 void
@@ -904,30 +886,23 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
 	arena->nactive -= udiff >> LG_PAGE;
 
 	if (cdiff != 0) {
-		chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
+		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 		void *nchunk = (void *)((uintptr_t)chunk +
 		    CHUNK_CEILING(usize));
 
-		if (likely(chunk_dalloc == chunk_dalloc_default)) {
-			chunk_dalloc_cache(arena, nchunk, cdiff);
-			malloc_mutex_unlock(&arena->lock);
-		} else {
-			malloc_mutex_unlock(&arena->lock);
-			chunk_dalloc_wrapper(arena, chunk_dalloc, nchunk,
-			    cdiff);
-		}
+		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff);
 	}
-	} else
 	malloc_mutex_unlock(&arena->lock);
 }
 
-bool
-arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
-    size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff,
-    size_t cdiff)
+static bool
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+    size_t udiff, size_t cdiff)
 {
 	bool err;
 
-	err = (chunk_alloc_wrapper(arena, chunk_alloc, nchunk, cdiff, chunksize,
+	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
 	    zero) == NULL);
 	if (err) {
 		/* Revert optimistic stats updates. */
@@ -939,6 +914,10 @@ arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
 		}
 		arena->nactive -= (udiff >> LG_PAGE);
 		malloc_mutex_unlock(&arena->lock);
+	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+	    cdiff, true, arena->ind)) {
+		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero);
+		err = true;
 	}
 	return (err);
 }
@@ -948,11 +927,13 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
     size_t usize, bool *zero)
 {
 	bool err;
-	chunk_alloc_t *chunk_alloc;
+	chunk_hooks_t chunk_hooks;
 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 	size_t udiff = usize - oldsize;
 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
 
+	chunk_hooks = chunk_hooks_get(arena);
+
 	malloc_mutex_lock(&arena->lock);
 
 	/* Optimistically update stats. */
@@ -962,16 +943,17 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
 	}
 	arena->nactive += (udiff >> LG_PAGE);
 
-	chunk_alloc = arena->chunk_alloc;
-	if (likely(chunk_alloc == chunk_alloc_default)) {
-		err = (chunk_alloc_cache(arena, nchunk, cdiff, chunksize, zero,
-		    true) == NULL);
-	} else
-		err = true;
+	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+	    chunksize, zero, true) == NULL);
 	malloc_mutex_unlock(&arena->lock);
 	if (err) {
-		err = arena_chunk_ralloc_huge_expand_hard(arena, chunk_alloc,
-		    oldsize, usize, zero, nchunk, udiff, cdiff);
+		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+		    chunk, oldsize, usize, zero, nchunk, udiff,
+		    cdiff);
+	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+	    cdiff, true, arena->ind)) {
+		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero);
+		err = true;
 	}
 
 	if (config_stats && !err)
@@ -1198,8 +1180,8 @@ arena_compute_npurge(arena_t *arena, bool all)
 }
 
 static size_t
-arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
-    arena_runs_dirty_link_t *purge_runs_sentinel,
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
+    size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
@@ -1224,7 +1206,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 			 * dalloc_node=false argument to chunk_alloc_cache().
 			 */
 			zero = false;
-			chunk = chunk_alloc_cache(arena,
+			chunk = chunk_alloc_cache(arena, chunk_hooks,
 			    extent_node_addr_get(chunkselm),
 			    extent_node_size_get(chunkselm), chunksize, &zero,
 			    false);
@@ -1278,12 +1260,11 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
 }
 
 static size_t
-arena_purge_stashed(arena_t *arena,
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
     arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
 	size_t npurged, nmadvise;
-	chunk_purge_t *chunk_purge;
 	arena_runs_dirty_link_t *rdelm;
 	extent_node_t *chunkselm;
 
@@ -1291,7 +1272,6 @@ arena_purge_stashed(arena_t *arena,
 	nmadvise = 0;
 	npurged = 0;
 
-	chunk_purge = arena->chunk_purge;
 	malloc_mutex_unlock(&arena->lock);
 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
@@ -1299,13 +1279,16 @@ arena_purge_stashed(arena_t *arena,
 		size_t npages;
 
 		if (rdelm == &chunkselm->rd) {
+			/*
+			 * Don't actually purge the chunk here because 1)
+			 * chunkselm is embedded in the chunk and must remain
+			 * valid, and 2) we deallocate the chunk in
+			 * arena_unstash_purged(), where it is destroyed,
+			 * decommitted, or purged, depending on chunk
+			 * deallocation policy.
+			 */
 			size_t size = extent_node_size_get(chunkselm);
-			bool unzeroed;
 
 			npages = size >> LG_PAGE;
-			unzeroed = chunk_purge_wrapper(arena, chunk_purge,
-			    extent_node_addr_get(chunkselm), 0, size);
-			extent_node_zeroed_set(chunkselm, !unzeroed);
 			chunkselm = qr_next(chunkselm, cc_link);
 		} else {
 			size_t pageind, run_size, flag_unzeroed, i;
@@ -1319,8 +1302,9 @@ arena_purge_stashed(arena_t *arena,
 			npages = run_size >> LG_PAGE;
 
 			assert(pageind + npages <= chunk_npages);
-			unzeroed = chunk_purge_wrapper(arena, chunk_purge,
-			    chunk, pageind << LG_PAGE, run_size);
+			unzeroed = chunk_purge_wrapper(arena,
+			    chunk_hooks, chunk, chunksize, pageind << LG_PAGE,
+			    run_size);
 			flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
 
 			/*
@@ -1355,14 +1339,14 @@
 }
 
 static void
-arena_unstash_purged(arena_t *arena,
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
     arena_runs_dirty_link_t *purge_runs_sentinel,
     extent_node_t *purge_chunks_sentinel)
 {
 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
 	extent_node_t *chunkselm;
 
-	/* Deallocate runs. */
+	/* Deallocate chunks/runs. */
 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
 	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
@@ -1376,7 +1360,8 @@ arena_unstash_purged(arena_t *arena,
 			extent_node_dirty_remove(chunkselm);
 			arena_node_dalloc(arena, chunkselm);
 			chunkselm = chunkselm_next;
-			chunk_dalloc_arena(arena, addr, size, zeroed);
+			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+			    zeroed);
 		} else {
 			arena_chunk_map_misc_t *miscelm =
 			    arena_rd_to_miscelm(rdelm);
@ -1390,6 +1375,7 @@ arena_unstash_purged(arena_t *arena,
|
|||||||
static void
|
static void
|
||||||
arena_purge(arena_t *arena, bool all)
|
arena_purge(arena_t *arena, bool all)
|
||||||
{
|
{
|
||||||
|
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||||
size_t npurge, npurgeable, npurged;
|
size_t npurge, npurgeable, npurged;
|
||||||
arena_runs_dirty_link_t purge_runs_sentinel;
|
arena_runs_dirty_link_t purge_runs_sentinel;
|
||||||
extent_node_t purge_chunks_sentinel;
|
extent_node_t purge_chunks_sentinel;
|
||||||
@ -1413,13 +1399,13 @@ arena_purge(arena_t *arena, bool all)
|
|||||||
qr_new(&purge_runs_sentinel, rd_link);
|
qr_new(&purge_runs_sentinel, rd_link);
|
||||||
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
|
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
|
||||||
|
|
||||||
npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel,
|
npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
|
||||||
&purge_chunks_sentinel);
|
&purge_runs_sentinel, &purge_chunks_sentinel);
|
||||||
assert(npurgeable >= npurge);
|
assert(npurgeable >= npurge);
|
||||||
npurged = arena_purge_stashed(arena, &purge_runs_sentinel,
|
npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
|
||||||
&purge_chunks_sentinel);
|
&purge_chunks_sentinel);
|
||||||
assert(npurged == npurgeable);
|
assert(npurged == npurgeable);
|
||||||
arena_unstash_purged(arena, &purge_runs_sentinel,
|
arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
|
||||||
&purge_chunks_sentinel);
|
&purge_chunks_sentinel);
|
||||||
|
|
||||||
arena->purging = false;
|
arena->purging = false;
|
||||||
@ -2874,21 +2860,17 @@ arena_new(unsigned ind)
|
|||||||
if (malloc_mutex_init(&arena->huge_mtx))
|
if (malloc_mutex_init(&arena->huge_mtx))
|
||||||
return (NULL);
|
return (NULL);
|
||||||
|
|
||||||
extent_tree_szad_new(&arena->chunks_szad_cache);
|
extent_tree_szad_new(&arena->chunks_szad_cached);
|
||||||
extent_tree_ad_new(&arena->chunks_ad_cache);
|
extent_tree_ad_new(&arena->chunks_ad_cached);
|
||||||
extent_tree_szad_new(&arena->chunks_szad_mmap);
|
extent_tree_szad_new(&arena->chunks_szad_retained);
|
||||||
extent_tree_ad_new(&arena->chunks_ad_mmap);
|
extent_tree_ad_new(&arena->chunks_ad_retained);
|
||||||
extent_tree_szad_new(&arena->chunks_szad_dss);
|
|
||||||
extent_tree_ad_new(&arena->chunks_ad_dss);
|
|
||||||
if (malloc_mutex_init(&arena->chunks_mtx))
|
if (malloc_mutex_init(&arena->chunks_mtx))
|
||||||
return (NULL);
|
return (NULL);
|
||||||
ql_new(&arena->node_cache);
|
ql_new(&arena->node_cache);
|
||||||
if (malloc_mutex_init(&arena->node_cache_mtx))
|
if (malloc_mutex_init(&arena->node_cache_mtx))
|
||||||
return (NULL);
|
return (NULL);
|
||||||
|
|
||||||
arena->chunk_alloc = chunk_alloc_default;
|
arena->chunk_hooks = chunk_hooks_default;
|
||||||
arena->chunk_dalloc = chunk_dalloc_default;
|
|
||||||
arena->chunk_purge = chunk_purge_default;
|
|
||||||
|
|
||||||
/* Initialize bins. */
|
/* Initialize bins. */
|
||||||
for (i = 0; i < NBINS; i++) {
|
for (i = 0; i < NBINS; i++) {
|
||||||
src/base.c

@@ -66,7 +66,7 @@ base_chunk_alloc(size_t minsize)
             base_resident += PAGE_CEILING(nsize);
         }
     }
-    extent_node_init(node, NULL, addr, csize, true);
+    extent_node_init(node, NULL, addr, csize, true, true);
     return (node);
 }
 
@@ -90,7 +90,7 @@ base_alloc(size_t size)
     csize = CACHELINE_CEILING(size);
 
     usize = s2u(csize);
-    extent_node_init(&key, NULL, NULL, usize, false);
+    extent_node_init(&key, NULL, NULL, usize, true, false);
     malloc_mutex_lock(&base_mtx);
     node = extent_tree_szad_nsearch(&base_avail_szad, &key);
     if (node != NULL) {
src/chunk.c

@@ -18,7 +18,103 @@ size_t chunksize;
 size_t chunksize_mask; /* (chunksize - 1). */
 size_t chunk_npages;
 
+static void *chunk_alloc_default(void *new_addr, size_t size,
+    size_t alignment, bool *zero, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size,
+    unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size,
+    unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size,
+    unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+    size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+    size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+    chunk_alloc_default,
+    chunk_dalloc_default,
+    chunk_commit_default,
+    chunk_decommit_default,
+    chunk_purge_default,
+    chunk_split_default,
+    chunk_merge_default
+};
+
 /******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, bool committed, bool zeroed);
+
+/******************************************************************************/
 
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+    return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+    chunk_hooks_t chunk_hooks;
+
+    malloc_mutex_lock(&arena->chunks_mtx);
+    chunk_hooks = chunk_hooks_get_locked(arena);
+    malloc_mutex_unlock(&arena->chunks_mtx);
+
+    return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+    chunk_hooks_t old_chunk_hooks;
+
+    malloc_mutex_lock(&arena->chunks_mtx);
+    old_chunk_hooks = arena->chunk_hooks;
+    arena->chunk_hooks = *chunk_hooks;
+    malloc_mutex_unlock(&arena->chunks_mtx);
+
+    return (old_chunk_hooks);
+}
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    bool locked)
+{
+    static const chunk_hooks_t uninitialized_hooks =
+        CHUNK_HOOKS_INITIALIZER;
+
+    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+        0) {
+        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+            chunk_hooks_get(arena);
+    }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+    chunk_hooks_t *chunk_hooks)
+{
+
+    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
+
 bool
 chunk_register(const void *chunk, const extent_node_t *node)
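The chunk_hooks_t table added above is what an application replaces, per arena, through the new "arena.<i>.chunk_hooks" mallctl. For reference, a minimal sketch (not part of this commit) of installing a passthrough table that overrides only the alloc hook; it assumes the public chunk_hooks_t type from <jemalloc/jemalloc.h>, and the names orig_hooks, my_alloc, and install_my_hooks are illustrative only.

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static chunk_hooks_t orig_hooks;    /* Filled in by the mallctl read below. */

static void *
my_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{

    /* Delegate to whichever alloc hook was installed before ours. */
    return (orig_hooks.alloc(new_addr, size, alignment, zero, arena_ind));
}

static int
install_my_hooks(void)
{
    chunk_hooks_t hooks;
    size_t sz = sizeof(chunk_hooks_t);

    /* Read the current table, then write back a copy with alloc replaced. */
    if (mallctl("arena.0.chunk_hooks", &orig_hooks, &sz, NULL, 0) != 0)
        return (1);
    hooks = orig_hooks;
    hooks.alloc = my_alloc;
    return (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sz));
}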
@ -74,21 +170,26 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
|
|
||||||
assert(size == CHUNK_CEILING(size));
|
assert(size == CHUNK_CEILING(size));
|
||||||
|
|
||||||
extent_node_init(&key, arena, NULL, size, false);
|
extent_node_init(&key, arena, NULL, size, false, false);
|
||||||
return (extent_tree_szad_nsearch(chunks_szad, &key));
|
return (extent_tree_szad_nsearch(chunks_szad, &key));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
|
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||||
size_t alignment, bool *zero, bool dalloc_node)
|
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
size_t alloc_size, leadsize, trailsize;
|
size_t alloc_size, leadsize, trailsize;
|
||||||
bool zeroed;
|
bool committed, zeroed;
|
||||||
|
|
||||||
assert(new_addr == NULL || alignment == chunksize);
|
assert(new_addr == NULL || alignment == chunksize);
|
||||||
|
/*
|
||||||
|
* Cached chunks use the node linkage embedded in their headers, in
|
||||||
|
* which case dalloc_node is true, and new_addr is non-NULL because
|
||||||
|
* we're operating on a specific chunk.
|
||||||
|
*/
|
||||||
assert(dalloc_node || new_addr != NULL);
|
assert(dalloc_node || new_addr != NULL);
|
||||||
|
|
||||||
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
|
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
|
||||||
@ -96,9 +197,11 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
if (alloc_size < size)
|
if (alloc_size < size)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
malloc_mutex_lock(&arena->chunks_mtx);
|
malloc_mutex_lock(&arena->chunks_mtx);
|
||||||
|
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||||
if (new_addr != NULL) {
|
if (new_addr != NULL) {
|
||||||
extent_node_t key;
|
extent_node_t key;
|
||||||
extent_node_init(&key, arena, new_addr, alloc_size, false);
|
extent_node_init(&key, arena, new_addr, alloc_size, false,
|
||||||
|
false);
|
||||||
node = extent_tree_ad_search(chunks_ad, &key);
|
node = extent_tree_ad_search(chunks_ad, &key);
|
||||||
} else {
|
} else {
|
||||||
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
|
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
|
||||||
@ -115,9 +218,17 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
assert(extent_node_size_get(node) >= leadsize + size);
|
assert(extent_node_size_get(node) >= leadsize + size);
|
||||||
trailsize = extent_node_size_get(node) - leadsize - size;
|
trailsize = extent_node_size_get(node) - leadsize - size;
|
||||||
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
||||||
|
committed = extent_node_committed_get(node);
|
||||||
zeroed = extent_node_zeroed_get(node);
|
zeroed = extent_node_zeroed_get(node);
|
||||||
if (zeroed)
|
if (zeroed)
|
||||||
*zero = true;
|
*zero = true;
|
||||||
|
/* Split the lead. */
|
||||||
|
if (leadsize != 0 &&
|
||||||
|
chunk_hooks->split(extent_node_addr_get(node),
|
||||||
|
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
|
||||||
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
/* Remove node from the tree. */
|
/* Remove node from the tree. */
|
||||||
extent_tree_szad_remove(chunks_szad, node);
|
extent_tree_szad_remove(chunks_szad, node);
|
||||||
extent_tree_ad_remove(chunks_ad, node);
|
extent_tree_ad_remove(chunks_ad, node);
|
||||||
@ -131,23 +242,40 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
node = NULL;
|
node = NULL;
|
||||||
}
|
}
|
||||||
if (trailsize != 0) {
|
if (trailsize != 0) {
|
||||||
|
/* Split the trail. */
|
||||||
|
if (chunk_hooks->split(ret, size + trailsize, size,
|
||||||
|
trailsize, false, arena->ind)) {
|
||||||
|
if (dalloc_node && node != NULL)
|
||||||
|
arena_node_dalloc(arena, node);
|
||||||
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
|
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
|
||||||
|
cache, ret, size + trailsize, committed, zeroed);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
/* Insert the trailing space as a smaller chunk. */
|
/* Insert the trailing space as a smaller chunk. */
|
||||||
if (node == NULL) {
|
if (node == NULL) {
|
||||||
node = arena_node_alloc(arena);
|
node = arena_node_alloc(arena);
|
||||||
if (node == NULL) {
|
if (node == NULL) {
|
||||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
chunk_record(arena, chunks_szad, chunks_ad,
|
chunk_record(arena, chunk_hooks, chunks_szad,
|
||||||
cache, ret, size, zeroed);
|
chunks_ad, cache, ret, size + trailsize,
|
||||||
|
committed, zeroed);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
|
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
|
||||||
trailsize, zeroed);
|
trailsize, committed, zeroed);
|
||||||
extent_tree_szad_insert(chunks_szad, node);
|
extent_tree_szad_insert(chunks_szad, node);
|
||||||
extent_tree_ad_insert(chunks_ad, node);
|
extent_tree_ad_insert(chunks_ad, node);
|
||||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||||
node = NULL;
|
node = NULL;
|
||||||
}
|
}
|
||||||
|
if (!committed && chunk_hooks->commit(ret, size, arena->ind)) {
|
||||||
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
|
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
|
||||||
|
ret, size, committed, zeroed);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||||
|
|
||||||
assert(dalloc_node || node != NULL);
|
assert(dalloc_node || node != NULL);
|
||||||
@ -168,20 +296,6 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
|
||||||
chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
|
|
||||||
size_t alignment, bool *zero)
|
|
||||||
{
|
|
||||||
void *ret;
|
|
||||||
|
|
||||||
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
|
|
||||||
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero,
|
|
||||||
true)) != NULL)
|
|
||||||
return (ret);
|
|
||||||
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
|
|
||||||
return (ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the caller specifies (!*zero), it is still possible to receive zeroed
|
* If the caller specifies (!*zero), it is still possible to receive zeroed
|
||||||
* memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
|
* memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
|
||||||
@ -193,33 +307,33 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
|||||||
bool *zero, dss_prec_t dss_prec)
|
bool *zero, dss_prec_t dss_prec)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||||
|
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
assert((size & chunksize_mask) == 0);
|
assert((size & chunksize_mask) == 0);
|
||||||
assert(alignment != 0);
|
assert(alignment != 0);
|
||||||
assert((alignment & chunksize_mask) == 0);
|
assert((alignment & chunksize_mask) == 0);
|
||||||
|
|
||||||
|
/* Retained. */
|
||||||
|
if ((ret = chunk_recycle(arena, &chunk_hooks,
|
||||||
|
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
|
||||||
|
new_addr, size, alignment, zero, true)) != NULL)
|
||||||
|
return (ret);
|
||||||
|
|
||||||
/* "primary" dss. */
|
/* "primary" dss. */
|
||||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
|
||||||
NULL)
|
|
||||||
return (ret);
|
|
||||||
/* mmap. */
|
|
||||||
if (!config_munmap && (ret = chunk_recycle(arena,
|
|
||||||
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
|
|
||||||
size, alignment, zero, true)) != NULL)
|
|
||||||
return (ret);
|
return (ret);
|
||||||
/*
|
/*
|
||||||
* Requesting an address is not implemented for chunk_alloc_mmap(), so
|
* mmap. Requesting an address is not implemented for
|
||||||
* only call it if (new_addr == NULL).
|
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
|
||||||
*/
|
*/
|
||||||
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
|
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
|
||||||
!= NULL)
|
!= NULL)
|
||||||
return (ret);
|
return (ret);
|
||||||
/* "secondary" dss. */
|
/* "secondary" dss. */
|
||||||
if (have_dss && dss_prec == dss_prec_secondary && (ret =
|
if (have_dss && dss_prec == dss_prec_secondary && (ret =
|
||||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
|
||||||
NULL)
|
|
||||||
return (ret);
|
return (ret);
|
||||||
|
|
||||||
/* All strategies for allocation failed. */
|
/* All strategies for allocation failed. */
|
||||||
@ -248,8 +362,8 @@ chunk_alloc_base(size_t size)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void *
|
void *
|
||||||
chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||||
bool *zero, bool dalloc_node)
|
size_t size, size_t alignment, bool *zero, bool dalloc_node)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
@ -258,8 +372,8 @@ chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
|||||||
assert(alignment != 0);
|
assert(alignment != 0);
|
||||||
assert((alignment & chunksize_mask) == 0);
|
assert((alignment & chunksize_mask) == 0);
|
||||||
|
|
||||||
ret = chunk_recycle(arena, &arena->chunks_szad_cache,
|
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
|
||||||
&arena->chunks_ad_cache, true, new_addr, size, alignment, zero,
|
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
|
||||||
dalloc_node);
|
dalloc_node);
|
||||||
if (ret == NULL)
|
if (ret == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
@ -285,11 +399,13 @@ chunk_arena_get(unsigned arena_ind)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||||
bool *zero)
|
unsigned arena_ind)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
arena_t *arena;
|
||||||
|
|
||||||
|
arena = chunk_arena_get(arena_ind);
|
||||||
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
|
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
|
||||||
arena->dss_prec);
|
arena->dss_prec);
|
||||||
if (ret == NULL)
|
if (ret == NULL)
|
||||||
@ -300,55 +416,45 @@ chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
|||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Default arena chunk allocation routine in the absence of user override. This
|
|
||||||
* function isn't actually used by jemalloc, but it does the right thing if the
|
|
||||||
* application passes calls through to it during chunk allocation.
|
|
||||||
*/
|
|
||||||
void *
|
void *
|
||||||
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||||
unsigned arena_ind)
|
|
||||||
{
|
|
||||||
arena_t *arena;
|
|
||||||
|
|
||||||
arena = chunk_arena_get(arena_ind);
|
|
||||||
return (chunk_alloc_arena(arena, new_addr, size, alignment, zero));
|
|
||||||
}
|
|
||||||
|
|
||||||
void *
|
|
||||||
chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc, void *new_addr,
|
|
||||||
size_t size, size_t alignment, bool *zero)
|
size_t size, size_t alignment, bool *zero)
|
||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
ret = chunk_alloc(new_addr, size, alignment, zero, arena->ind);
|
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||||
|
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, arena->ind);
|
||||||
if (ret == NULL)
|
if (ret == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
if (config_valgrind && chunk_alloc != chunk_alloc_default)
|
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
|
||||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
|
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
static void
|
||||||
chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||||
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed)
|
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||||
|
void *chunk, size_t size, bool committed, bool zeroed)
|
||||||
{
|
{
|
||||||
bool unzeroed;
|
bool unzeroed;
|
||||||
extent_node_t *node, *prev;
|
extent_node_t *node, *prev;
|
||||||
extent_node_t key;
|
extent_node_t key;
|
||||||
|
|
||||||
assert(maps_coalesce || size == chunksize);
|
|
||||||
assert(!cache || !zeroed);
|
assert(!cache || !zeroed);
|
||||||
unzeroed = cache || !zeroed;
|
unzeroed = cache || !zeroed;
|
||||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->chunks_mtx);
|
malloc_mutex_lock(&arena->chunks_mtx);
|
||||||
|
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||||
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
|
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
|
||||||
false);
|
false, false);
|
||||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||||
/* Try to coalesce forward. */
|
/* Try to coalesce forward. */
|
||||||
if (node != NULL && extent_node_addr_get(node) ==
|
if (node != NULL && extent_node_addr_get(node) ==
|
||||||
extent_node_addr_get(&key)) {
|
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
|
||||||
|
committed && !chunk_hooks->merge(chunk, size,
|
||||||
|
extent_node_addr_get(node), extent_node_size_get(node), false,
|
||||||
|
arena->ind)) {
|
||||||
/*
|
/*
|
||||||
* Coalesce chunk with the following address range. This does
|
* Coalesce chunk with the following address range. This does
|
||||||
* not change the position within chunks_ad, so only
|
* not change the position within chunks_ad, so only
|
||||||
@ -373,12 +479,13 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
* a virtual memory leak.
|
* a virtual memory leak.
|
||||||
*/
|
*/
|
||||||
if (cache) {
|
if (cache) {
|
||||||
chunk_purge_wrapper(arena, arena->chunk_purge,
|
chunk_purge_wrapper(arena, chunk_hooks, chunk,
|
||||||
chunk, 0, size);
|
size, 0, size);
|
||||||
}
|
}
|
||||||
goto label_return;
|
goto label_return;
|
||||||
}
|
}
|
||||||
extent_node_init(node, arena, chunk, size, !unzeroed);
|
extent_node_init(node, arena, chunk, size, committed,
|
||||||
|
!unzeroed);
|
||||||
extent_tree_ad_insert(chunks_ad, node);
|
extent_tree_ad_insert(chunks_ad, node);
|
||||||
extent_tree_szad_insert(chunks_szad, node);
|
extent_tree_szad_insert(chunks_szad, node);
|
||||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||||
@ -387,7 +494,10 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
|||||||
/* Try to coalesce backward. */
|
/* Try to coalesce backward. */
|
||||||
prev = extent_tree_ad_prev(chunks_ad, node);
|
prev = extent_tree_ad_prev(chunks_ad, node);
|
||||||
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
|
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
|
||||||
extent_node_size_get(prev)) == chunk) {
|
extent_node_size_get(prev)) == chunk &&
|
||||||
|
extent_node_committed_get(prev) == committed &&
|
||||||
|
!chunk_hooks->merge(extent_node_addr_get(prev),
|
||||||
|
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
|
||||||
/*
|
/*
|
||||||
* Coalesce chunk with the previous address range. This does
|
* Coalesce chunk with the previous address range. This does
|
||||||
* not change the position within chunks_ad, so only
|
* not change the position within chunks_ad, so only
|
||||||
@@ -414,7 +524,8 @@ label_return:
 }
 
 void
-chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size)
 {
 
     assert(chunk != NULL);
@@ -422,57 +533,68 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
 
-    if (!maps_coalesce && size != chunksize) {
-        chunk_dalloc_arena(arena, chunk, size, false);
-        return;
-    }
-
-    chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
-        true, chunk, size, false);
+    chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+        &arena->chunks_ad_cached, true, chunk, size, true, false);
     arena_maybe_purge(arena);
 }
 
 void
-chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size, bool zeroed)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool zeroed)
 {
+    bool committed;
 
     assert(chunk != NULL);
     assert(CHUNK_ADDR2BASE(chunk) == chunk);
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
 
-    if (have_dss && chunk_in_dss(chunk)) {
-        chunk_record(arena, &arena->chunks_szad_dss,
-            &arena->chunks_ad_dss, false, chunk, size, zeroed);
-    } else if (chunk_dalloc_mmap(chunk, size)) {
-        chunk_record(arena, &arena->chunks_szad_mmap,
-            &arena->chunks_ad_mmap, false, chunk, size, zeroed);
-    }
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    /* Try to deallocate. */
+    if (!chunk_hooks->dalloc(chunk, size, arena->ind))
+        return;
+    /* Try to decommit; purge if that fails. */
+    committed = chunk_hooks->decommit(chunk, size, arena->ind);
+    zeroed = !committed || chunk_hooks->purge(chunk, size, 0, size,
+        arena->ind);
+    chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+        &arena->chunks_ad_retained, false, chunk, size, committed, zeroed);
 }
 
-/*
- * Default arena chunk deallocation routine in the absence of user override.
- * This function isn't actually used by jemalloc, but it does the right thing if
- * the application passes calls through to it during chunk deallocation.
- */
-bool
+static bool
 chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
 {
 
-    chunk_dalloc_arena(chunk_arena_get(arena_ind), chunk, size, false);
-    return (false);
+    if (!have_dss || !chunk_in_dss(chunk))
+        return (chunk_dalloc_mmap(chunk, size));
+    return (true);
 }
 
 void
-chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc, void *chunk,
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
     size_t size)
 {
 
-    chunk_dalloc(chunk, size, arena->ind);
-    if (config_valgrind && chunk_dalloc != chunk_dalloc_default)
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    chunk_hooks->dalloc(chunk, size, arena->ind);
+    if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
        JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 }
 
+static bool
+chunk_commit_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+    return (pages_commit(chunk, size));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+    return (pages_decommit(chunk, size));
+}
+
 bool
 chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
 {
@@ -487,8 +609,8 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
         length));
 }
 
-bool
-chunk_purge_default(void *chunk, size_t offset, size_t length,
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
     unsigned arena_ind)
 {
 
@@ -497,11 +619,35 @@ chunk_purge_default(void *chunk, size_t offset, size_t length,
 }
 
 bool
-chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge, void *chunk,
-    size_t offset, size_t length)
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, size_t offset, size_t length)
 {
 
-    return (chunk_purge(chunk, offset, length, arena->ind));
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+    if (!maps_coalesce)
+        return (true);
+    return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+    if (!maps_coalesce)
+        return (true);
+    if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+        return (true);
+
+    return (false);
 }
 
 static rtree_node_elm_t *
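The default split and merge hooks above also illustrate the return convention for all of the new hooks: returning true reports failure and makes jemalloc leave the affected chunks alone. A minimal sketch (not part of this commit) of hooks that opt out of splitting and merging entirely, so that chunks jemalloc retains keep their original boundaries; my_split and my_merge are illustrative names and the behavioral consequence described in the comments is an assumption based on the callers shown in this diff.

#include <stdbool.h>
#include <stddef.h>

static bool
my_split(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

    return (true);    /* Refuse; jemalloc will not carve this chunk up. */
}

static bool
my_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

    return (true);    /* Refuse; adjacent chunks are never coalesced. */
}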
src/chunk_dss.c

@@ -134,10 +134,10 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
             dss_max = dss_next;
             malloc_mutex_unlock(&dss_mtx);
             if (cpad_size != 0) {
-                chunk_record(arena,
-                    &arena->chunks_szad_dss,
-                    &arena->chunks_ad_dss, false, cpad,
-                    cpad_size, false);
+                chunk_hooks_t chunk_hooks =
+                    CHUNK_HOOKS_INITIALIZER;
+                chunk_dalloc_wrapper(arena,
+                    &chunk_hooks, cpad, cpad_size);
             }
             if (*zero) {
                 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
|
src/chunk_mmap.c

@@ -2,137 +2,6 @@
|
|||||||
#include "jemalloc/internal/jemalloc_internal.h"
|
#include "jemalloc/internal/jemalloc_internal.h"
|
||||||
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Function prototypes for non-inline static functions. */
|
|
||||||
|
|
||||||
static void *pages_map(void *addr, size_t size);
|
|
||||||
static void pages_unmap(void *addr, size_t size);
|
|
||||||
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
|
|
||||||
bool *zero);
|
|
||||||
|
|
||||||
/******************************************************************************/
|
|
||||||
|
|
||||||
static void *
|
|
||||||
pages_map(void *addr, size_t size)
|
|
||||||
{
|
|
||||||
void *ret;
|
|
||||||
|
|
||||||
assert(size != 0);
|
|
||||||
|
|
||||||
#ifdef _WIN32
|
|
||||||
/*
|
|
||||||
* If VirtualAlloc can't allocate at the given address when one is
|
|
||||||
* given, it fails and returns NULL.
|
|
||||||
*/
|
|
||||||
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
|
|
||||||
PAGE_READWRITE);
|
|
||||||
#else
|
|
||||||
/*
|
|
||||||
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
|
||||||
* of existing mappings, and we only want to create new mappings.
|
|
||||||
*/
|
|
||||||
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
|
|
||||||
-1, 0);
|
|
||||||
assert(ret != NULL);
|
|
||||||
|
|
||||||
if (ret == MAP_FAILED)
|
|
||||||
ret = NULL;
|
|
||||||
else if (addr != NULL && ret != addr) {
|
|
||||||
/*
|
|
||||||
* We succeeded in mapping memory, but not in the right place.
|
|
||||||
*/
|
|
||||||
pages_unmap(ret, size);
|
|
||||||
ret = NULL;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
assert(ret == NULL || (addr == NULL && ret != addr)
|
|
||||||
|| (addr != NULL && ret == addr));
|
|
||||||
return (ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
|
||||||
pages_unmap(void *addr, size_t size)
|
|
||||||
{
|
|
||||||
|
|
||||||
#ifdef _WIN32
|
|
||||||
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
|
||||||
#else
|
|
||||||
if (munmap(addr, size) == -1)
|
|
||||||
#endif
|
|
||||||
{
|
|
||||||
char buf[BUFERROR_BUF];
|
|
||||||
|
|
||||||
buferror(get_errno(), buf, sizeof(buf));
|
|
||||||
malloc_printf("<jemalloc>: Error in "
|
|
||||||
#ifdef _WIN32
|
|
||||||
"VirtualFree"
|
|
||||||
#else
|
|
||||||
"munmap"
|
|
||||||
#endif
|
|
||||||
"(): %s\n", buf);
|
|
||||||
if (opt_abort)
|
|
||||||
abort();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *
|
|
||||||
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
|
|
||||||
{
|
|
||||||
void *ret = (void *)((uintptr_t)addr + leadsize);
|
|
||||||
|
|
||||||
assert(alloc_size >= leadsize + size);
|
|
||||||
#ifdef _WIN32
|
|
||||||
{
|
|
||||||
void *new_addr;
|
|
||||||
|
|
||||||
pages_unmap(addr, alloc_size);
|
|
||||||
new_addr = pages_map(ret, size);
|
|
||||||
if (new_addr == ret)
|
|
||||||
return (ret);
|
|
||||||
if (new_addr)
|
|
||||||
pages_unmap(new_addr, size);
|
|
||||||
return (NULL);
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
{
|
|
||||||
size_t trailsize = alloc_size - leadsize - size;
|
|
||||||
|
|
||||||
if (leadsize != 0)
|
|
||||||
pages_unmap(addr, leadsize);
|
|
||||||
if (trailsize != 0)
|
|
||||||
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
|
||||||
return (ret);
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
bool
|
|
||||||
pages_purge(void *addr, size_t length)
|
|
||||||
{
|
|
||||||
bool unzeroed;
|
|
||||||
|
|
||||||
#ifdef _WIN32
|
|
||||||
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
|
|
||||||
unzeroed = true;
|
|
||||||
#elif defined(JEMALLOC_HAVE_MADVISE)
|
|
||||||
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
|
||||||
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
|
|
||||||
# define JEMALLOC_MADV_ZEROS true
|
|
||||||
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
|
||||||
# define JEMALLOC_MADV_PURGE MADV_FREE
|
|
||||||
# define JEMALLOC_MADV_ZEROS false
|
|
||||||
# else
|
|
||||||
# error "No madvise(2) flag defined for purging unused dirty pages."
|
|
||||||
# endif
|
|
||||||
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
|
|
||||||
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
|
|
||||||
# undef JEMALLOC_MADV_PURGE
|
|
||||||
# undef JEMALLOC_MADV_ZEROS
|
|
||||||
#else
|
|
||||||
/* Last resort no-op. */
|
|
||||||
unzeroed = true;
|
|
||||||
#endif
|
|
||||||
return (unzeroed);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
|
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
|
||||||
|
src/ctl.c

@@ -118,9 +118,7 @@ CTL_PROTO(arena_i_purge)
 static void arena_purge(unsigned arena_ind);
 CTL_PROTO(arena_i_dss)
 CTL_PROTO(arena_i_lg_dirty_mult)
-CTL_PROTO(arena_i_chunk_alloc)
-CTL_PROTO(arena_i_chunk_dalloc)
-CTL_PROTO(arena_i_chunk_purge)
+CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
@@ -288,17 +286,11 @@ static const ctl_named_node_t tcache_node[] = {
     {NAME("destroy"), CTL(tcache_destroy)}
 };
 
-static const ctl_named_node_t chunk_node[] = {
-    {NAME("alloc"), CTL(arena_i_chunk_alloc)},
-    {NAME("dalloc"), CTL(arena_i_chunk_dalloc)},
-    {NAME("purge"), CTL(arena_i_chunk_purge)}
-};
-
 static const ctl_named_node_t arena_i_node[] = {
     {NAME("purge"), CTL(arena_i_purge)},
     {NAME("dss"), CTL(arena_i_dss)},
     {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
-    {NAME("chunk"), CHILD(named, chunk)},
+    {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
 };
 static const ctl_named_node_t super_arena_i_node[] = {
     {NAME(""), CHILD(named, arena_i)}
@@ -1064,7 +1056,7 @@ ctl_postfork_child(void)
             memcpy(oldp, (void *)&(v), copylen); \
             ret = EINVAL; \
             goto label_return; \
-        } else \
+        } \
             *(t *)oldp = (v); \
     } \
 } while (0)
@@ -1682,37 +1674,36 @@ label_return:
     return (ret);
 }
 
-#define CHUNK_FUNC(n) \
-static int \
-arena_i_chunk_##n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
-    size_t *oldlenp, void *newp, size_t newlen) \
-{ \
- \
-    int ret; \
-    unsigned arena_ind = mib[1]; \
-    arena_t *arena; \
- \
-    malloc_mutex_lock(&ctl_mtx); \
-    if (arena_ind < narenas_total_get() && (arena = \
-        arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { \
-        malloc_mutex_lock(&arena->lock); \
-        READ(arena->chunk_##n, chunk_##n##_t *); \
-        WRITE(arena->chunk_##n, chunk_##n##_t *); \
-    } else { \
-        ret = EFAULT; \
-        goto label_outer_return; \
-    } \
-    ret = 0; \
-label_return: \
-    malloc_mutex_unlock(&arena->lock); \
-label_outer_return: \
-    malloc_mutex_unlock(&ctl_mtx); \
-    return (ret); \
+static int
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+    int ret;
+    unsigned arena_ind = mib[1];
+    arena_t *arena;
+
+    malloc_mutex_lock(&ctl_mtx);
+    if (arena_ind < narenas_total_get() && (arena =
+        arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
+        if (newp != NULL) {
+            chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+            WRITE(new_chunk_hooks, chunk_hooks_t);
+            old_chunk_hooks = chunk_hooks_set(arena,
+                &new_chunk_hooks);
+            READ(old_chunk_hooks, chunk_hooks_t);
+        } else {
+            chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
+            READ(old_chunk_hooks, chunk_hooks_t);
+        }
+    } else {
+        ret = EFAULT;
+        goto label_return;
+    }
+    ret = 0;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
 }
-CHUNK_FUNC(alloc)
-CHUNK_FUNC(dalloc)
-CHUNK_FUNC(purge)
-#undef CHUNK_FUNC
 
 static const ctl_named_node_t *
 arena_i_index(const size_t *mib, size_t miblen, size_t i)
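Because the handler above performs WRITE() before READ(), one mallctl call can both install a new hook table and hand back the table it replaces, which is how the integration test later in this diff exercises it. A small sketch (not part of this commit), assuming <jemalloc/jemalloc.h>; the helper name swap_chunk_hooks and the choice of arena 0 are illustrative only.

#include <jemalloc/jemalloc.h>

/* Install *new_hooks for arena 0 and capture the table they replace. */
static int
swap_chunk_hooks(const chunk_hooks_t *new_hooks, chunk_hooks_t *old_hooks)
{
    size_t old_size = sizeof(chunk_hooks_t);

    return (mallctl("arena.0.chunk_hooks", old_hooks, &old_size,
        (void *)new_hooks, sizeof(chunk_hooks_t)));
}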
src/huge.c

@@ -79,7 +79,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
|
|||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
extent_node_init(node, arena, ret, size, is_zeroed);
|
extent_node_init(node, arena, ret, size, true, is_zeroed);
|
||||||
|
|
||||||
if (huge_node_set(ret, node)) {
|
if (huge_node_set(ret, node)) {
|
||||||
arena_chunk_dalloc_huge(arena, ret, size);
|
arena_chunk_dalloc_huge(arena, ret, size);
|
||||||
@ -132,7 +132,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
|||||||
size_t usize_next;
|
size_t usize_next;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
arena_t *arena;
|
arena_t *arena;
|
||||||
chunk_purge_t *chunk_purge;
|
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||||
bool zeroed;
|
bool zeroed;
|
||||||
|
|
||||||
/* Increase usize to incorporate extra. */
|
/* Increase usize to incorporate extra. */
|
||||||
@ -145,15 +145,11 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
|||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->lock);
|
|
||||||
chunk_purge = arena->chunk_purge;
|
|
||||||
malloc_mutex_unlock(&arena->lock);
|
|
||||||
|
|
||||||
/* Fill if necessary (shrinking). */
|
/* Fill if necessary (shrinking). */
|
||||||
if (oldsize > usize) {
|
if (oldsize > usize) {
|
||||||
size_t sdiff = oldsize - usize;
|
size_t sdiff = oldsize - usize;
|
||||||
zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
|
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
|
||||||
sdiff);
|
CHUNK_CEILING(usize), usize, sdiff);
|
||||||
if (config_fill && unlikely(opt_junk_free)) {
|
if (config_fill && unlikely(opt_junk_free)) {
|
||||||
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
|
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
|
||||||
zeroed = false;
|
zeroed = false;
|
||||||
@ -185,26 +181,31 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static bool
|
||||||
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
||||||
{
|
{
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
arena_t *arena;
|
arena_t *arena;
|
||||||
chunk_purge_t *chunk_purge;
|
chunk_hooks_t chunk_hooks;
|
||||||
|
size_t cdiff;
|
||||||
bool zeroed;
|
bool zeroed;
|
||||||
|
|
||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
|
chunk_hooks = chunk_hooks_get(arena);
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->lock);
|
/* Split excess chunks. */
|
||||||
chunk_purge = arena->chunk_purge;
|
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
|
||||||
malloc_mutex_unlock(&arena->lock);
|
if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
|
||||||
|
CHUNK_CEILING(usize), cdiff, true, arena->ind))
|
||||||
|
return (true);
|
||||||
|
|
||||||
if (oldsize > usize) {
|
if (oldsize > usize) {
|
||||||
size_t sdiff = oldsize - usize;
|
size_t sdiff = oldsize - usize;
|
||||||
zeroed = !chunk_purge_wrapper(arena, chunk_purge,
|
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
|
||||||
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
|
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
|
||||||
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
|
CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
|
||||||
|
usize), sdiff);
|
||||||
if (config_fill && unlikely(opt_junk_free)) {
|
if (config_fill && unlikely(opt_junk_free)) {
|
||||||
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
|
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
|
||||||
sdiff);
|
sdiff);
|
||||||
@ -222,6 +223,8 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
|||||||
|
|
||||||
/* Zap the excess chunks. */
|
/* Zap the excess chunks. */
|
||||||
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
|
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
|
||||||
|
|
||||||
|
return (false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool
|
static bool
|
||||||
@ -304,14 +307,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
|
|||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!maps_coalesce)
|
/* Attempt to shrink the allocation in-place. */
|
||||||
return (true);
|
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
|
||||||
|
return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
|
||||||
/* Shrink the allocation in-place. */
|
|
||||||
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
|
|
||||||
huge_ralloc_no_move_shrink(ptr, oldsize, usize);
|
|
||||||
return (false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Attempt to expand the allocation in-place. */
|
/* Attempt to expand the allocation in-place. */
|
||||||
if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
|
if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
|
||||||
|
src/pages.c (new file)

@@ -0,0 +1,167 @@
|
|||||||
|
#define JEMALLOC_PAGES_C_
|
||||||
|
#include "jemalloc/internal/jemalloc_internal.h"
|
||||||
|
|
||||||
|
/******************************************************************************/
|
||||||
|
|
||||||
|
void *
|
||||||
|
pages_map(void *addr, size_t size)
|
||||||
|
{
|
||||||
|
void *ret;
|
||||||
|
|
||||||
|
assert(size != 0);
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
/*
|
||||||
|
* If VirtualAlloc can't allocate at the given address when one is
|
||||||
|
* given, it fails and returns NULL.
|
||||||
|
*/
|
||||||
|
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
|
||||||
|
PAGE_READWRITE);
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
||||||
|
* of existing mappings, and we only want to create new mappings.
|
||||||
|
*/
|
||||||
|
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
|
||||||
|
-1, 0);
|
||||||
|
assert(ret != NULL);
|
||||||
|
|
||||||
|
if (ret == MAP_FAILED)
|
||||||
|
ret = NULL;
|
||||||
|
else if (addr != NULL && ret != addr) {
|
||||||
|
/*
|
||||||
|
* We succeeded in mapping memory, but not in the right place.
|
||||||
|
*/
|
||||||
|
pages_unmap(ret, size);
|
||||||
|
ret = NULL;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
assert(ret == NULL || (addr == NULL && ret != addr)
|
||||||
|
|| (addr != NULL && ret == addr));
|
||||||
|
return (ret);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
pages_unmap(void *addr, size_t size)
|
||||||
|
{
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
||||||
|
#else
|
||||||
|
if (munmap(addr, size) == -1)
|
||||||
|
#endif
|
||||||
|
{
|
||||||
|
char buf[BUFERROR_BUF];
|
||||||
|
|
||||||
|
buferror(get_errno(), buf, sizeof(buf));
|
||||||
|
malloc_printf("<jemalloc>: Error in "
|
||||||
|
#ifdef _WIN32
|
||||||
|
"VirtualFree"
|
||||||
|
#else
|
||||||
|
"munmap"
|
||||||
|
#endif
|
||||||
|
"(): %s\n", buf);
|
||||||
|
if (opt_abort)
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void *
|
||||||
|
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
|
||||||
|
{
|
||||||
|
void *ret = (void *)((uintptr_t)addr + leadsize);
|
||||||
|
|
||||||
|
assert(alloc_size >= leadsize + size);
|
||||||
|
#ifdef _WIN32
|
||||||
|
{
|
||||||
|
void *new_addr;
|
||||||
|
|
||||||
|
pages_unmap(addr, alloc_size);
|
||||||
|
new_addr = pages_map(ret, size);
|
||||||
|
if (new_addr == ret)
|
||||||
|
return (ret);
|
||||||
|
if (new_addr)
|
||||||
|
pages_unmap(new_addr, size);
|
||||||
|
return (NULL);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
{
|
||||||
|
size_t trailsize = alloc_size - leadsize - size;
|
||||||
|
|
||||||
|
if (leadsize != 0)
|
||||||
|
pages_unmap(addr, leadsize);
|
||||||
|
if (trailsize != 0)
|
||||||
|
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
||||||
|
return (ret);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
pages_commit_impl(void *addr, size_t size, bool commit)
|
||||||
|
{
|
||||||
|
|
||||||
|
#ifndef _WIN32
|
||||||
|
if (config_debug) {
|
||||||
|
int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
|
||||||
|
void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
|
||||||
|
MAP_FIXED, -1, 0);
|
||||||
|
if (result == MAP_FAILED)
|
||||||
|
return (true);
|
||||||
|
if (result != addr) {
|
||||||
|
/*
|
||||||
|
* We succeeded in mapping memory, but not in the right
|
||||||
|
* place.
|
||||||
|
*/
|
||||||
|
pages_unmap(result, size);
|
||||||
|
return (true);
|
||||||
|
}
|
||||||
|
return (false);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
return (true);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
pages_commit(void *addr, size_t size)
|
||||||
|
{
|
||||||
|
|
||||||
|
return (pages_commit_impl(addr, size, true));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
pages_decommit(void *addr, size_t size)
|
||||||
|
{
|
||||||
|
|
||||||
|
return (pages_commit_impl(addr, size, false));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
pages_purge(void *addr, size_t size)
|
||||||
|
{
|
||||||
|
bool unzeroed;
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
|
||||||
|
unzeroed = true;
|
||||||
|
#elif defined(JEMALLOC_HAVE_MADVISE)
|
||||||
|
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||||
|
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
|
||||||
|
# define JEMALLOC_MADV_ZEROS true
|
||||||
|
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||||
|
# define JEMALLOC_MADV_PURGE MADV_FREE
|
||||||
|
# define JEMALLOC_MADV_ZEROS false
|
||||||
|
# else
|
||||||
|
# error "No madvise(2) flag defined for purging unused dirty pages."
|
||||||
|
# endif
|
||||||
|
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
|
||||||
|
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
|
||||||
|
# undef JEMALLOC_MADV_PURGE
|
||||||
|
# undef JEMALLOC_MADV_ZEROS
|
||||||
|
#else
|
||||||
|
/* Last resort no-op. */
|
||||||
|
unzeroed = true;
|
||||||
|
#endif
|
||||||
|
return (unzeroed);
|
||||||
|
}
|
||||||
|
|
@ -1,59 +1,140 @@
|
|||||||
#include "test/jemalloc_test.h"
|
#include "test/jemalloc_test.h"
|
||||||
|
|
||||||
chunk_alloc_t *old_alloc;
|
static chunk_hooks_t orig_hooks;
|
||||||
chunk_dalloc_t *old_dalloc;
|
static chunk_hooks_t old_hooks;
|
||||||
chunk_purge_t *old_purge;
|
|
||||||
bool purged;
|
static bool do_dalloc = true;
|
||||||
|
static bool do_decommit;
|
||||||
|
|
||||||
|
static bool did_alloc;
|
||||||
|
static bool did_dalloc;
|
||||||
|
static bool did_commit;
|
||||||
|
static bool did_decommit;
|
||||||
|
static bool did_purge;
|
||||||
|
static bool did_split;
|
||||||
|
static bool did_merge;
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
|
||||||
|
#else
|
||||||
|
# define TRACE_HOOK(fmt, ...)
|
||||||
|
#endif
|
||||||
|
|
||||||
void *
|
void *
|
||||||
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
|
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||||
unsigned arena_ind)
|
unsigned arena_ind)
|
||||||
{
|
{
|
||||||
|
|
||||||
return (old_alloc(new_addr, size, alignment, zero, arena_ind));
|
TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, "
|
||||||
|
"arena_ind=%u)\n", __func__, new_addr, size, alignment, *zero ?
|
||||||
|
"true" : "false", arena_ind);
|
||||||
|
did_alloc = true;
|
||||||
|
return (old_hooks.alloc(new_addr, size, alignment, zero, arena_ind));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
|
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
|
||||||
{
|
{
|
||||||
|
|
||||||
return (old_dalloc(chunk, size, arena_ind));
|
TRACE_HOOK("%s(chunk=%p, size=%zu, arena_ind=%u)\n", __func__, chunk,
|
||||||
|
size, arena_ind);
|
||||||
|
did_dalloc = true;
|
||||||
|
if (!do_dalloc)
|
||||||
|
return (true);
|
||||||
|
return (old_hooks.dalloc(chunk, size, arena_ind));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
chunk_purge(void *chunk, size_t offset, size_t length, unsigned arena_ind)
|
chunk_commit(void *chunk, size_t size, unsigned arena_ind)
|
||||||
{
|
{
|
||||||
|
|
||||||
purged = true;
|
TRACE_HOOK("%s(chunk=%p, size=%zu, arena_ind=%u)\n", __func__, chunk,
|
||||||
return (old_purge(chunk, offset, length, arena_ind));
|
size, arena_ind);
|
||||||
|
did_commit = true;
|
||||||
|
memset(chunk, 0, size);
|
||||||
|
return (false);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
chunk_decommit(void *chunk, size_t size, unsigned arena_ind)
|
||||||
|
{
|
||||||
|
|
||||||
|
TRACE_HOOK("%s(chunk=%p, size=%zu, arena_ind=%u)\n", __func__, chunk,
|
||||||
|
size, arena_ind);
|
||||||
|
did_decommit = true;
|
||||||
|
return (!do_decommit);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
|
||||||
|
unsigned arena_ind)
|
||||||
|
{
|
||||||
|
|
||||||
|
TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
|
||||||
|
"arena_ind=%u)\n", __func__, chunk, size, offset, length,
|
||||||
|
arena_ind);
|
||||||
|
did_purge = true;
|
||||||
|
return (old_hooks.purge(chunk, size, offset, length, arena_ind));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
|
||||||
|
bool committed, unsigned arena_ind)
|
||||||
|
{
|
||||||
|
|
||||||
|
TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
|
||||||
|
"committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
|
||||||
|
size_b, committed ? "true" : "false", arena_ind);
|
||||||
|
did_split = true;
|
||||||
|
return (old_hooks.split(chunk, size, size_a, size_b, committed,
|
||||||
|
arena_ind));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
|
||||||
|
bool committed, unsigned arena_ind)
|
||||||
|
{
|
||||||
|
|
||||||
|
TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
|
||||||
|
"committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
|
||||||
|
size_b, committed ? "true" : "false", arena_ind);
|
||||||
|
did_merge = true;
|
||||||
|
return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
|
||||||
|
committed, arena_ind));
|
||||||
}
|
}
|
||||||
|
|
||||||
 TEST_BEGIN(test_chunk)
 {
     void *p;
-    chunk_alloc_t *new_alloc;
-    chunk_dalloc_t *new_dalloc;
-    chunk_purge_t *new_purge;
     size_t old_size, new_size, huge0, huge1, huge2, sz;
+    chunk_hooks_t new_hooks = {
+        chunk_alloc,
+        chunk_dalloc,
+        chunk_commit,
+        chunk_decommit,
+        chunk_purge,
+        chunk_split,
+        chunk_merge
+    };

-    new_alloc = chunk_alloc;
-    new_dalloc = chunk_dalloc;
-    new_purge = chunk_purge;
-    old_size = sizeof(chunk_alloc_t *);
-    new_size = sizeof(chunk_alloc_t *);
-    assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc, &old_size,
-        &new_alloc, new_size), 0, "Unexpected alloc error");
-    assert_ptr_ne(old_alloc, new_alloc, "Unexpected alloc error");
-    assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
-        &new_dalloc, new_size), 0, "Unexpected dalloc error");
-    assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
-    assert_d_eq(mallctl("arena.0.chunk.purge", &old_purge, &old_size,
-        &new_purge, new_size), 0, "Unexpected purge error");
-    assert_ptr_ne(old_purge, new_purge, "Unexpected purge error");
+    /* Install custom chunk hooks. */
+    old_size = sizeof(chunk_hooks_t);
+    new_size = sizeof(chunk_hooks_t);
+    assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+        &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+    orig_hooks = old_hooks;
+    assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
+    assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+        "Unexpected dalloc error");
+    assert_ptr_ne(old_hooks.commit, chunk_commit,
+        "Unexpected commit error");
+    assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+        "Unexpected decommit error");
+    assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
+    assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
+    assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");

+    /* Get huge size classes. */
     sz = sizeof(size_t);
     assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
         "Unexpected arenas.hchunk.0.size failure");
@@ -61,6 +142,49 @@ TEST_BEGIN(test_chunk)
         "Unexpected arenas.hchunk.1.size failure");
     assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
         "Unexpected arenas.hchunk.2.size failure");

+    /* Test dalloc/decommit/purge cascade. */
+    do_dalloc = false;
+    do_decommit = false;
+    p = mallocx(huge0 * 2, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+    did_dalloc = false;
+    did_decommit = false;
+    did_purge = false;
+    assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() failure");
+    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+        "Unexpected arena.0.purge error");
+    assert_true(did_dalloc, "Expected dalloc");
+    assert_true(did_decommit, "Expected decommit");
+    assert_true(did_purge, "Expected purge");
+    dallocx(p, 0);
+    do_dalloc = true;
+
+    /* Test decommit/commit and observe split/merge. */
+    do_dalloc = false;
+    do_decommit = true;
+    p = mallocx(huge0 * 2, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+    did_decommit = false;
+    did_commit = false;
+    did_split = false;
+    did_merge = false;
+    assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() failure");
+    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+        "Unexpected arena.0.purge error");
+    assert_true(did_decommit, "Expected decommit");
+    assert_true(did_split, "Expected split");
+    assert_zu_eq(xallocx(p, huge0 * 2, 0, 0), huge0 * 2,
+        "Unexpected xallocx() failure");
+    assert_true(did_commit, "Expected commit");
+    assert_true(did_merge, "Expected merge");
+    dallocx(p, 0);
+    do_dalloc = true;
+    do_decommit = false;
+
+    /* Test purge for partial-chunk huge allocations. */
     if (huge0 * 2 > huge2) {
         /*
          * There are at least four size classes per doubling, so a
@@ -69,23 +193,37 @@ TEST_BEGIN(test_chunk)
          */
         p = mallocx(huge2, 0);
         assert_ptr_not_null(p, "Unexpected mallocx() error");
-        purged = false;
+        did_purge = false;
         assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
             "Unexpected xallocx() failure");
-        assert_true(purged, "Unexpected purge");
+        assert_true(did_purge, "Unexpected purge");
         dallocx(p, 0);
     }

+    /* Make sure non-huge allocation succeeds. */
     p = mallocx(42, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() error");
-    free(p);
+    dallocx(p, 0);

-    assert_d_eq(mallctl("arena.0.chunk.alloc", NULL, NULL, &old_alloc,
-        old_size), 0, "Unexpected alloc error");
-    assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
-        old_size), 0, "Unexpected dalloc error");
-    assert_d_eq(mallctl("arena.0.chunk.purge", NULL, NULL, &old_purge,
-        old_size), 0, "Unexpected purge error");
+    /* Restore chunk hooks. */
+    assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
+        new_size), 0, "Unexpected chunk_hooks error");
+    assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+        NULL, 0), 0, "Unexpected chunk_hooks error");
+    assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
+        "Unexpected alloc error");
+    assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
+        "Unexpected dalloc error");
+    assert_ptr_eq(old_hooks.commit, orig_hooks.commit,
+        "Unexpected commit error");
+    assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit,
+        "Unexpected decommit error");
+    assert_ptr_eq(old_hooks.purge, orig_hooks.purge,
+        "Unexpected purge error");
+    assert_ptr_eq(old_hooks.split, orig_hooks.split,
+        "Unexpected split error");
+    assert_ptr_eq(old_hooks.merge, orig_hooks.merge,
+        "Unexpected merge error");
 }
 TEST_END
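The pattern the updated test exercises is the same one an application would follow: read the arena's current chunk_hooks_t through the "arena.0.chunk_hooks" mallctl, substitute the members it wants to intercept, and write the table back. The standalone sketch below is not part of this commit; it assumes an unprefixed jemalloc build where mallctl and chunk_hooks_t are visible via <jemalloc/jemalloc.h>, uses the hook signatures exactly as the test above defines them, and the my_chunk_alloc name is purely illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#include <jemalloc/jemalloc.h>

/* Copy of the hooks that were installed before ours, so the wrapper can
 * delegate to the previous behavior. */
static chunk_hooks_t default_hooks;

/* Illustrative pass-through alloc hook; a real application might add
 * accounting or a custom mapping policy here.  The signature mirrors the
 * chunk_alloc() wrapper in the test above. */
static void *
my_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
    return (default_hooks.alloc(new_addr, size, alignment, zero,
        arena_ind));
}

int
main(void)
{
    chunk_hooks_t hooks;
    size_t sz = sizeof(chunk_hooks_t);
    void *p;

    /* Read arena 0's current hook table. */
    assert(mallctl("arena.0.chunk_hooks", &default_hooks, &sz, NULL,
        0) == 0);

    /* Override only the alloc member; the remaining members keep
     * delegating to the previously installed hooks. */
    hooks = default_hooks;
    hooks.alloc = my_chunk_alloc;
    assert(mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
        sizeof(chunk_hooks_t)) == 0);

    /* A huge allocation served by arena 0 should now reach
     * my_chunk_alloc() whenever a new chunk is needed. */
    p = malloc(8 << 20);
    free(p);
    return (0);
}

Restoring the previous behavior is symmetric: write default_hooks back through the same mallctl, which is what the test does in its "Restore chunk hooks" step.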