Implement metadata statistics.

There are three categories of metadata:

- Base allocations are used for bootstrap-sensitive internal allocator
  data structures.
- Arena chunk headers comprise pages which track the states of the
  non-metadata pages.
- Internal allocations differ from application-originated allocations
  in that they are for internal use, and that they are omitted from heap
  profiles.

The metadata statistics comprise the metadata categories as follows:

- stats.metadata: All metadata -- base + arena chunk headers + internal
  allocations.
- stats.arenas.<i>.metadata.mapped: Arena chunk headers.
- stats.arenas.<i>.metadata.allocated: Internal allocations.  This is
  reported separately from the other metadata statistics because it
  overlaps with the allocated and active statistics, whereas the other
  metadata statistics do not.

Base allocations are not reported separately; their magnitude can be
computed by subtracting the arena-specific metadata statistics from
stats.metadata.
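
As a usage sketch (not part of this commit), the new statistics can be read
through the existing mallctl() interface once jemalloc is built with
--enable-stats.  The arena index 0 and the single-arena base computation are
illustrative assumptions, not something the commit prescribes:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    /* Statistics are cached; bump the epoch to refresh them. */
    uint64_t epoch = 1;
    size_t esz = sizeof(epoch);
    mallctl("epoch", &epoch, &esz, &epoch, esz);

    size_t metadata, mapped, allocated, sz = sizeof(size_t);
    mallctl("stats.metadata", &metadata, &sz, NULL, 0);
    /* Arena 0 only; a full accounting would loop over all arenas. */
    mallctl("stats.arenas.0.metadata.mapped", &mapped, &sz, NULL, 0);
    mallctl("stats.arenas.0.metadata.allocated", &allocated, &sz, NULL, 0);

    printf("total metadata: %zu\n", metadata);
    printf("arena 0 chunk headers: %zu, internal allocations: %zu\n",
        mapped, allocated);
    /* With a single arena, the remainder is the base allocations. */
    printf("base allocations (single-arena assumption): %zu\n",
        metadata - mapped - allocated);
    return (0);
}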

This resolves #163.
Jason Evans 2014-11-27 17:22:36 -02:00
parent ec98a44662
commit 4581b97809
18 changed files with 393 additions and 204 deletions

doc/jemalloc.xml.in

@@ -1773,6 +1773,21 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         entirely devoted to allocator metadata.</para></listitem>
       </varlistentry>
+      <varlistentry id="stats.metadata">
+        <term>
+          <mallctl>stats.metadata</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Total number of bytes dedicated to metadata, which
+        comprise base allocations used for bootstrap-sensitive internal
+        allocator data structures, arena chunk headers (see <link
+        linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>),
+        and internal allocations (see <link
+        linkend="stats.arenas.i.metadata.allocated"><mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl></link>).</para></listitem>
+      </varlistentry>
       <varlistentry id="stats.mapped">
         <term>
           <mallctl>stats.mapped</mallctl>
@@ -1875,6 +1890,38 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <listitem><para>Number of mapped bytes.</para></listitem>
       </varlistentry>
+      <varlistentry id="stats.arenas.i.metadata.mapped">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of mapped bytes in arena chunk headers, which
+        track the states of the non-metadata pages.</para></listitem>
+      </varlistentry>
+      <varlistentry id="stats.arenas.i.metadata.allocated">
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes dedicated to internal allocations.
+        Internal allocations differ from application-originated allocations in
+        that they are for internal use, and that they are omitted from heap
+        profiles.  This statistic is reported separately from <link
+        linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+        <link
+        linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>
+        because it overlaps with e.g. the <link
+        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link> and
+        <link linkend="stats.active"><mallctl>stats.active</mallctl></link>
+        statistics, whereas the other metadata statistics do
+        not.</para></listitem>
+      </varlistentry>
       <varlistentry id="stats.arenas.i.npurge">
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>

include/jemalloc/internal/arena.h

@@ -437,6 +437,9 @@ void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
     size_t runind, index_t binind, size_t flags);
 void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
     size_t unzeroed);
+void arena_metadata_allocated_add(arena_t *arena, size_t size);
+void arena_metadata_allocated_sub(arena_t *arena, size_t size);
+size_t arena_metadata_allocated_get(arena_t *arena);
 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
@@ -448,6 +451,7 @@ prof_tctx_t *arena_prof_tctx_get(const void *ptr);
 void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
 void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     bool try_tcache);
+arena_t *arena_aalloc(const void *ptr);
 size_t arena_salloc(const void *ptr, bool demote);
 void arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr,
     bool try_tcache);
@@ -699,6 +703,27 @@ arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
     unzeroed);
 }
 
+JEMALLOC_INLINE void
+arena_metadata_allocated_add(arena_t *arena, size_t size)
+{
+
+    atomic_add_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE void
+arena_metadata_allocated_sub(arena_t *arena, size_t size)
+{
+
+    atomic_sub_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE size_t
+arena_metadata_allocated_get(arena_t *arena)
+{
+
+    return (atomic_read_z(&arena->stats.metadata_allocated));
+}
+
 JEMALLOC_INLINE bool
 arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 {
@@ -952,6 +977,15 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     }
 }
 
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(const void *ptr)
+{
+    arena_chunk_t *chunk;
+
+    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+    return (chunk->arena);
+}
+
 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
 arena_salloc(const void *ptr, bool demote)

include/jemalloc/internal/base.h

@@ -13,6 +13,7 @@ void *base_alloc(size_t size);
 void *base_calloc(size_t number, size_t size);
 extent_node_t *base_node_alloc(void);
 void base_node_dalloc(extent_node_t *node);
+size_t base_allocated_get(void);
 bool base_boot(void);
 void base_prefork(void);
 void base_postfork_parent(void);

include/jemalloc/internal/ctl.h

@@ -52,6 +52,9 @@ struct ctl_arena_stats_s {
 struct ctl_stats_s {
     size_t allocated;
     size_t active;
+    size_t metadata;
     size_t mapped;
     struct {
         size_t current; /* stats_chunks.curchunks */

include/jemalloc/internal/huge.h

@@ -23,6 +23,7 @@ typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
 void huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache);
+arena_t *huge_aalloc(const void *ptr);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);

include/jemalloc/internal/jemalloc_internal.h.in

@@ -404,8 +404,9 @@ extern size_t const index2size_tab[NSIZES];
 extern uint8_t const size2index_tab[];
 
 arena_t *a0get(void);
-void *a0malloc(size_t size, bool zero);
+void *a0malloc(size_t size);
 void a0dalloc(void *ptr);
+size_t a0allocated(void);
 arena_t *arenas_extend(unsigned ind);
 arena_t *arena_init(unsigned ind);
 unsigned narenas_total_get(void);
@@ -776,21 +777,27 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
 #include "jemalloc/internal/quarantine.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
+arena_t *iaalloc(const void *ptr);
+size_t isalloc(const void *ptr, bool demote);
+void *iallocztm(tsd_t *tsd, size_t size, bool zero, bool try_tcache,
+    bool is_metadata, arena_t *arena);
 void *imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
 void *imalloc(tsd_t *tsd, size_t size);
 void *icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena);
 void *icalloc(tsd_t *tsd, size_t size);
+void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    bool try_tcache, bool is_metadata, arena_t *arena);
 void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     bool try_tcache, arena_t *arena);
 void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t isalloc(const void *ptr, bool demote);
 size_t ivsalloc(const void *ptr, bool demote);
 size_t u2rz(size_t usize);
 size_t p2rz(const void *ptr);
+void idalloctm(tsd_t *tsd, void *ptr, bool try_tcache, bool is_metadata);
 void idalloct(tsd_t *tsd, void *ptr, bool try_tcache);
+void isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
 void idalloc(tsd_t *tsd, void *ptr);
 void iqalloc(tsd_t *tsd, void *ptr, bool try_tcache);
-void isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
 void isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache);
 void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
@@ -805,76 +812,21 @@ bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_ALWAYS_INLINE void *
-imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
-{
-
-    assert(size != 0);
-
-    if (likely(size <= arena_maxclass))
-        return (arena_malloc(tsd, arena, size, false, try_tcache));
-    else
-        return (huge_malloc(tsd, arena, size, false, try_tcache));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-imalloc(tsd_t *tsd, size_t size)
-{
-
-    return (imalloct(tsd, size, true, NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
-{
-
-    if (likely(size <= arena_maxclass))
-        return (arena_malloc(tsd, arena, size, true, try_tcache));
-    else
-        return (huge_malloc(tsd, arena, size, true, try_tcache));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-icalloc(tsd_t *tsd, size_t size)
-{
-
-    return (icalloct(tsd, size, true, NULL));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
-    arena_t *arena)
-{
-    void *ret;
-
-    assert(usize != 0);
-    assert(usize == sa2u(usize, alignment));
-
-    if (usize <= SMALL_MAXCLASS && alignment < PAGE)
-        ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
-    else {
-        if (likely(usize <= arena_maxclass)) {
-            arena = arena_choose(tsd, arena);
-            if (unlikely(arena == NULL))
-                return (NULL);
-            ret = arena_palloc(arena, usize, alignment, zero);
-        } else if (likely(alignment <= chunksize))
-            ret = huge_malloc(tsd, arena, usize, zero, try_tcache);
-        else {
-            ret = huge_palloc(tsd, arena, usize, alignment, zero,
-                try_tcache);
-        }
-    }
-
-    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
-    return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
-
-    return (ipalloct(tsd, usize, alignment, zero, true, NULL));
-}
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(const void *ptr)
+{
+    arena_t *arena;
+    arena_chunk_t *chunk;
+
+    assert(ptr != NULL);
+
+    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+    if (likely(chunk != ptr))
+        arena = arena_aalloc(ptr);
+    else
+        arena = huge_aalloc(ptr);
+
+    return (arena);
+}
 
 /*
@@ -901,6 +853,101 @@ isalloc(const void *ptr, bool demote)
     return (ret);
 }
 
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsd_t *tsd, size_t size, bool zero, bool try_tcache, bool is_metadata,
+    arena_t *arena)
+{
+    void *ret;
+
+    assert(size != 0);
+
+    if (likely(size <= arena_maxclass))
+        ret = arena_malloc(tsd, arena, size, zero, try_tcache);
+    else
+        ret = huge_malloc(tsd, arena, size, zero, try_tcache);
+    if (config_stats && is_metadata && likely(ret != NULL)) {
+        arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+            config_prof));
+    }
+    return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+imalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
+{
+
+    return (iallocztm(tsd, size, false, try_tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+imalloc(tsd_t *tsd, size_t size)
+{
+
+    return (iallocztm(tsd, size, false, true, false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(tsd_t *tsd, size_t size, bool try_tcache, arena_t *arena)
+{
+
+    return (iallocztm(tsd, size, true, try_tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloc(tsd_t *tsd, size_t size)
+{
+
+    return (iallocztm(tsd, size, true, true, false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    bool try_tcache, bool is_metadata, arena_t *arena)
+{
+    void *ret;
+
+    assert(usize != 0);
+    assert(usize == sa2u(usize, alignment));
+
+    if (usize <= SMALL_MAXCLASS && alignment < PAGE)
+        ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
+    else {
+        if (likely(usize <= arena_maxclass)) {
+            arena = arena_choose(tsd, arena);
+            if (unlikely(arena == NULL))
+                return (NULL);
+            ret = arena_palloc(arena, usize, alignment, zero);
+        } else if (likely(alignment <= chunksize))
+            ret = huge_malloc(tsd, arena, usize, zero, try_tcache);
+        else {
+            ret = huge_palloc(tsd, arena, usize, alignment, zero,
+                try_tcache);
+        }
+    }
+
+    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+    if (config_stats && is_metadata && likely(ret != NULL)) {
+        arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+            config_prof));
+    }
+    return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
+    arena_t *arena)
+{
+
+    return (ipallocztm(tsd, usize, alignment, zero, try_tcache, false,
+        arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
+{
+
+    return (ipallocztm(tsd, usize, alignment, zero, true, false, NULL));
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 ivsalloc(const void *ptr, bool demote)
 {
@@ -935,11 +982,15 @@ p2rz(const void *ptr)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
+idalloctm(tsd_t *tsd, void *ptr, bool try_tcache, bool is_metadata)
 {
     arena_chunk_t *chunk;
 
     assert(ptr != NULL);
+    if (config_stats && is_metadata) {
+        arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+            config_prof));
+    }
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (likely(chunk != ptr))
@@ -948,6 +999,30 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
         huge_dalloc(tsd, ptr, try_tcache);
 }
 
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
+{
+
+    idalloctm(tsd, ptr, try_tcache, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloc(tsd_t *tsd, void *ptr)
+{
+
+    idalloctm(tsd, ptr, true, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+iqalloc(tsd_t *tsd, void *ptr, bool try_tcache)
+{
+
+    if (config_fill && unlikely(opt_quarantine))
+        quarantine(tsd, ptr);
+    else
+        idalloctm(tsd, ptr, try_tcache, false);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 {
@@ -962,23 +1037,6 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
         huge_dalloc(tsd, ptr, try_tcache);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, void *ptr)
-{
-
-    idalloct(tsd, ptr, true);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, bool try_tcache)
-{
-
-    if (config_fill && unlikely(opt_quarantine))
-        quarantine(tsd, ptr);
-    else
-        idalloct(tsd, ptr, try_tcache);
-}
-
 JEMALLOC_ALWAYS_INLINE void
 isqalloc(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 {

include/jemalloc/internal/private_symbols.txt

@@ -1,6 +1,7 @@
 a0dalloc
 a0get
 a0malloc
+arena_aalloc
 arena_get
 arena_get_hard
 arena_alloc_junk_small
@@ -50,6 +51,9 @@ arena_mapbitsp_read
 arena_mapbitsp_write
 arena_maxclass
 arena_maxrun
+arena_metadata_allocated_add
+arena_metadata_allocated_get
+arena_metadata_allocated_sub
 arena_migrate
 arena_miscelm_get
 arena_miscelm_to_pageind
@@ -90,6 +94,7 @@ atomic_sub_uint32
 atomic_sub_uint64
 atomic_sub_z
 base_alloc
+base_allocated_get
 base_boot
 base_calloc
 base_node_alloc
@@ -205,6 +210,7 @@ hash_rotl_64
 hash_x64_128
 hash_x86_128
 hash_x86_32
+huge_aalloc
 huge_allocated
 huge_boot
 huge_dalloc
@@ -221,10 +227,13 @@ huge_prof_tctx_set
 huge_ralloc
 huge_ralloc_no_move
 huge_salloc
+iaalloc
+iallocztm
 icalloc
 icalloct
 idalloc
 idalloct
+idalloctm
 imalloc
 imalloct
 in_valgrind
@@ -234,6 +243,7 @@ index2size_lookup
 index2size_tab
 ipalloc
 ipalloct
+ipallocztm
 iqalloc
 iralloc
 iralloct

include/jemalloc/internal/stats.h

@@ -111,6 +111,13 @@ struct arena_stats_s {
     uint64_t nmadvise;
     uint64_t purged;
 
+    /*
+     * Number of bytes currently mapped purely for metadata purposes, and
+     * number of bytes currently allocated for internal metadata.
+     */
+    size_t metadata_mapped;
+    size_t metadata_allocated; /* Protected via atomic_*_z(). */
+
     /* Per-size-category statistics. */
     size_t allocated_large;
     uint64_t nmalloc_large;

src/arena.c

@@ -405,8 +405,10 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
     chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
         arena->ind, NULL, size, alignment, zero);
     malloc_mutex_lock(&arena->lock);
-    if (config_stats && chunk != NULL)
+    if (config_stats && chunk != NULL) {
         arena->stats.mapped += chunksize;
+        arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+    }
 
     return (chunk);
 }
@@ -514,8 +516,10 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
         malloc_mutex_unlock(&arena->lock);
         chunk_dalloc((void *)spare, chunksize, arena->ind);
         malloc_mutex_lock(&arena->lock);
-        if (config_stats)
+        if (config_stats) {
             arena->stats.mapped -= chunksize;
+            arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+        }
     } else
         arena->spare = chunk;
 }
@@ -2273,6 +2277,8 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
     astats->npurge += arena->stats.npurge;
     astats->nmadvise += arena->stats.nmadvise;
     astats->purged += arena->stats.purged;
+    astats->metadata_mapped += arena->stats.metadata_mapped;
+    astats->metadata_allocated += arena_metadata_allocated_get(arena);
     astats->allocated_large += arena->stats.allocated_large;
     astats->nmalloc_large += arena->stats.nmalloc_large;
     astats->ndalloc_large += arena->stats.ndalloc_large;

src/base.c

@@ -16,6 +16,8 @@ static void *base_next_addr;
 static void *base_past_addr; /* Addr immediately past base_pages. */
 static extent_node_t *base_nodes;
 
+static size_t base_allocated;
+
 /******************************************************************************/
 
 static bool
@@ -54,6 +56,8 @@ base_alloc(size_t size)
     /* Allocate. */
     ret = base_next_addr;
     base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+    if (config_stats)
+        base_allocated += csize;
     malloc_mutex_unlock(&base_mtx);
     JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
@@ -102,6 +106,17 @@ base_node_dalloc(extent_node_t *node)
     malloc_mutex_unlock(&base_mtx);
 }
 
+size_t
+base_allocated_get(void)
+{
+    size_t ret;
+
+    malloc_mutex_lock(&base_mtx);
+    ret = base_allocated;
+    malloc_mutex_unlock(&base_mtx);
+
+    return (ret);
+}
+
 bool
 base_boot(void)
 {

src/ctl.c

@@ -183,10 +183,13 @@ CTL_PROTO(stats_arenas_i_mapped)
 CTL_PROTO(stats_arenas_i_npurge)
 CTL_PROTO(stats_arenas_i_nmadvise)
 CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
 INDEX_PROTO(stats_arenas_i)
 CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
 CTL_PROTO(stats_mapped)
 
 /******************************************************************************/
@@ -355,6 +358,11 @@ static const ctl_named_node_t stats_chunks_node[] = {
     {NAME("high"), CTL(stats_chunks_high)}
 };
 
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+    {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+    {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
+};
+
 static const ctl_named_node_t stats_arenas_i_small_node[] = {
     {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
     {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -432,6 +440,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
     {NAME("npurge"), CTL(stats_arenas_i_npurge)},
     {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
     {NAME("purged"), CTL(stats_arenas_i_purged)},
+    {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
    {NAME("small"), CHILD(named, stats_arenas_i_small)},
     {NAME("large"), CHILD(named, stats_arenas_i_large)},
     {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
@@ -451,6 +460,7 @@ static const ctl_named_node_t stats_node[] = {
     {NAME("cactive"), CTL(stats_cactive)},
     {NAME("allocated"), CTL(stats_allocated)},
     {NAME("active"), CTL(stats_active)},
+    {NAME("metadata"), CTL(stats_metadata)},
     {NAME("mapped"), CTL(stats_mapped)},
     {NAME("chunks"), CHILD(named, stats_chunks)},
     {NAME("arenas"), CHILD(indexed, stats_arenas)}
@@ -484,14 +494,14 @@ ctl_arena_init(ctl_arena_stats_t *astats)
     if (astats->lstats == NULL) {
         astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
-            sizeof(malloc_large_stats_t), false);
+            sizeof(malloc_large_stats_t));
         if (astats->lstats == NULL)
             return (true);
     }
 
     if (astats->hstats == NULL) {
         astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
-            sizeof(malloc_huge_stats_t), false);
+            sizeof(malloc_huge_stats_t));
         if (astats->hstats == NULL)
             return (true);
     }
@@ -551,6 +561,9 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
     sstats->astats.nmadvise += astats->astats.nmadvise;
     sstats->astats.purged += astats->astats.purged;
 
+    sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+    sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
     sstats->allocated_small += astats->allocated_small;
     sstats->nmalloc_small += astats->nmalloc_small;
     sstats->ndalloc_small += astats->ndalloc_small;
@@ -627,7 +640,7 @@ ctl_grow(void)
     /* Allocate extended arena stats. */
     astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
-        sizeof(ctl_arena_stats_t), false);
+        sizeof(ctl_arena_stats_t));
     if (astats == NULL)
         return (true);
@@ -704,6 +717,10 @@ ctl_refresh(void)
         + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
         ctl_stats.active =
             (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+        ctl_stats.metadata = base_allocated_get()
+            + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped
+            + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_allocated;
         ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
     }
@@ -723,7 +740,7 @@ ctl_init(void)
          */
         ctl_stats.narenas = narenas_total_get();
         ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
-            (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t), false);
+            (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
         if (ctl_stats.arenas == NULL) {
             ret = true;
             goto label_return;
@@ -1806,6 +1823,7 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
 CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
 
 CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
@@ -1825,6 +1843,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
     ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
     ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+    ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+    ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
     ctl_stats.arenas[mib[2]].allocated_small, size_t)

src/huge.c

@@ -37,8 +37,8 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     /* Allocate one or more contiguous chunks for this request. */
 
     /* Allocate an extent node with which to track the chunk. */
-    node = ipalloct(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
-        CACHELINE, false, try_tcache, NULL);
+    node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+        CACHELINE, false, try_tcache, true, arena);
     if (node == NULL)
         return (NULL);
@@ -50,7 +50,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     arena = arena_choose(tsd, arena);
     if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
         usize, alignment, &is_zeroed)) == NULL) {
-        idalloct(tsd, node, try_tcache);
+        idalloctm(tsd, node, try_tcache, true);
         return (NULL);
     }
@@ -73,6 +73,33 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     return (ret);
 }
 
+static extent_node_t *
+huge_node_locked(const void *ptr)
+{
+    extent_node_t *node, key;
+
+    /* Extract from tree of huge allocations. */
+    key.addr = __DECONST(void *, ptr);
+    node = extent_tree_ad_search(&huge, &key);
+    assert(node != NULL);
+    assert(node->addr == ptr);
+
+    return (node);
+}
+
+static extent_node_t *
+huge_node(const void *ptr)
+{
+    extent_node_t *node;
+
+    malloc_mutex_lock(&huge_mtx);
+    node = huge_node_locked(ptr);
+    malloc_mutex_unlock(&huge_mtx);
+
+    return (node);
+}
+
 #ifdef JEMALLOC_JET
 #undef huge_dalloc_junk
 #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
@@ -102,7 +129,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
 {
     size_t usize_next;
     bool zeroed;
-    extent_node_t *node, key;
+    extent_node_t *node;
     arena_t *arena;
 
     /* Increase usize to incorporate extra. */
@@ -126,10 +153,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
         zeroed = true;
 
     malloc_mutex_lock(&huge_mtx);
-    key.addr = ptr;
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    assert(node->addr == ptr);
+    node = huge_node_locked(ptr);
     arena = node->arena;
     /* Update the size of the huge allocation. */
     assert(node->size != usize);
@@ -159,7 +183,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 {
     size_t sdiff;
     bool zeroed;
-    extent_node_t *node, key;
+    extent_node_t *node;
     arena_t *arena;
 
     sdiff = CHUNK_CEILING(usize) - usize;
@@ -172,10 +196,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
     }
 
     malloc_mutex_lock(&huge_mtx);
-    key.addr = ptr;
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    assert(node->addr == ptr);
+    node = huge_node_locked(ptr);
     arena = node->arena;
     /* Update the size of the huge allocation. */
     node->size = usize;
@@ -190,7 +211,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 static bool
 huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
     size_t usize;
-    extent_node_t *node, key;
+    extent_node_t *node;
     arena_t *arena;
     bool is_zeroed_subchunk, is_zeroed_chunk;
@@ -201,10 +222,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
     }
 
     malloc_mutex_lock(&huge_mtx);
-    key.addr = ptr;
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    assert(node->addr == ptr);
+    node = huge_node_locked(ptr);
     arena = node->arena;
     is_zeroed_subchunk = node->zeroed;
     malloc_mutex_unlock(&huge_mtx);
@@ -342,77 +360,44 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 void
 huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
 {
-    extent_node_t *node, key;
+    extent_node_t *node;
 
     malloc_mutex_lock(&huge_mtx);
-    /* Extract from tree of huge allocations. */
-    key.addr = ptr;
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    assert(node->addr == ptr);
+    node = huge_node_locked(ptr);
     extent_tree_ad_remove(&huge, node);
     malloc_mutex_unlock(&huge_mtx);
 
     huge_dalloc_junk(node->addr, node->size);
     arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
-    idalloct(tsd, node, try_tcache);
+    idalloctm(tsd, node, try_tcache, true);
+}
+
+arena_t *
+huge_aalloc(const void *ptr)
+{
+
+    return (huge_node(ptr)->arena);
 }
 
 size_t
 huge_salloc(const void *ptr)
 {
-    size_t ret;
-    extent_node_t *node, key;
 
-    malloc_mutex_lock(&huge_mtx);
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    ret = node->size;
-    malloc_mutex_unlock(&huge_mtx);
-
-    return (ret);
+    return (huge_node(ptr)->size);
 }
 
 prof_tctx_t *
 huge_prof_tctx_get(const void *ptr)
 {
-    prof_tctx_t *ret;
-    extent_node_t *node, key;
 
-    malloc_mutex_lock(&huge_mtx);
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    ret = node->prof_tctx;
-    malloc_mutex_unlock(&huge_mtx);
-
-    return (ret);
+    return (huge_node(ptr)->prof_tctx);
 }
 
 void
 huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
-    extent_node_t *node, key;
 
-    malloc_mutex_lock(&huge_mtx);
-    /* Extract from tree of huge allocations. */
-    key.addr = __DECONST(void *, ptr);
-    node = extent_tree_ad_search(&huge, &key);
-    assert(node != NULL);
-    node->prof_tctx = tctx;
-    malloc_mutex_unlock(&huge_mtx);
+    huge_node(ptr)->prof_tctx = tctx;
 }
 
 bool

src/jemalloc.c

@@ -289,45 +289,34 @@ a0get(void)
 }
 
 static void *
-a0imalloc(size_t size, bool zero)
+a0ialloc(size_t size, bool zero, bool is_metadata)
 {
-    void *ret;
 
     if (unlikely(malloc_init_a0()))
         return (NULL);
 
-    if (likely(size <= arena_maxclass))
-        ret = arena_malloc(NULL, a0get(), size, zero, false);
-    else
-        ret = huge_malloc(NULL, a0get(), size, zero, false);
-
-    return (ret);
+    return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
 }
 
 static void
-a0idalloc(void *ptr)
+a0idalloc(void *ptr, bool is_metadata)
 {
-    arena_chunk_t *chunk;
 
-    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    if (likely(chunk != ptr))
-        arena_dalloc(NULL, chunk, ptr, false);
-    else
-        huge_dalloc(NULL, ptr, false);
+    idalloctm(NULL, ptr, false, is_metadata);
 }
 
 void *
-a0malloc(size_t size, bool zero)
+a0malloc(size_t size)
 {
 
-    return (a0imalloc(size, zero));
+    return (a0ialloc(size, false, true));
 }
 
 void
 a0dalloc(void *ptr)
 {
 
-    a0idalloc(ptr);
+    a0idalloc(ptr, true);
 }
 
 /*
@@ -343,7 +332,7 @@ bootstrap_malloc(size_t size)
     if (unlikely(size == 0))
         size = 1;
 
-    return (a0imalloc(size, false));
+    return (a0ialloc(size, false, false));
 }
 
 void *
@@ -357,7 +346,7 @@ bootstrap_calloc(size_t num, size_t size)
         num_size = 1;
     }
 
-    return (a0imalloc(num_size, true));
+    return (a0ialloc(num_size, true, false));
 }
 
 void
@@ -367,7 +356,7 @@ bootstrap_free(void *ptr)
     if (unlikely(ptr == NULL))
         return;
 
-    a0idalloc(ptr);
+    a0idalloc(ptr, false);
 }
 
 /* Create a new arena and insert it into the arenas array at index ind. */
@@ -382,7 +371,7 @@ arena_init_locked(unsigned ind)
         unsigned narenas_new = narenas_total + 1;
         arena_t **arenas_new =
             (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
-            sizeof(arena_t *)), false);
+            sizeof(arena_t *)));
         if (arenas_new == NULL)
             return (NULL);
         memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
@@ -519,7 +508,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
         if (!*arenas_cache_bypassp) {
             *arenas_cache_bypassp = true;
             arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
-                narenas_cache, false);
+                narenas_cache);
             *arenas_cache_bypassp = false;
         } else
             arenas_cache = NULL;
@@ -1202,6 +1191,8 @@ malloc_init_hard_a0_locked(void)
     arena_boot();
     if (config_tcache && tcache_boot())
         return (true);
+    if (config_tcache && tcache_boot())
+        malloc_mutex_unlock(&init_lock);
     if (huge_boot())
         return (true);
     if (malloc_mutex_init(&arenas_lock))

src/prof.c

@@ -532,8 +532,8 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
     /*
      * Create a single allocation that has space for vec of length bt->len.
      */
-    prof_gctx_t *gctx = (prof_gctx_t *)imalloc(tsd, offsetof(prof_gctx_t,
-        vec) + (bt->len * sizeof(void *)));
+    prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+        vec) + (bt->len * sizeof(void *)), false, true, true, NULL);
     if (gctx == NULL)
         return (NULL);
     gctx->lock = prof_gctx_mutex_choose();
@@ -574,7 +574,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
         prof_leave(tsd, tdata_self);
         /* Destroy gctx. */
         malloc_mutex_unlock(gctx->lock);
-        idalloc(tsd, gctx);
+        idalloctm(tsd, gctx, true, true);
     } else {
         /*
          * Compensate for increment in prof_tctx_destroy() or
@@ -674,7 +674,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
         prof_tdata_destroy(tsd, tdata, false);
 
     if (destroy_tctx)
-        idalloc(tsd, tctx);
+        idalloctm(tsd, tctx, true, true);
 }
 
 static bool
@@ -703,7 +703,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
         if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
             /* OOM. */
             prof_leave(tsd, tdata);
-            idalloc(tsd, gctx.v);
+            idalloctm(tsd, gctx.v, true, true);
             return (true);
         }
         new_gctx = true;
@@ -760,7 +760,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
             return (NULL);
 
         /* Link a prof_tctx_t into gctx for this thread. */
-        ret.v = imalloc(tsd, sizeof(prof_tctx_t));
+        ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, true, true,
+            NULL);
         if (ret.p == NULL) {
             if (new_gctx)
                 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -778,7 +779,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
         if (error) {
             if (new_gctx)
                 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-            idalloc(tsd, ret.v);
+            idalloctm(tsd, ret.v, true, true);
             return (NULL);
         }
         malloc_mutex_lock(gctx->lock);
@@ -1158,7 +1159,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
                         to_destroy);
                     tctx_tree_remove(&gctx->tctxs,
                         to_destroy);
-                    idalloc(tsd, to_destroy);
+                    idalloctm(tsd, to_destroy, true, true);
                 } else
                     next = NULL;
             } while (next != NULL);
@@ -1640,7 +1641,8 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
     cassert(config_prof);
 
     /* Initialize an empty cache for this thread. */
-    tdata = (prof_tdata_t *)imalloc(tsd, sizeof(prof_tdata_t));
+    tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+        true, true, NULL);
     if (tdata == NULL)
         return (NULL);
@@ -1653,7 +1655,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
     if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
         prof_bt_hash, prof_bt_keycomp)) {
-        idalloc(tsd, tdata);
+        idalloctm(tsd, tdata, true, true);
         return (NULL);
     }
@@ -1706,9 +1708,9 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
     tdata_tree_remove(&tdatas, tdata);
 
     if (tdata->thread_name != NULL)
-        idalloc(tsd, tdata->thread_name);
+        idalloctm(tsd, tdata->thread_name, true, true);
     ckh_delete(tsd, &tdata->bt2tctx);
-    idalloc(tsd, tdata);
+    idalloctm(tsd, tdata, true, true);
 }
 
 static void
@@ -1869,7 +1871,7 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
     if (size == 1)
         return ("");
 
-    ret = imalloc(tsd, size);
+    ret = iallocztm(tsd, size, false, true, true, NULL);
     if (ret == NULL)
         return (NULL);
     memcpy(ret, thread_name, size);
@@ -1901,7 +1903,7 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
         return (EAGAIN);
 
     if (tdata->thread_name != NULL) {
-        idalloc(tsd, tdata->thread_name);
+        idalloctm(tsd, tdata->thread_name, true, true);
         tdata->thread_name = NULL;
     }
     if (strlen(s) > 0)

src/quarantine.c

@@ -26,8 +26,9 @@ quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
     assert(tsd_nominal(tsd));
 
-    quarantine = (quarantine_t *)imalloc(tsd, offsetof(quarantine_t, objs) +
-        ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+    quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+        + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, true,
+        true, NULL);
     if (quarantine == NULL)
         return (NULL);
     quarantine->curbytes = 0;
@@ -54,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
     if (tsd_quarantine_get(tsd) == NULL)
         tsd_quarantine_set(tsd, quarantine);
     else
-        idalloc(tsd, quarantine);
+        idalloctm(tsd, quarantine, true, true);
 }
 
 static quarantine_t *
@@ -86,7 +87,7 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
         memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
             sizeof(quarantine_obj_t));
     }
-    idalloc(tsd, quarantine);
+    idalloctm(tsd, quarantine, true, true);
 
     tsd_quarantine_set(tsd, ret);
     return (ret);
@@ -176,7 +177,7 @@ quarantine_cleanup(tsd_t *tsd)
     quarantine = tsd_quarantine_get(tsd);
     if (quarantine != NULL) {
         quarantine_drain(tsd, quarantine, 0);
-        idalloc(tsd, quarantine);
+        idalloctm(tsd, quarantine, true, true);
         tsd_quarantine_set(tsd, NULL);
     }
 }

src/stats.c

@@ -265,6 +265,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned nthreads;
     const char *dss;
     size_t page, pactive, pdirty, mapped;
+    size_t metadata_mapped, metadata_allocated;
     uint64_t npurge, nmadvise, purged;
     size_t small_allocated;
     uint64_t small_nmalloc, small_ndalloc, small_nrequests;
@@ -331,6 +332,12 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
     CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
     malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n",
         mapped);
+    CTL_I_GET("stats.arenas.0.metadata.mapped", &metadata_mapped, size_t);
+    CTL_I_GET("stats.arenas.0.metadata.allocated", &metadata_allocated,
+        size_t);
+    malloc_cprintf(write_cb, cbopaque,
+        "metadata: mapped: %zu, allocated: %zu\n", metadata_mapped,
+        metadata_allocated);
 
     if (bins)
         stats_arena_bins_print(write_cb, cbopaque, i);
@@ -539,17 +546,18 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     if (config_stats) {
         size_t *cactive;
-        size_t allocated, active, mapped;
+        size_t allocated, active, metadata, mapped;
         size_t chunks_current, chunks_high;
         uint64_t chunks_total;
 
         CTL_GET("stats.cactive", &cactive, size_t *);
         CTL_GET("stats.allocated", &allocated, size_t);
         CTL_GET("stats.active", &active, size_t);
+        CTL_GET("stats.metadata", &metadata, size_t);
         CTL_GET("stats.mapped", &mapped, size_t);
         malloc_cprintf(write_cb, cbopaque,
-            "Allocated: %zu, active: %zu, mapped: %zu\n",
-            allocated, active, mapped);
+            "Allocated: %zu, active: %zu, metadata: %zu, mapped: %zu\n",
+            allocated, active, metadata, mapped);
         malloc_cprintf(write_cb, cbopaque,
             "Current active ceiling: %zu\n", atomic_read_z(cactive));

src/tcache.c

@@ -298,7 +298,7 @@ tcache_create(tsd_t *tsd, arena_t *arena)
     /* Avoid false cacheline sharing. */
     size = sa2u(size, CACHELINE);
 
-    tcache = ipalloct(tsd, size, CACHELINE, true, false, arena);
+    tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, arena);
     if (tcache == NULL)
         return (NULL);
@@ -353,7 +353,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
         arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
         prof_idump();
 
-    idalloct(tsd, tcache, false);
+    idalloctm(tsd, tcache, false, true);
 }
 
 void

src/tsd.c

@@ -15,7 +15,7 @@ void *
 malloc_tsd_malloc(size_t size)
 {
 
-    return (a0malloc(CACHELINE_CEILING(size), false));
+    return (a0malloc(CACHELINE_CEILING(size)));
 }
 
 void