Fix huge allocation statistics.

Jason Evans 2014-10-14 22:20:00 -07:00
parent 0cdabd2d48
commit 9b41ac909f
5 changed files with 254 additions and 162 deletions

doc/jemalloc.xml.in

@@ -1719,9 +1719,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</term>
<listitem><para>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
-high, but never low, because each arena rounds up to the nearest
-multiple of the chunk size when computing its contribution to the
-counter. Note that the <link
high, but never low, because each arena rounds up when computing its
contribution to the counter. Note that the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in

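As an aside, the atomic-read requirement described above can be honored from application code roughly as follows. This is a minimal sketch, assuming the hunk above is the stats.cactive entry and that a C11 relaxed atomic load is an acceptable way to read the counter; the pointer cast is for illustration only.

#include <stdatomic.h>
#include <jemalloc/jemalloc.h>

/* Fetch the counter pointer once via mallctl, then read it atomically. */
static size_t
read_active_bytes(void)
{
    size_t *cactive;
    size_t sz = sizeof(cactive);

    if (mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0) != 0)
        return (0);
    return (atomic_load_explicit((_Atomic size_t *)cactive,
        memory_order_relaxed));
}
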
include/jemalloc/internal/arena.h

@@ -338,9 +338,15 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
-void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t usize,
-size_t alignment, bool *zero);
void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
size_t oldsize, size_t usize);
bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
size_t oldsize, size_t usize, bool *zero);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
index_t binind, uint64_t prof_accumbytes);

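The three ralloc prototypes above split in-place huge reallocation by how the chunk-aligned footprint changes. Below is a hedged sketch of the intended dispatch, using a hypothetical helper and assuming the jemalloc-internal CHUNK_CEILING() macro; callers are expected to have filtered out the no-op case already, since the similar path asserts oldsize != usize.

#include "jemalloc/internal/jemalloc_internal.h"

/* Hypothetical dispatcher; returns true on failure, as the expand hook does. */
static bool
huge_ralloc_dispatch(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
    if (CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)) {
        /* Footprint unchanged: stats-only adjustment. */
        arena_chunk_ralloc_huge_similar(arena, chunk, oldsize, usize);
        return (false);
    }
    if (usize < oldsize) {
        /* Footprint shrinks: trailing chunks can be returned. */
        arena_chunk_ralloc_huge_shrink(arena, chunk, oldsize, usize);
        return (false);
    }
    /* Footprint grows: may fail if the trailing space cannot be mapped. */
    return (arena_chunk_ralloc_huge_expand(arena, chunk, oldsize, usize,
        zero));
}
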
include/jemalloc/internal/private_symbols.txt

@@ -13,6 +13,9 @@ arena_choose
arena_choose_hard
arena_chunk_alloc_huge
arena_chunk_dalloc_huge
arena_chunk_ralloc_huge_expand
arena_chunk_ralloc_huge_shrink
arena_chunk_ralloc_huge_similar
arena_cleanup
arena_dalloc
arena_dalloc_bin

src/arena.c

@@ -411,52 +411,6 @@ arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
return (chunk);
}
-void *
-arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t usize,
-size_t alignment, bool *zero)
-{
-void *ret;
-chunk_alloc_t *chunk_alloc;
-chunk_dalloc_t *chunk_dalloc;
-malloc_mutex_lock(&arena->lock);
-chunk_alloc = arena->chunk_alloc;
-chunk_dalloc = arena->chunk_dalloc;
-if (config_stats) {
-index_t index = size2index(usize) - nlclasses - NBINS;
-/* Optimistically update stats prior to unlocking. */
-arena->stats.allocated_huge += usize;
-arena->stats.nmalloc_huge++;
-arena->stats.hstats[index].nmalloc++;
-arena->stats.hstats[index].curhchunks++;
-arena->stats.mapped += usize;
-}
-arena->nactive += (usize >> LG_PAGE);
-malloc_mutex_unlock(&arena->lock);
-ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
-new_addr, usize, alignment, zero);
-if (config_stats) {
-if (ret != NULL)
-stats_cactive_add(usize);
-else {
-index_t index = size2index(usize) - nlclasses - NBINS;
-malloc_mutex_lock(&arena->lock);
-/* Revert optimistic stats updates. */
-arena->stats.allocated_huge -= usize;
-arena->stats.nmalloc_huge--;
-arena->stats.hstats[index].nmalloc--;
-arena->stats.hstats[index].curhchunks--;
-arena->stats.mapped -= usize;
-malloc_mutex_unlock(&arena->lock);
-}
-}
-return (ret);
-}
static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
@@ -528,41 +482,6 @@ arena_chunk_alloc(arena_t *arena)
return (chunk);
}
-static void
-arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
-{
-chunk_dalloc_t *chunk_dalloc;
-chunk_dalloc = arena->chunk_dalloc;
-malloc_mutex_unlock(&arena->lock);
-chunk_dalloc((void *)chunk, chunksize, arena->ind);
-malloc_mutex_lock(&arena->lock);
-if (config_stats)
-arena->stats.mapped -= chunksize;
-}
-void
-arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
-{
-chunk_dalloc_t *chunk_dalloc;
-malloc_mutex_lock(&arena->lock);
-chunk_dalloc = arena->chunk_dalloc;
-if (config_stats) {
-index_t index = size2index(usize) - nlclasses - NBINS;
-arena->stats.ndalloc_huge++;
-arena->stats.allocated_huge -= usize;
-arena->stats.hstats[index].ndalloc++;
-arena->stats.hstats[index].curhchunks--;
-arena->stats.mapped -= usize;
-stats_cactive_sub(usize);
-}
-arena->nactive -= (usize >> LG_PAGE);
-malloc_mutex_unlock(&arena->lock);
-chunk_dalloc(chunk, usize, arena->ind);
-}
static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
@@ -584,17 +503,237 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
chunk_dalloc_t *chunk_dalloc;
arena->spare = chunk;
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
arena_dirty_remove(arena, spare, map_bias,
chunk_npages-map_bias);
}
-arena_chunk_dalloc_internal(arena, spare);
chunk_dalloc = arena->chunk_dalloc;
malloc_mutex_unlock(&arena->lock);
chunk_dalloc((void *)spare, chunksize, arena->ind);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.mapped -= chunksize;
} else
arena->spare = chunk;
}
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.nmalloc_huge++;
arena->stats.allocated_huge += usize;
arena->stats.hstats[index].nmalloc++;
arena->stats.hstats[index].curhchunks++;
}
static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.nmalloc_huge--;
arena->stats.allocated_huge -= usize;
arena->stats.hstats[index].nmalloc--;
arena->stats.hstats[index].curhchunks--;
}
static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge++;
arena->stats.allocated_huge -= usize;
arena->stats.hstats[index].ndalloc++;
arena->stats.hstats[index].curhchunks--;
}
static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
arena->stats.ndalloc_huge--;
arena->stats.allocated_huge += usize;
arena->stats.hstats[index].ndalloc--;
arena->stats.hstats[index].curhchunks++;
}
static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{
arena_huge_dalloc_stats_update(arena, oldsize);
arena_huge_malloc_stats_update(arena, usize);
}
static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
size_t usize)
{
arena_huge_dalloc_stats_update_undo(arena, oldsize);
arena_huge_malloc_stats_update_undo(arena, usize);
}
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
bool *zero)
{
void *ret;
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
size_t csize = CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
/* Optimistically update stats prior to unlocking. */
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
arena->nactive += (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
csize, alignment, zero);
if (ret == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
if (config_stats)
stats_cactive_add(usize);
return (ret);
}
void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
chunk_dalloc_t *chunk_dalloc;
malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
stats_cactive_sub(usize);
}
arena->nactive -= (usize >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
}
void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize) {
size_t udiff = usize - oldsize;
arena->nactive += udiff >> LG_PAGE;
if (config_stats)
stats_cactive_add(udiff);
} else {
size_t udiff = oldsize - usize;
arena->nactive -= udiff >> LG_PAGE;
if (config_stats)
stats_cactive_sub(udiff);
}
malloc_mutex_unlock(&arena->lock);
}
void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
size_t usize)
{
chunk_dalloc_t *chunk_dalloc;
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
malloc_mutex_lock(&arena->lock);
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0) {
arena->stats.mapped -= cdiff;
stats_cactive_sub(udiff);
}
}
arena->nactive -= udiff >> LG_PAGE;
malloc_mutex_unlock(&arena->lock);
if (cdiff != 0)
chunk_dalloc(chunk + CHUNK_CEILING(usize), cdiff, arena->ind);
}
bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
size_t usize, bool *zero)
{
chunk_alloc_t *chunk_alloc;
chunk_dalloc_t *chunk_dalloc;
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
malloc_mutex_lock(&arena->lock);
chunk_alloc = arena->chunk_alloc;
chunk_dalloc = arena->chunk_dalloc;
if (config_stats) {
/* Optimistically update stats prior to unlocking. */
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
arena->nactive += (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, chunk +
CHUNK_CEILING(oldsize), cdiff, chunksize, zero) == NULL) {
/* Revert optimistic stats updates. */
malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena,
oldsize, usize);
arena->stats.mapped -= cdiff;
}
arena->nactive -= (udiff >> LG_PAGE);
malloc_mutex_unlock(&arena->lock);
return (true);
}
if (config_stats)
stats_cactive_add(udiff);
return (false);
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{

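The stats helpers above all map a huge usize to an hstats[] slot with size2index(usize) - nlclasses - NBINS. Below is a minimal sketch (hypothetical helper name) that makes the underlying assumption explicit: size2index() numbers all size classes consecutively, so the huge classes start right after the NBINS small classes and the nlclasses large classes.

#include "jemalloc/internal/jemalloc_internal.h"

/* Hypothetical helper: translate a huge usize into its hstats[] index. */
static index_t
huge_stats_index(size_t usize)
{
    index_t index = size2index(usize) - nlclasses - NBINS;

    assert(index < nhclasses);
    return (index);
}
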
src/huge.c

@@ -31,15 +31,11 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, bool try_tcache)
{
void *ret;
-size_t csize;
extent_node_t *node;
bool is_zeroed;
-/* Allocate one or more contiguous chunks for this request. */
-csize = CHUNK_CEILING(usize);
-assert(csize >= usize);
/* Allocate an extent node with which to track the chunk. */
node = ipalloct(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
CACHELINE, false, try_tcache, NULL);
@@ -56,7 +52,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
base_node_dalloc(node);
return (NULL);
}
-ret = arena_chunk_alloc_huge(arena, NULL, csize, alignment, &is_zeroed);
ret = arena_chunk_alloc_huge(arena, usize, alignment, &is_zeroed);
if (ret == NULL) {
idalloct(tsd, node, try_tcache);
return (NULL);
@@ -104,25 +100,6 @@ huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
-static void
-huge_ralloc_no_move_stats_update(arena_t *arena, size_t oldsize, size_t usize)
-{
-index_t oldindex = size2index(oldsize) - nlclasses - NBINS;
-index_t index = size2index(usize) - nlclasses - NBINS;
-cassert(config_stats);
-arena->stats.ndalloc_huge++;
-arena->stats.allocated_huge -= oldsize;
-arena->stats.hstats[oldindex].ndalloc++;
-arena->stats.hstats[oldindex].curhchunks--;
-arena->stats.nmalloc_huge++;
-arena->stats.allocated_huge += usize;
-arena->stats.hstats[index].nmalloc++;
-arena->stats.hstats[index].curhchunks++;
-}
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
size_t size, size_t extra, bool zero)
@@ -135,34 +112,33 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
usize = usize_next;
-malloc_mutex_lock(&huge_mtx);
if (oldsize == usize)
return;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
arena = node->arena;
-/* Update the size of the huge allocation if it changed. */
-if (oldsize != usize) {
-assert(node->size != usize);
-node->size = usize;
-}
/* Update the size of the huge allocation. */
assert(node->size != usize);
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
-/* Fill if necessary. */
/* Fill if necessary (shrinking). */
if (config_fill && unlikely(opt_junk) && oldsize > usize)
memset(ptr + usize, 0x5a, oldsize - usize);
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
if (zero || (config_fill && unlikely(opt_zero)))
memset(ptr + oldsize, 0, usize - oldsize);
else if (config_fill && unlikely(opt_junk))
memset(ptr + oldsize, 0xa5, usize - oldsize);
-} else if (config_fill && unlikely(opt_junk) && oldsize > usize)
-memset(ptr + usize, 0x5a, oldsize - usize);
-if (config_stats)
-huge_ralloc_no_move_stats_update(arena, oldsize, usize);
}
}
static void
@@ -170,44 +146,28 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node, key;
arena_t *arena;
-void *excess_addr;
-size_t excess_size;
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
arena = node->arena;
/* Update the size of the huge allocation. */
node->size = usize;
malloc_mutex_unlock(&huge_mtx);
-excess_addr = node->addr + CHUNK_CEILING(usize);
-excess_size = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
/* Zap the excess chunks. */
huge_dalloc_junk(ptr + usize, oldsize - usize);
-if (excess_size > 0)
-arena_chunk_dalloc_huge(arena, excess_addr, excess_size);
-if (config_stats)
-huge_ralloc_no_move_stats_update(arena, oldsize, usize);
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
}
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
size_t usize;
-void *expand_addr;
-size_t expand_size;
extent_node_t *node, key;
arena_t *arena;
bool is_zeroed;
-void *ret;
usize = s2u(size);
if (usize == 0) {
@@ -215,19 +175,12 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
return (true);
}
-expand_addr = ptr + CHUNK_CEILING(oldsize);
-expand_size = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
-assert(expand_size > 0);
malloc_mutex_lock(&huge_mtx);
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
arena = node->arena;
malloc_mutex_unlock(&huge_mtx);
/*
@@ -235,12 +188,10 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
-ret = arena_chunk_alloc_huge(arena, expand_addr, expand_size, chunksize,
-&is_zeroed);
-if (ret == NULL)
-return (true);
-assert(ret == expand_addr);
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed))
return (true);
malloc_mutex_lock(&huge_mtx);
/* Update the size of the huge allocation. */
@@ -254,9 +205,6 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
memset(ptr + oldsize, 0, usize - oldsize);
}
-if (config_stats)
-huge_ralloc_no_move_stats_update(arena, oldsize, usize);
return (false);
}
@@ -363,19 +311,16 @@ huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
malloc_mutex_unlock(&huge_mtx);
huge_dalloc_junk(node->addr, node->size);
-arena_chunk_dalloc_huge(node->arena, node->addr,
-CHUNK_CEILING(node->size));
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
idalloct(tsd, node, try_tcache);
}
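
To observe the repaired counters from application code, something along these lines should work. This is a hedged sketch: it assumes the per-arena stats.arenas.<i>.huge.* mallctls that accompany these statistics (arena 0 is used purely for illustration) and refreshes the snapshot via the epoch mallctl first.

#include <inttypes.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_huge_stats(void)
{
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    size_t allocated;
    uint64_t nmalloc, ndalloc;

    /* Advance the epoch so the stats snapshot is current. */
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    sz = sizeof(allocated);
    mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0);
    sz = sizeof(nmalloc);
    mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0);
    sz = sizeof(ndalloc);
    mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0);

    printf("huge: allocated=%zu nmalloc=%" PRIu64 " ndalloc=%" PRIu64 "\n",
        allocated, nmalloc, ndalloc);
}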