Fix chunk accounting related to triggering gdump profiles.

Fix in-place huge reallocation to update the chunk counters that are
used for triggering gdump profiles.
Author: Jason Evans
Date: 2016-05-11 00:52:59 -07:00
Parent: 3a9ec67626
Commit: 7790a0ba40
2 changed files with 16 additions and 0 deletions

ChangeLog

@@ -23,6 +23,7 @@ brevity. Much more detail can be found in the git revision history:
to avoid unfortunate interactions during fork(2). (@jasone)
Bug fixes:
- Fix chunk accounting related to triggering gdump profiles. (@jasone)
- Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- Scale leak report summary according to sampling probability. (@jasone)

src/huge.c

@@ -23,6 +23,15 @@ huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
	return (chunk_register(tsdn, ptr, node));
}

static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
	bool err;

	err = huge_node_set(tsdn, ptr, node);
	assert(!err);
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{
@@ -162,8 +171,10 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
@@ -224,7 +235,9 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
@@ -260,7 +273,9 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
	huge_node_reset(tsdn, ptr, node);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
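
All three in-place resize paths now follow the same pattern: unregister the extent node, update its size, then re-register it via huge_node_reset() (and thus chunk_register(), per the hunk above), so the chunk counters that drive gdump profile triggers see the new size. The toy program below is not jemalloc source; it only models that ordering with a hypothetical aggregate counter (registered_bytes and the node_* / resize_in_place helpers are invented for illustration) to show why skipping the re-registration would leave the counter stale.

/*
 * Toy model of the unset / size_set / reset ordering added by this
 * commit.  registered_bytes stands in for the chunk statistics that
 * gdump triggers read; all names are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

static size_t registered_bytes;

static void
node_register(size_t size)	/* huge_node_reset() analogue */
{
	registered_bytes += size;
}

static void
node_unregister(size_t size)	/* huge_node_unset() analogue */
{
	registered_bytes -= size;
}

/* Mirrors the ordering at each in-place resize call site. */
static void
resize_in_place(size_t *node_size, size_t usize)
{
	node_unregister(*node_size);
	*node_size = usize;
	node_register(*node_size);
}

int
main(void)
{
	size_t node_size = 4 << 20;	/* a 4 MiB huge allocation */

	node_register(node_size);
	resize_in_place(&node_size, 8 << 20);
	/* The counter reflects the new size, as the fix intends. */
	assert(registered_bytes == (size_t)(8 << 20));
	printf("registered: %zu bytes\n", registered_bytes);
	return (0);
}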