Refactor arena_cactive_update() into arena_cactive_{add,sub}().

This removes an implicit conversion from size_t to ssize_t.  For cactive
decreases, the size_t value was intentionally underflowed to generate
"negative" values (actually positive values above the positive range of
ssize_t), and converting such out-of-range values back to ssize_t is not
well defined by the C standard, so the intended negative value was not
guaranteed to be recovered.
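
A minimal standalone sketch of the failure mode (not jemalloc code:
CHUNK_CEILING, LG_PAGE, and the 4 KiB chunk size below are simplified
stand-ins chosen only for illustration):

#include <stdio.h>
#include <sys/types.h>	/* ssize_t (POSIX) */

/* Simplified stand-in that rounds up to a 4 KiB "chunk" boundary. */
#define	FAKE_CHUNK_CEILING(s)	(((s) + 0xfffU) & ~(size_t)0xfff)
#define	FAKE_LG_PAGE		12

int
main(void)
{
	size_t nactive = 16;	/* hypothetical count of active pages */
	size_t sub_pages = 4;	/* pages being deallocated */

	/*
	 * Old approach: one "diff" computed in unsigned arithmetic.  When the
	 * footprint shrinks, the subtraction wraps around to a huge positive
	 * size_t, and only the conversion to ssize_t makes it look negative.
	 */
	size_t diff = FAKE_CHUNK_CEILING((nactive - sub_pages) << FAKE_LG_PAGE)
	    - FAKE_CHUNK_CEILING(nactive << FAKE_LG_PAGE);
	printf("wrapped size_t diff:  %zu\n", diff);
	printf("converted to ssize_t: %zd (implementation-defined)\n",
	    (ssize_t)diff);

	/*
	 * New approach: compute the magnitude of the decrease directly, so
	 * the value is always in range and no signed conversion is needed.
	 */
	size_t sub = FAKE_CHUNK_CEILING(nactive << FAKE_LG_PAGE) -
	    FAKE_CHUNK_CEILING((nactive - sub_pages) << FAKE_LG_PAGE);
	printf("explicit decrease:    %zu\n", sub);
	return (0);
}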

This regression was perpetuated by
1522937e9c (Fix the cactive statistic.)
and first released in 4.0.0, which in retrospect only fixed one of two
problems introduced by aa5113b1fd
(Refactor overly large/complex functions) and first released in 3.5.0.
commit 3763d3b5f9 (parent a62e94cabb)
Author: Jason Evans
Date:   2016-02-26 17:29:35 -08:00


@@ -373,15 +373,27 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 }
 
 static void
-arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
+arena_cactive_add(arena_t *arena, size_t add_pages)
 {
 
 	if (config_stats) {
-		ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
-		    - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+		size_t cactive_add = CHUNK_CEILING((arena->nactive +
+		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
 		    LG_PAGE);
-		if (cactive_diff != 0)
-			stats_cactive_add(cactive_diff);
+		if (cactive_add != 0)
+			stats_cactive_add(cactive_add);
+	}
+}
+
+static void
+arena_cactive_sub(arena_t *arena, size_t sub_pages)
+{
+
+	if (config_stats) {
+		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
+		if (cactive_sub != 0)
+			stats_cactive_sub(cactive_sub);
 	}
 }
 
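
The split relies on CHUNK_CEILING() being monotonically non-decreasing, so
each helper's delta is non-negative and always representable as size_t.  A
rough standalone sketch of that invariant (not jemalloc code; the FAKE_*
macros and the 2 MiB chunk size are assumptions for illustration):

#include <stdio.h>

#define	FAKE_LG_PAGE		12
/* Round up to a 2 MiB "chunk" boundary (assumed chunk size). */
#define	FAKE_CHUNK_CEILING(s)	(((s) + 0x1fffffU) & ~(size_t)0x1fffff)

/*
 * Mirrors the shape of the new helpers: each delta is computed as
 * (larger ceiling) - (smaller ceiling), so it never wraps.
 */
static size_t
cactive_add_delta(size_t nactive, size_t add_pages)
{
	return (FAKE_CHUNK_CEILING((nactive + add_pages) << FAKE_LG_PAGE) -
	    FAKE_CHUNK_CEILING(nactive << FAKE_LG_PAGE));
}

static size_t
cactive_sub_delta(size_t nactive, size_t sub_pages)
{
	return (FAKE_CHUNK_CEILING(nactive << FAKE_LG_PAGE) -
	    FAKE_CHUNK_CEILING((nactive - sub_pages) << FAKE_LG_PAGE));
}

int
main(void)
{
	size_t nactive = 500;	/* ~2 MiB of active pages (hypothetical) */

	/* Deltas are nonzero only when a chunk boundary is crossed. */
	printf("grow by 200 pages:   cactive += %zu\n",
	    cactive_add_delta(nactive, 200));
	printf("shrink by 200 pages: cactive -= %zu\n",
	    cactive_sub_delta(nactive, 200));
	return (0);
}

Note that both helpers read arena->nactive internally, so the call sites in
the hunks below invoke them before arena->nactive is adjusted.
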
@@ -403,7 +415,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
 	arena_avail_remove(arena, chunk, run_ind, total_pages);
 	if (flag_dirty != 0)
 		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
-	arena_cactive_update(arena, need_pages, 0);
+	arena_cactive_add(arena, need_pages);
 	arena->nactive += need_pages;
 
 	/* Keep track of trailing unused pages for later use. */
@@ -1915,7 +1927,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
 	assert(run_ind < chunk_npages);
 	size = arena_run_size_get(arena, chunk, run, run_ind);
 	run_pages = (size >> LG_PAGE);
-	arena_cactive_update(arena, 0, run_pages);
+	arena_cactive_sub(arena, run_pages);
 	arena->nactive -= run_pages;
 
 	/*