Rename arena_maxclass to large_maxclass.

arena_maxclass is no longer an appropriate name, because arenas also
manage huge allocations.
Jason Evans 2015-09-11 20:50:20 -07:00
parent 560a4e1e01
commit 676df88e48
8 changed files with 28 additions and 28 deletions
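
Before the per-file hunks, a minimal sketch of the size-class layering behind the rename (SMALL_MAXCLASS and large_maxclass are real jemalloc symbols; the tier enum, the classify() helper, and the illustrative SMALL_MAXCLASS value are assumptions for exposition, not code from this commit):

#include <stddef.h>

extern size_t large_maxclass;          /* Renamed from arena_maxclass. */
#define SMALL_MAXCLASS ((size_t)14336) /* Illustrative value; the real one is
                                        * derived from the size-class config. */

typedef enum { TIER_SMALL, TIER_LARGE, TIER_HUGE } tier_t;

/* Hypothetical helper mirroring the size checks in the hunks below. */
static tier_t
classify(size_t size)
{
    if (size <= SMALL_MAXCLASS)
        return (TIER_SMALL);    /* Served by bin-based small runs. */
    if (size <= large_maxclass)
        return (TIER_LARGE);    /* Served by page runs within a chunk. */
    return (TIER_HUGE);         /* Chunk-multiple allocations; these too are
                                 * arena-managed, which is why the old name
                                 * arena_maxclass had become misleading. */
}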


@@ -424,7 +424,7 @@ extern arena_bin_info_t arena_bin_info[NBINS];
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t map_misc_offset;
 extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
 extern unsigned nlclasses; /* Number of large size classes. */
 extern unsigned nhclasses; /* Number of huge size classes. */
@@ -1143,7 +1143,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     zero));
 } else
 return (arena_malloc_small(arena, size, zero));
-} else if (likely(size <= arena_maxclass)) {
+} else if (likely(size <= large_maxclass)) {
 /*
  * Initialize tcache after checking size in order to avoid
  * infinite recursion during tcache initialization.


@@ -705,7 +705,7 @@ sa2u(size_t size, size_t alignment)
 }
 /* Try for a large size class. */
-if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
+if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
 /*
  * We can't achieve subpage alignment, so round up alignment
  * to the minimum that can actually be supported.
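
The "round up alignment" step in the comment above amounts to a page-size ceiling; a minimal sketch (the PAGE_CEILING form mirrors jemalloc's page-rounding macro, but treat the exact definition and the 4 KiB page size as assumptions):

#include <stddef.h>

#define PAGE            ((size_t)4096) /* Assumes LG_PAGE == 12. */
#define PAGE_MASK       (PAGE - 1)
/* Round s up to the next multiple of the page size. */
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)

/* E.g. a 1024-byte alignment request for a large size class becomes
 * PAGE_CEILING(1024) == 4096, because large runs are page-aligned and
 * sub-page alignment cannot be honored directly. */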


@@ -58,7 +58,6 @@ arena_mapbits_unallocated_set
 arena_mapbits_unallocated_size_get
 arena_mapbits_unallocated_size_set
 arena_mapbits_unzeroed_get
-arena_maxclass
 arena_maxrun
 arena_maybe_purge
 arena_metadata_allocated_add
@@ -285,6 +284,7 @@ ixalloc
 jemalloc_postfork_child
 jemalloc_postfork_parent
 jemalloc_prefork
+large_maxclass
 lg_floor
 malloc_cprintf
 malloc_mutex_init


@@ -11,7 +11,7 @@ arena_bin_info_t arena_bin_info[NBINS];
 size_t map_bias;
 size_t map_misc_offset;
 size_t arena_maxrun; /* Max run size for arenas. */
-size_t arena_maxclass; /* Max size class for arenas. */
+size_t large_maxclass; /* Max large size class. */
 static size_t small_maxrun; /* Max run size used for small size classes. */
 static bool *small_run_tab; /* Valid small run page multiples. */
 unsigned nlclasses; /* Number of large size classes. */
@@ -2357,7 +2357,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     && (usize & PAGE_MASK) == 0))) {
 /* Small; alignment doesn't require special run placement. */
 ret = arena_malloc(tsd, arena, usize, zero, tcache);
-} else if (usize <= arena_maxclass && alignment <= PAGE) {
+} else if (usize <= large_maxclass && alignment <= PAGE) {
 /*
  * Large; alignment doesn't require special run placement.
  * However, the cached pointer may be at a random offset from
@@ -2368,7 +2368,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 if (config_cache_oblivious)
 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
 } else {
-if (likely(usize <= arena_maxclass)) {
+if (likely(usize <= large_maxclass)) {
 ret = arena_palloc_large(tsd, arena, usize, alignment,
     zero);
 } else if (likely(alignment <= chunksize))
@@ -2800,7 +2800,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 extra = HUGE_MAXCLASS - size;
 usize_max = s2u(size + extra);
-if (likely(oldsize <= arena_maxclass && usize_min <= arena_maxclass)) {
+if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
 /*
  * Avoid moving the allocation if the size class can be left the
  * same.
@@ -2852,7 +2852,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 if (usize == 0)
 return (NULL);
-if (likely(usize <= arena_maxclass)) {
+if (likely(usize <= large_maxclass)) {
 size_t copysize;
 /* Try to avoid moving the allocation. */
@@ -3258,17 +3258,17 @@ arena_boot(void)
 arena_maxrun = chunksize - (map_bias << LG_PAGE);
 assert(arena_maxrun > 0);
-arena_maxclass = index2size(size2index(chunksize)-1);
-if (arena_maxclass > arena_maxrun) {
+large_maxclass = index2size(size2index(chunksize)-1);
+if (large_maxclass > arena_maxrun) {
 /*
  * For small chunk sizes it's possible for there to be fewer
  * non-header pages available than are necessary to serve the
  * size classes just below chunksize.
  */
-arena_maxclass = arena_maxrun;
+large_maxclass = arena_maxrun;
 }
-assert(arena_maxclass > 0);
-nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+assert(large_maxclass > 0);
+nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
 nhclasses = NSIZES - nlclasses - NBINS;
 bin_info_init();
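
To make the clamp in arena_boot() concrete, a worked example with illustrative numbers (4 KiB pages assumed; map_bias depends on the chunk header layout): with a 2 MiB chunksize and, say, map_bias == 13, arena_maxrun = 2048 KiB - 52 KiB = 1996 KiB, and the largest size class below chunksize is 1792 KiB, so no clamping occurs. With a hypothetical 16 KiB chunksize and map_bias == 3, arena_maxrun = 16 KiB - 12 KiB = 4 KiB, which is smaller than the 14 KiB class just under chunksize, so large_maxclass is clamped down to arena_maxrun.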


@@ -496,13 +496,13 @@ tcache_boot(void)
 unsigned i;
 /*
- * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+ * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
  * known.
  */
 if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
 tcache_maxclass = SMALL_MAXCLASS;
-else if ((1U << opt_lg_tcache_max) > arena_maxclass)
-tcache_maxclass = arena_maxclass;
+else if ((1U << opt_lg_tcache_max) > large_maxclass)
+tcache_maxclass = large_maxclass;
 else
 tcache_maxclass = (1U << opt_lg_tcache_max);
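
A worked example of the clamp above (values illustrative): opt_lg_tcache_max == 15 gives 1U << 15 == 32 KiB, so tcache_maxclass becomes 32 KiB whenever SMALL_MAXCLASS <= 32 KiB <= large_maxclass. Were opt_lg_tcache_max set to 24 (16 MiB) while large_maxclass is, say, 1792 KiB, tcache_maxclass would be clamped to large_maxclass, since the thread cache serves only small and large size classes.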


@@ -140,7 +140,7 @@ TEST_BEGIN(test_junk_large)
 {
 test_skip_if(!config_fill);
-test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+test_junk(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END
@@ -148,7 +148,7 @@ TEST_BEGIN(test_junk_huge)
 {
 test_skip_if(!config_fill);
-test_junk(arena_maxclass+1, chunksize*2);
+test_junk(large_maxclass+1, chunksize*2);
 }
 TEST_END
@@ -172,8 +172,8 @@ arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
 {
 arena_ralloc_junk_large_orig(ptr, old_usize, usize);
-assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
-assert_zu_eq(usize, shrink_size(arena_maxclass), "Unexpected usize");
+assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
 most_recently_trimmed = ptr;
 }
@@ -181,13 +181,13 @@ TEST_BEGIN(test_junk_large_ralloc_shrink)
 {
 void *p1, *p2;
-p1 = mallocx(arena_maxclass, 0);
+p1 = mallocx(large_maxclass, 0);
 assert_ptr_not_null(p1, "Unexpected mallocx() failure");
 arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
 arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
-p2 = rallocx(p1, shrink_size(arena_maxclass), 0);
+p2 = rallocx(p1, shrink_size(large_maxclass), 0);
 assert_ptr_eq(p1, p2, "Unexpected move during shrink");
 arena_ralloc_junk_large = arena_ralloc_junk_large_orig;


@@ -42,7 +42,7 @@ TEST_BEGIN(test_stats_huge)
 size_t sz;
 int expected = config_stats ? 0 : ENOENT;
-p = mallocx(arena_maxclass+1, 0);
+p = mallocx(large_maxclass+1, 0);
 assert_ptr_not_null(p, "Unexpected mallocx() failure");
 assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -88,7 +88,7 @@ TEST_BEGIN(test_stats_arenas_summary)
 little = mallocx(SMALL_MAXCLASS, 0);
 assert_ptr_not_null(little, "Unexpected mallocx() failure");
-large = mallocx(arena_maxclass, 0);
+large = mallocx(large_maxclass, 0);
 assert_ptr_not_null(large, "Unexpected mallocx() failure");
 huge = mallocx(chunksize, 0);
 assert_ptr_not_null(huge, "Unexpected mallocx() failure");
@@ -200,7 +200,7 @@ TEST_BEGIN(test_stats_arenas_large)
 assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
     0, "Unexpected mallctl() failure");
-p = mallocx(arena_maxclass, 0);
+p = mallocx(large_maxclass, 0);
 assert_ptr_not_null(p, "Unexpected mallocx() failure");
 assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,


@@ -55,7 +55,7 @@ TEST_BEGIN(test_zero_large)
 {
 test_skip_if(!config_fill);
-test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+test_zero(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END
@@ -63,7 +63,7 @@ TEST_BEGIN(test_zero_huge)
 {
 test_skip_if(!config_fill);
-test_zero(arena_maxclass+1, chunksize*2);
+test_zero(large_maxclass+1, chunksize*2);
 }
 TEST_END