Refactor huge allocation to be managed by arenas.

Refactor huge allocation to be managed by arenas (though the global
red-black tree of huge allocations remains for lookup during
deallocation).  This is the logical conclusion of recent changes that 1)
made per arena dss precedence apply to huge allocation, and 2) made it
possible to replace the per arena chunk allocation/deallocation
functions.
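
As background for 2), here is a minimal sketch of how a client can swap in its own chunk functions through the mallctl interface that the chunk integration test below exercises. The chunk_dalloc_t signature is taken from the diff; the chunk_alloc_t signature, the visibility of both typedefs from the public header, and the my_chunk_*/install_chunk_hooks names are assumptions for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static chunk_alloc_t *orig_alloc;
static chunk_dalloc_t *orig_dalloc;

/*
 * Hypothetical hooks that simply delegate to the originals; a real
 * client could allocate from a reserved address range instead.
 */
static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
	return (orig_alloc(size, alignment, zero, arena_ind));
}

static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
	return (orig_dalloc(chunk, size, arena_ind));
}

static void
install_chunk_hooks(void)
{
	chunk_alloc_t *new_alloc = my_chunk_alloc;
	chunk_dalloc_t *new_dalloc = my_chunk_dalloc;
	size_t sz = sizeof(chunk_alloc_t *);

	/* Each call reads back the old function and installs the new one. */
	mallctl("arena.0.chunk.alloc", &orig_alloc, &sz, &new_alloc, sz);
	mallctl("arena.0.chunk.dalloc", &orig_dalloc, &sz, &new_dalloc, sz);
}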

Remove the top level huge stats, and replace them with per arena huge
stats.
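
A short sketch of reading the new per arena huge stats; the mallctl names appear verbatim in the updated stats test below, and bumping the "epoch" control first refreshes jemalloc's cached statistics. The print_arena0_huge_stats wrapper is hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <jemalloc/jemalloc.h>

static void
print_arena0_huge_stats(void)
{
	uint64_t epoch = 1;
	uint64_t nmalloc, ndalloc;
	size_t allocated, sz;

	/* Refresh jemalloc's cached stats before reading them. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(size_t);
	mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0);
	sz = sizeof(uint64_t);
	mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0);
	mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0);

	printf("huge: allocated=%zu nmalloc=%" PRIu64 " ndalloc=%" PRIu64 "\n",
	    allocated, nmalloc, ndalloc);
}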

Normalize function names and types to *dalloc* (some were *dealloc*).

Remove the --enable-mremap option.  As jemalloc currently operates, this
is a performance regression for some applications, but planned work to
logarithmically space huge size classes should provide similar amortized
performance.  The motivation for this change was that mremap-based huge
reallocation forced leaky abstractions that prevented refactoring.
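
As a rough illustration of the amortization argument (an assumption about the planned design, not something this commit implements): with huge size classes spaced by a factor of 2, an allocation grown step by step from one chunk of size c up to n bytes is copied at sizes c, 2c, 4c, ..., n/2, so less than n bytes are copied in total over the whole growth sequence, i.e. O(1) amortized work per byte, comparable to what in-place mremap-based growth provided.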
Jason Evans
2014-05-15 22:22:27 -07:00
parent fb7fe50a88
commit e2deab7a75
28 changed files with 384 additions and 470 deletions

View File

@@ -1,13 +1,13 @@
 #include "test/jemalloc_test.h"
 chunk_alloc_t *old_alloc;
-chunk_dealloc_t *old_dealloc;
+chunk_dalloc_t *old_dalloc;
 bool
-chunk_dealloc(void *chunk, size_t size, unsigned arena_ind)
+chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
 {
-	return (old_dealloc(chunk, size, arena_ind));
+	return (old_dalloc(chunk, size, arena_ind));
 }
 void *
@@ -21,11 +21,11 @@ TEST_BEGIN(test_chunk)
 {
 	void *p;
 	chunk_alloc_t *new_alloc;
-	chunk_dealloc_t *new_dealloc;
+	chunk_dalloc_t *new_dalloc;
 	size_t old_size, new_size;
 	new_alloc = chunk_alloc;
-	new_dealloc = chunk_dealloc;
+	new_dalloc = chunk_dalloc;
 	old_size = sizeof(chunk_alloc_t *);
 	new_size = sizeof(chunk_alloc_t *);
@@ -34,11 +34,9 @@ TEST_BEGIN(test_chunk)
 	    "Unexpected alloc error");
 	assert_ptr_ne(old_alloc, new_alloc,
 	    "Unexpected alloc error");
-	assert_d_eq(mallctl("arena.0.chunk.dealloc", &old_dealloc,
-	    &old_size, &new_dealloc, new_size), 0,
-	    "Unexpected dealloc error");
-	assert_ptr_ne(old_dealloc, new_dealloc,
-	    "Unexpected dealloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
+	    &new_dalloc, new_size), 0, "Unexpected dalloc error");
+	assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
 	p = mallocx(42, 0);
 	assert_ptr_ne(p, NULL, "Unexpected alloc error");
@@ -47,9 +45,8 @@ TEST_BEGIN(test_chunk)
 	assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
 	    NULL, &old_alloc, old_size), 0,
 	    "Unexpected alloc error");
-	assert_d_eq(mallctl("arena.0.chunk.dealloc", NULL,
-	    NULL, &old_dealloc, old_size), 0,
-	    "Unexpected dealloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
+	    old_size), 0, "Unexpected dalloc error");
 }
 TEST_END

View File

@@ -1,45 +0,0 @@
-#include "test/jemalloc_test.h"
-TEST_BEGIN(test_mremap)
-{
-	int err;
-	size_t sz, lg_chunk, chunksize, i;
-	char *p, *q;
-	sz = sizeof(lg_chunk);
-	err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
-	assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
-	chunksize = ((size_t)1U) << lg_chunk;
-	p = (char *)malloc(chunksize);
-	assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
-	memset(p, 'a', chunksize);
-	q = (char *)realloc(p, chunksize * 2);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
-	    q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-	p = q;
-	q = (char *)realloc(p, chunksize);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-	free(q);
-}
-TEST_END
-int
-main(void)
-{
-	return (test(
-	    test_mremap));
-}

View File

@@ -92,12 +92,9 @@ test_junk(size_t sz_min, size_t sz_max)
 		s = (char *)rallocx(s, sz+1, 0);
 		assert_ptr_not_null((void *)s,
 		    "Unexpected rallocx() failure");
-		if (!config_mremap || sz+1 <= arena_maxclass) {
-			assert_ptr_eq(most_recently_junked, junked,
-			    "Expected region of size %zu to be "
-			    "junk-filled",
-			    sz);
-		}
+		assert_ptr_eq(most_recently_junked, junked,
+		    "Expected region of size %zu to be junk-filled",
+		    sz);
 	}
 }

View File

@@ -129,7 +129,6 @@ TEST_BEGIN(test_mallctl_config)
 	TEST_MALLCTL_CONFIG(debug);
 	TEST_MALLCTL_CONFIG(fill);
 	TEST_MALLCTL_CONFIG(lazy_lock);
-	TEST_MALLCTL_CONFIG(mremap);
 	TEST_MALLCTL_CONFIG(munmap);
 	TEST_MALLCTL_CONFIG(prof);
 	TEST_MALLCTL_CONFIG(prof_libgcc);

View File

@@ -60,7 +60,7 @@ TEST_BEGIN(test_stats_huge)
 	void *p;
 	uint64_t epoch;
 	size_t allocated;
-	uint64_t nmalloc, ndalloc;
+	uint64_t nmalloc, ndalloc, nrequests;
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;
@@ -71,19 +71,23 @@
 	    "Unexpected mallctl() failure");
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
 		    "allocated should be greater than zero");
 		assert_u64_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_le(nmalloc, nrequests,
+		    "nmalloc should be no larger than nrequests");
 	}
 	dallocx(p, 0);