a95018ee81
This adds support for expanding huge allocations in place by requesting
memory at a specific address from the chunk allocator. It is currently
only implemented for the chunk recycling path, although in theory it
could also be done by optimistically allocating new chunks. On Linux,
it could attempt an in-place mremap; however, that won't work in
practice since the heap is grown downwards and memory is not unmapped
(in a normal build, at least).

Repeated vector reallocation micro-benchmark:

#include <string.h>
#include <stdlib.h>

int
main(void)
{
	for (size_t i = 0; i < 100; i++) {
		void *ptr = NULL;
		size_t old_size = 0;
		for (size_t size = 4; size < (1 << 30); size *= 2) {
			ptr = realloc(ptr, size);
			if (!ptr)
				return (1);
			memset((char *)ptr + old_size, 0xff, size - old_size);
			old_size = size;
		}
		free(ptr);
	}
	return (0);
}

The glibc allocator fails to do any in-place reallocations on this
benchmark once it passes M_MMAP_THRESHOLD (default 128k), but it elides
the cost of copies via mremap, which is currently not something
jemalloc can use. With this improvement, jemalloc still fails to do any
in-place huge reallocations during the first outer loop iteration
(there are no freed chunks to recycle yet), but then succeeds 100% of
the time for the remaining 99 iterations. The time spent doing
allocations and copies drops to under 5%, with nearly all of it spent
on purging + faulting (when transparent huge pages are disabled) and
the array memset.

An improved mremap API (MREMAP_RETAIN - #138) would be far more
general, but this is a portable optimization and would still be useful
on Linux for xallocx.

Numbers with transparent huge pages enabled:

glibc (copies elided via MREMAP_MAYMOVE): 8.471s
jemalloc: 17.816s
jemalloc + no-op madvise: 13.236s
jemalloc + this commit: 6.787s
jemalloc + this commit + no-op madvise: 6.144s

Numbers with transparent huge pages disabled:

glibc (copies elided via MREMAP_MAYMOVE): 15.403s
jemalloc: 39.456s
jemalloc + no-op madvise: 12.768s
jemalloc + this commit: 15.534s
jemalloc + this commit + no-op madvise: 6.354s

Closes #137
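Since this lands in the xallocx path, the effect can be observed
directly: xallocx() only ever resizes in place and returns the
resulting usable size, so a return value at least as large as the
request means the huge allocation grew without moving. A minimal
sketch, assuming the default chunk-sized huge threshold so that 4 MiB
qualifies as a huge allocation:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t huge = (size_t)4 << 20;	/* assumed large enough to be huge */
	void *p = mallocx(huge, 0);

	if (p == NULL)
		return (1);
	/*
	 * Try to double the allocation without moving it; with this
	 * commit the request can be satisfied when recyclable chunks
	 * follow p.
	 */
	if (xallocx(p, huge * 2, 0, 0) >= huge * 2)
		printf("expanded in place\n");
	else
		printf("in-place expansion failed\n");
	dallocx(p, 0);
	return (0);
}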
#include "test/jemalloc_test.h"
|
|
|
|
chunk_alloc_t *old_alloc;
|
|
chunk_dalloc_t *old_dalloc;
|
|
|
|
bool
|
|
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
|
|
{
|
|
|
|
return (old_dalloc(chunk, size, arena_ind));
|
|
}
|
|
|
|
void *
|
|
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
|
|
unsigned arena_ind)
|
|
{
|
|
|
|
return (old_alloc(new_addr, size, alignment, zero, arena_ind));
|
|
}
|
|
|
|
TEST_BEGIN(test_chunk)
|
|
{
|
|
void *p;
|
|
chunk_alloc_t *new_alloc;
|
|
chunk_dalloc_t *new_dalloc;
|
|
size_t old_size, new_size;
|
|
|
|
new_alloc = chunk_alloc;
|
|
new_dalloc = chunk_dalloc;
|
|
old_size = sizeof(chunk_alloc_t *);
|
|
new_size = sizeof(chunk_alloc_t *);
|
|
|
|
assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc,
|
|
&old_size, &new_alloc, new_size), 0,
|
|
"Unexpected alloc error");
|
|
assert_ptr_ne(old_alloc, new_alloc,
|
|
"Unexpected alloc error");
|
|
assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
|
|
&new_dalloc, new_size), 0, "Unexpected dalloc error");
|
|
assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
|
|
|
|
p = mallocx(42, 0);
|
|
assert_ptr_ne(p, NULL, "Unexpected alloc error");
|
|
free(p);
|
|
|
|
assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
|
|
NULL, &old_alloc, old_size), 0,
|
|
"Unexpected alloc error");
|
|
assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
|
|
old_size), 0, "Unexpected dalloc error");
|
|
}
|
|
TEST_END
|
|
|
|
int
|
|
main(void)
|
|
{
|
|
|
|
return (test(test_chunk));
|
|
}
|
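The wrappers above just delegate, but the new_addr parameter is where
this commit's in-place expansion meets a custom chunk allocator: a
non-NULL new_addr requests memory at exactly that address, and the
contract assumed here is that the hook returns that address or NULL on
failure. A hedged sketch reusing old_alloc from the test above; the
chunk_alloc_observe and expansion_requests names are invented for
illustration:

static unsigned expansion_requests;

void *
chunk_alloc_observe(void *new_addr, size_t size, size_t alignment,
    bool *zero, unsigned arena_ind)
{

	/*
	 * A non-NULL new_addr signals an attempt to extend an existing
	 * huge allocation in place.
	 */
	if (new_addr != NULL)
		expansion_requests++;
	return (old_alloc(new_addr, size, alignment, zero, arena_ind));
}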