/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 22
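/*
 * Hence the default chunk size is (1 << 22) bytes == 4 MiB; opt_lg_chunk
 * (declared below) selects a different power of two at run time.
 */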

/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
    ((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
    ((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
    (((s) + chunksize_mask) & ~chunksize_mask)
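
/*
 * Worked example (illustrative only, assuming the default lg_chunk of 22,
 * i.e. chunksize == 0x400000 and chunksize_mask == 0x3fffff):
 *
 *   CHUNK_ADDR2BASE((void *)0x12345678)   == (void *)0x12000000
 *   CHUNK_ADDR2OFFSET((void *)0x12345678) == (size_t)0x345678
 *   CHUNK_CEILING((size_t)0x500000)       == (size_t)0x800000
 *
 * The base/offset split is pure mask arithmetic, which works because chunks
 * are naturally (chunksize-)aligned.
 */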

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern size_t opt_lg_chunk;
extern const char *opt_dss;

/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;

extern rtree_t *chunks_rtree;

extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
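/*
 * Number of pages spanned by each chunk; in the implementation this is
 * chunksize >> LG_PAGE (e.g. 4 MiB chunks with 4 KiB pages give 1024).
 */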
extern size_t chunk_npages;

void *chunk_alloc_base(size_t size);
void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
    chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, void *new_addr,
    size_t size, size_t alignment, bool *zero);
void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
    bool *zero, unsigned arena_ind);
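
/*
 * chunk_alloc_default() and chunk_dalloc_default() have the shapes expected
 * of chunk_alloc_t and chunk_dalloc_t hooks, so user-supplied functions with
 * matching signatures can be substituted per arena.  A minimal sketch of a
 * custom allocation hook (hypothetical and illustrative only; assumes
 * <sys/mman.h>, treats new_addr purely as a placement hint, and omits the
 * chunksize-alignment guarantee that a real hook must provide, e.g. by
 * over-mapping and trimming):
 *
 *     static void *
 *     my_chunk_alloc(void *new_addr, size_t size, size_t alignment,
 *         bool *zero, unsigned arena_ind)
 *     {
 *         void *ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
 *             MAP_PRIVATE | MAP_ANON, -1, 0);
 *
 *         if (ret == MAP_FAILED)
 *             return (NULL);
 *         *zero = true;
 *         return (ret);
 *     }
 *
 * Anonymous mappings are zero-filled, hence *zero is set to true.
 */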
void chunk_unmap(void *chunk, size_t size);
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
void chunk_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"