#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
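
/*
 * Allocate a large extent of usable size usize with the requested alignment.
 * The extent is carved out of the chosen arena, appended to that arena's
 * large list (under large_mtx) so that stats and deallocation can find it
 * later, and then zero- or junk-filled according to the zero flag and the
 * fill options.
 */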
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* Insert extent into large. */
	malloc_mutex_lock(tsdn, &arena->large_mtx);
	extent_list_append(&arena->large, extent);
	malloc_mutex_unlock(tsdn, &arena->large_mtx);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}
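
/*
 * Under JEMALLOC_JET the blocks below rename the real junk-fill routines
 * (n_large_dalloc_junk, n_large_dalloc_maybe_junk) and re-expose them through
 * function pointers bound to the public names, so that test code can
 * interpose on them.
 */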
#ifdef JEMALLOC_JET
#undef large_dalloc_junk
#define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk)
#endif
void
large_dalloc_junk(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
#ifdef JEMALLOC_JET
#undef large_dalloc_junk
#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk);
#endif

#ifdef JEMALLOC_JET
#undef large_dalloc_maybe_junk
#define large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk)
#endif
void
large_dalloc_maybe_junk(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
#ifdef JEMALLOC_JET
#undef large_dalloc_maybe_junk
#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
large_dalloc_maybe_junk_t *large_dalloc_maybe_junk =
    JEMALLOC_N(n_large_dalloc_maybe_junk);
#endif
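
/*
 * In-place shrink: split the trailing excess off of the extent, junk-fill the
 * trail if requested, and return the trail to the arena's dirty extents.
 */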
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + large_pad, size2index(usize),
		    false, diff, NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}
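
/*
 * In-place expand: try to obtain the extent immediately following this one
 * (first from the dirty and muzzy extent caches, otherwise via a fresh
 * mapping), merge it in, and update the extent's size class in the rtree.
 */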
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}
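
/*
 * Try to resize in place.  Returns false on success, in which case the
 * extent's usable size now lies in [usize_min, usize_max]; returns true if
 * the caller must instead fall back to allocate-copy-free, as large_ralloc()
 * does below.
 */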
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}
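
/*
 * Reallocate a large allocation: first attempt an in-place resize; failing
 * that, allocate a new extent at the requested alignment, copy
 * min(usize, oldusize) bytes, and free the old allocation.
 */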
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, true);
	return ret;
}
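
/*
 * Large deallocation happens in two phases: a prep step that unlinks the
 * extent from the arena's large list (junk-filling it unless the caller
 * already has), and a finish step that returns the extent to the arena's
 * dirty extents.  As a rough, illustrative sketch, a caller that has already
 * junk-filled the data while holding large_mtx could do:
 *
 *	large_dalloc_prep_junked_locked(tsdn, extent);
 *	... release large_mtx ...
 *	large_dalloc_finish(tsdn, extent);
 *
 * large_dalloc() below performs both phases, plus a decay tick, for the
 * common case.
 */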
/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_remove(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
		extent_list_remove(&arena->large, extent);
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}
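
/*
 * The remaining accessors simply read or write metadata stored in the extent
 * itself.
 */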
size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}