Implement support for non-coalescing maps on MinGW.

- Do not reallocate huge objects in place if the number of backing
  chunks would change.
- Do not cache multi-chunk mappings.

This resolves #213.
Author: Jason Evans
Date: 2015-07-24 18:21:42 -07:00
Parent: 40cbd30d50
Commit: d059b9d6a1
7 changed files with 44 additions and 4 deletions
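The first bullet in the commit message is the key constraint: on a platform whose virtual memory mappings cannot be coalesced or split (Windows VirtualAlloc regions under MinGW, where VirtualFree with MEM_RELEASE can only free an entire region), a huge object can only be resized in place when its chunk count stays the same. Below is a minimal sketch of that rule, assuming a fixed 2 MiB chunk size and a hypothetical can_resize_in_place() helper; it is illustration only, not jemalloc source.

#include <stdbool.h>
#include <stddef.h>

/* Assumed 2 MiB chunk size for this sketch; jemalloc's is configurable. */
#define CHUNK_SIZE	((size_t)1 << 21)
/* Round a size up to a multiple of the chunk size. */
#define CHUNK_CEILING(s)	(((s) + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1))

/*
 * Hypothetical helper: with coalescing maps any resize may be attempted in
 * place; without coalescing, only resizes that keep the number of backing
 * chunks unchanged are safe, because crossing a chunk boundary would require
 * splitting or merging OS mappings.
 */
static bool
can_resize_in_place(bool maps_coalesce, size_t oldsize, size_t newsize)
{
	if (maps_coalesce)
		return (true);
	return (CHUNK_CEILING(oldsize) == CHUNK_CEILING(newsize));
}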

src/chunk.c

@@ -337,6 +337,7 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
 	extent_node_t *node, *prev;
 	extent_node_t key;
+	assert(maps_coalesce || size == chunksize);
 	assert(!cache || !zeroed);
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

@@ -421,6 +422,11 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
+	if (!maps_coalesce && size != chunksize) {
+		chunk_dalloc_arena(arena, chunk, size, false);
+		return;
+	}
 	chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
 	    true, chunk, size, false);
 	arena_maybe_purge(arena);
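The maps_coalesce flag queried in both hunks above is introduced by this commit as a compile-time constant that is false on MinGW, so multi-chunk regions bypass the cache and go straight back to the OS. The sketch below shows one way such a flag can be wired up; the JEMALLOC_MAPS_COALESCE macro name and the definition site are assumptions for illustration, not a quote of the header change (presumably among the other files this commit touches).

#include <stdbool.h>

/*
 * Sketch only: expose "can the OS merge adjacent mappings?" as a constant
 * the compiler can fold away, so the new checks cost nothing on platforms
 * where mappings do coalesce.
 */
static const bool	maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;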

src/huge.c

@@ -304,6 +304,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 		return (false);
 	}
+	if (!maps_coalesce)
+		return (true);
 	/* Shrink the allocation in-place. */
 	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
 		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
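Note the return convention here: huge_ralloc_no_move() returning true means the request could not be satisfied without moving the object, so the new early return makes the caller fall back to allocate-copy-free whenever the chunk count would have to change on a non-coalescing platform. A rough sketch of such a caller, using hypothetical helper names rather than the actual jemalloc call chain:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical helpers for this sketch; not jemalloc APIs. */
bool try_resize_in_place(void *ptr, size_t oldsize, size_t newsize);
void *my_alloc(size_t size);
void my_free(void *ptr, size_t size);

/* Try an in-place resize first; on failure (true), allocate, copy, free. */
static void *
ralloc_sketch(void *ptr, size_t oldsize, size_t newsize)
{
	if (!try_resize_in_place(ptr, oldsize, newsize))
		return (ptr);	/* resized in place */

	void *new_ptr = my_alloc(newsize);
	if (new_ptr == NULL)
		return (NULL);
	memcpy(new_ptr, ptr, oldsize < newsize ? oldsize : newsize);
	my_free(ptr, oldsize);
	return (new_ptr);
}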