diff --git a/Makefile.in b/Makefile.in
index f90e2a4f..a24fde95 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -82,11 +82,11 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/base.c \
$(srcroot)src/bitmap.c \
$(srcroot)src/chunk.c \
- $(srcroot)src/chunk_dss.c \
- $(srcroot)src/chunk_mmap.c \
$(srcroot)src/ckh.c \
$(srcroot)src/ctl.c \
$(srcroot)src/extent.c \
+ $(srcroot)src/extent_dss.c \
+ $(srcroot)src/extent_mmap.c \
$(srcroot)src/hash.c \
$(srcroot)src/large.c \
$(srcroot)src/mb.c \
@@ -171,16 +171,16 @@ TESTS_UNIT := \
$(srcroot)test/unit/zero.c
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
- $(srcroot)test/integration/sdallocx.c \
+ $(srcroot)test/integration/extent.c \
$(srcroot)test/integration/mallocx.c \
$(srcroot)test/integration/MALLOCX_ARENA.c \
$(srcroot)test/integration/overflow.c \
$(srcroot)test/integration/posix_memalign.c \
$(srcroot)test/integration/rallocx.c \
+ $(srcroot)test/integration/sdallocx.c \
$(srcroot)test/integration/thread_arena.c \
$(srcroot)test/integration/thread_tcache_enabled.c \
- $(srcroot)test/integration/xallocx.c \
- $(srcroot)test/integration/chunk.c
+ $(srcroot)test/integration/xallocx.c
TESTS_STRESS := $(srcroot)test/stress/microbench.c
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 0707b863..bc169756 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -278,22 +278,22 @@ extern ssize_t opt_decay_time;
extern const arena_bin_info_t arena_bin_info[NBINS];
-extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+extent_t *arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero);
-void arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, extent_t *extent);
-void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
+void arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent,
bool cache);
-void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
+void arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
-extent_t *arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena,
+extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
-void arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool locked);
-void arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
+void arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,
+ extent_t *extent, bool locked);
+void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
-void arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
+void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index ddfa0046..7a5ebbca 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -26,40 +26,7 @@ extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
-extern const extent_hooks_t extent_hooks_default;
-
-extent_hooks_t extent_hooks_get(tsdn_t *tsdn, arena_t *arena);
-extent_hooks_t extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
- const extent_hooks_t *extent_hooks);
-
-extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool slab);
-extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab);
-void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent);
-bool chunk_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent, size_t size_a,
- size_t usize_a, size_t size_b, size_t usize_b);
-bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *a, extent_t *b);
bool chunk_boot(void);
-void chunk_prefork(tsdn_t *tsdn);
-void chunk_postfork_parent(tsdn_t *tsdn);
-void chunk_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -67,6 +34,3 @@ void chunk_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
-
-#include "jemalloc/internal/chunk_dss.h"
-#include "jemalloc/internal/chunk_mmap.h"
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 8552f701..a41a15ff 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -38,7 +38,7 @@ struct extent_s {
bool e_active;
/*
- * The zeroed flag is used by chunk recycling code to track whether
+ * The zeroed flag is used by extent recycling code to track whether
* memory is zero-filled.
*/
bool e_zeroed;
@@ -87,11 +87,16 @@ typedef ph(extent_t) extent_heap_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-extern rtree_t extents_rtree;
+extern rtree_t extents_rtree;
+extern const extent_hooks_t extent_hooks_default;
extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+extent_hooks_t extent_hooks_get(tsdn_t *tsdn, arena_t *arena);
+extent_hooks_t extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
+ const extent_hooks_t *extent_hooks);
+
#ifdef JEMALLOC_JET
typedef size_t (extent_size_quantize_t)(size_t);
extern extent_size_quantize_t *extent_size_quantize_floor;
@@ -103,6 +108,34 @@ size_t extent_size_quantize_ceil(size_t size);
ph_proto(, extent_heap_, extent_heap_t, extent_t)
+extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
+ size_t alignment, bool *zero, bool slab);
+extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
+ size_t alignment, bool *zero, bool *commit, bool slab);
+void extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent);
+void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent);
+bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+bool extent_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
+ size_t length);
+extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *extent, size_t size_a,
+ size_t usize_a, size_t size_b, size_t usize_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_t *a, extent_t *b);
+void extent_prefork(tsdn_t *tsdn);
+void extent_postfork_parent(tsdn_t *tsdn);
+void extent_postfork_child(tsdn_t *tsdn);
+
bool extent_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
@@ -110,7 +143,7 @@ bool extent_boot(void);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-extent_t *extent_lookup(tsdn_t *tsdn, const void *chunk, bool dependent);
+extent_t *extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent);
arena_t *extent_arena_get(const extent_t *extent);
void *extent_base_get(const extent_t *extent);
void *extent_addr_get(const extent_t *extent);
@@ -395,3 +428,5 @@ extent_ring_remove(extent_t *extent)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/extent_dss.h
similarity index 71%
rename from include/jemalloc/internal/chunk_dss.h
rename to include/jemalloc/internal/extent_dss.h
index 724fa579..43573775 100644
--- a/include/jemalloc/internal/chunk_dss.h
+++ b/include/jemalloc/internal/extent_dss.h
@@ -21,15 +21,15 @@ extern const char *dss_prec_names[];
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn);
-bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
-void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+dss_prec_t extent_dss_prec_get(tsdn_t *tsdn);
+bool extent_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
+void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(tsdn_t *tsdn, void *chunk);
-bool chunk_dss_boot(void);
-void chunk_dss_prefork(tsdn_t *tsdn);
-void chunk_dss_postfork_parent(tsdn_t *tsdn);
-void chunk_dss_postfork_child(tsdn_t *tsdn);
+bool extent_in_dss(tsdn_t *tsdn, void *addr);
+bool extent_dss_boot(void);
+void extent_dss_prefork(tsdn_t *tsdn);
+void extent_dss_postfork_parent(tsdn_t *tsdn);
+void extent_dss_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/chunk_mmap.h b/include/jemalloc/internal/extent_mmap.h
similarity index 84%
rename from include/jemalloc/internal/chunk_mmap.h
rename to include/jemalloc/internal/extent_mmap.h
index 6f2d0ac2..3c1a7884 100644
--- a/include/jemalloc/internal/chunk_mmap.h
+++ b/include/jemalloc/internal/extent_mmap.h
@@ -9,9 +9,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
+void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
bool *zero, bool *commit);
-bool chunk_dalloc_mmap(void *chunk, size_t size);
+bool extent_dalloc_mmap(void *addr, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 92f91e44..a2f093ee 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -9,14 +9,6 @@ arena_boot
arena_choose
arena_choose_hard
arena_choose_impl
-arena_chunk_alloc_large
-arena_chunk_cache_alloc
-arena_chunk_cache_dalloc
-arena_chunk_cache_maybe_insert
-arena_chunk_cache_maybe_remove
-arena_chunk_dalloc_large
-arena_chunk_ralloc_large_expand
-arena_chunk_ralloc_large_shrink
arena_cleanup
arena_dalloc
arena_dalloc_bin_junked_locked
@@ -31,6 +23,14 @@ arena_decay_time_get
arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
+arena_extent_alloc_large
+arena_extent_cache_alloc
+arena_extent_cache_dalloc
+arena_extent_cache_maybe_insert
+arena_extent_cache_maybe_remove
+arena_extent_dalloc_large
+arena_extent_ralloc_large_expand
+arena_extent_ralloc_large_shrink
arena_get
arena_ichoose
arena_init
@@ -115,30 +115,8 @@ bootstrap_free
bootstrap_malloc
bt_init
buferror
-chunk_alloc_cache
-chunk_alloc_dss
-chunk_alloc_mmap
-chunk_alloc_wrapper
chunk_boot
-chunk_commit_wrapper
-chunk_dalloc_cache
-chunk_dalloc_mmap
-chunk_dalloc_wrapper
-chunk_decommit_wrapper
-chunk_dss_boot
-chunk_dss_postfork_child
-chunk_dss_postfork_parent
-chunk_dss_prec_get
-chunk_dss_prec_set
-chunk_dss_prefork
-chunk_in_dss
-chunk_merge_wrapper
chunk_npages
-chunk_postfork_child
-chunk_postfork_parent
-chunk_prefork
-chunk_purge_wrapper
-chunk_split_wrapper
chunksize
chunksize_mask
ckh_count
@@ -167,14 +145,29 @@ extent_addr_get
extent_addr_randomize
extent_addr_set
extent_alloc
+extent_alloc_cache
+extent_alloc_dss
+extent_alloc_mmap
+extent_alloc_wrapper
extent_arena_get
extent_arena_set
extent_base_get
extent_before_get
extent_boot
+extent_commit_wrapper
extent_committed_get
extent_committed_set
extent_dalloc
+extent_dalloc_cache
+extent_dalloc_mmap
+extent_dalloc_wrapper
+extent_decommit_wrapper
+extent_dss_boot
+extent_dss_postfork_child
+extent_dss_postfork_parent
+extent_dss_prec_get
+extent_dss_prec_set
+extent_dss_prefork
extent_heap_empty
extent_heap_first
extent_heap_insert
@@ -184,12 +177,18 @@ extent_heap_remove_first
extent_hooks_default
extent_hooks_get
extent_hooks_set
+extent_in_dss
extent_init
extent_last_get
extent_lookup
+extent_merge_wrapper
extent_past_get
+extent_postfork_child
+extent_postfork_parent
+extent_prefork
extent_prof_tctx_get
extent_prof_tctx_set
+extent_purge_wrapper
extent_retained_get
extent_ring_insert
extent_ring_remove
@@ -201,6 +200,7 @@ extent_slab_data_get
extent_slab_data_get_const
extent_slab_get
extent_slab_set
+extent_split_wrapper
extent_usize_get
extent_usize_set
extent_zeroed_get
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
index 91c949aa..59f52f96 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -41,11 +41,11 @@
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_dss.h" />
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_mmap.h" />
@@ -92,11 +92,11 @@
-    <ClCompile Include="..\..\..\..\src\chunk_dss.c" />
-    <ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
+    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
+    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
index 09d4cb20..159b2e72 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -62,12 +62,6 @@
Header Files\internal
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
Header Files\internal
@@ -77,6 +71,12 @@
Header Files\internal
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_dss.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent_mmap.h">
+      <Filter>Header Files\internal</Filter>
+    </ClInclude>
Header Files\internal
@@ -187,12 +187,6 @@
Source Files
-    <ClCompile Include="..\..\..\..\src\chunk_dss.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
-    <ClCompile Include="..\..\..\..\src\chunk_mmap.c">
-      <Filter>Source Files</Filter>
-    </ClCompile>
Source Files
@@ -202,6 +196,12 @@
Source Files
+    <ClCompile Include="..\..\..\..\src\extent_dss.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\src\extent_mmap.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
Source Files
diff --git a/src/arena.c b/src/arena.c
index 075082b1..990e0e89 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -46,33 +46,33 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/
static size_t
-arena_chunk_dirty_npages(const extent_t *extent)
+arena_extent_dirty_npages(const extent_t *extent)
{
return (extent_size_get(extent) >> LG_PAGE);
}
static extent_t *
-arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
+arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab)
{
malloc_mutex_assert_owner(tsdn, &arena->lock);
- return (chunk_alloc_cache(tsdn, arena, extent_hooks, new_addr, usize,
+ return (extent_alloc_cache(tsdn, arena, extent_hooks, new_addr, usize,
pad, alignment, zero, slab));
}
extent_t *
-arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, void *new_addr, size_t size, size_t alignment,
bool *zero)
{
extent_t *extent;
malloc_mutex_lock(tsdn, &arena->lock);
- extent = arena_chunk_cache_alloc_locked(tsdn, arena, extent_hooks,
+ extent = arena_extent_cache_alloc_locked(tsdn, arena, extent_hooks,
new_addr, size, 0, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);
@@ -80,44 +80,44 @@ arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
}
static void
-arena_chunk_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
+arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, extent_t *extent)
{
malloc_mutex_assert_owner(tsdn, &arena->lock);
- chunk_dalloc_cache(tsdn, arena, extent_hooks, extent);
+ extent_dalloc_cache(tsdn, arena, extent_hooks, extent);
arena_maybe_purge(tsdn, arena);
}
void
-arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, extent_t *extent)
{
malloc_mutex_lock(tsdn, &arena->lock);
- arena_chunk_cache_dalloc_locked(tsdn, arena, extent_hooks, extent);
+ arena_extent_cache_dalloc_locked(tsdn, arena, extent_hooks, extent);
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
+arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{
if (cache) {
extent_ring_insert(&arena->extents_dirty, extent);
- arena->ndirty += arena_chunk_dirty_npages(extent);
+ arena->ndirty += arena_extent_dirty_npages(extent);
}
}
void
-arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
+arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
{
if (dirty) {
extent_ring_remove(extent);
- assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
- arena->ndirty -= arena_chunk_dirty_npages(extent);
+ assert(arena->ndirty >= arena_extent_dirty_npages(extent));
+ arena->ndirty -= arena_extent_dirty_npages(extent);
}
}
@@ -320,13 +320,13 @@ arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
}
static extent_t *
-arena_chunk_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
+arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t *extent_hooks, size_t usize, size_t alignment, bool *zero)
{
extent_t *extent;
bool commit = true;
- extent = chunk_alloc_wrapper(tsdn, arena, extent_hooks, NULL, usize,
+ extent = extent_alloc_wrapper(tsdn, arena, extent_hooks, NULL, usize,
large_pad, alignment, zero, &commit, false);
if (extent == NULL) {
/* Revert optimistic stats updates. */
@@ -343,7 +343,7 @@ arena_chunk_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
}
extent_t *
-arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
+arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool *zero)
{
extent_t *extent;
@@ -358,11 +358,11 @@ arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
- extent = arena_chunk_cache_alloc_locked(tsdn, arena, &extent_hooks,
+ extent = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks,
NULL, usize, large_pad, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);
if (extent == NULL) {
- extent = arena_chunk_alloc_large_hard(tsdn, arena,
+ extent = arena_extent_alloc_large_hard(tsdn, arena,
&extent_hooks, usize, alignment, zero);
}
@@ -370,7 +370,7 @@ arena_chunk_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
void
-arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool locked)
{
extent_hooks_t extent_hooks = EXTENT_HOOKS_INITIALIZER;
@@ -384,13 +384,13 @@ arena_chunk_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
- arena_chunk_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
+ arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
if (!locked)
malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
size_t oldusize)
{
size_t usize = extent_usize_get(extent);
@@ -406,7 +406,7 @@ arena_chunk_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
}
void
-arena_chunk_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
size_t oldusize)
{
size_t usize = extent_usize_get(extent);
@@ -756,7 +756,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
next = qr_next(extent, qr_link);
/* Allocate. */
zero = false;
- textent = arena_chunk_cache_alloc_locked(tsdn, arena,
+ textent = arena_extent_cache_alloc_locked(tsdn, arena,
extent_hooks, extent_base_get(extent),
extent_size_get(extent), 0, CACHELINE, &zero, false);
assert(textent == extent);
@@ -793,7 +793,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
next = qr_next(extent, qr_link);
extent_ring_remove(extent);
- chunk_dalloc_wrapper(tsdn, arena, extent_hooks, extent);
+ extent_dalloc_wrapper(tsdn, arena, extent_hooks, extent);
}
if (config_stats) {
@@ -869,7 +869,7 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
extent_hooks_t extent_hooks = EXTENT_HOOKS_INITIALIZER;
arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
- arena_chunk_cache_dalloc_locked(tsdn, arena, &extent_hooks, slab);
+ arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, slab);
}
void
@@ -996,7 +996,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
zero = false;
commit = true;
malloc_mutex_unlock(tsdn, &arena->lock);
- slab = chunk_alloc_wrapper(tsdn, arena, extent_hooks, NULL,
+ slab = extent_alloc_wrapper(tsdn, arena, extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
malloc_mutex_lock(tsdn, &arena->lock);
@@ -1013,7 +1013,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
bool zero;
zero = false;
- slab = arena_chunk_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
+ slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, true);
if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
@@ -1774,7 +1774,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
(uint64_t)(uintptr_t)arena;
}
- arena->dss_prec = chunk_dss_prec_get(tsdn);
+ arena->dss_prec = extent_dss_prec_get(tsdn);
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
arena->purging = false;
diff --git a/src/base.c b/src/base.c
index 3807422c..667786e1 100644
--- a/src/base.c
+++ b/src/base.c
@@ -38,7 +38,7 @@ base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
}
static extent_t *
-base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
+base_extent_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_t *extent;
size_t csize, nsize;
@@ -51,13 +51,13 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
/*
- * Directly call chunk_alloc_mmap() because it's critical to allocate
+ * Directly call extent_alloc_mmap() because it's critical to allocate
* untouched demand-zeroed virtual memory.
*/
{
bool zero = true;
bool commit = true;
- addr = chunk_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
+ addr = extent_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
}
if (addr == NULL) {
if (extent != NULL)
@@ -108,7 +108,7 @@ base_alloc(tsdn_t *tsdn, size_t size)
}
if (extent == NULL) {
/* Try to allocate more space. */
- extent = base_chunk_alloc(tsdn, csize);
+ extent = base_extent_alloc(tsdn, csize);
}
if (extent == NULL) {
ret = NULL;
diff --git a/src/chunk.c b/src/chunk.c
index 3ce6e015..d750f715 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -7,924 +7,12 @@
const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;
-/* Used exclusively for gdump triggering. */
-static size_t curchunks;
-static size_t highchunks;
-
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
-static void *extent_alloc_default(void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
-static bool extent_dalloc_default(void *addr, size_t size, bool committed,
- unsigned arena_ind);
-static bool extent_commit_default(void *addr, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool extent_decommit_default(void *addr, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool extent_purge_default(void *addr, size_t size, size_t offset,
- size_t length, unsigned arena_ind);
-static bool extent_split_default(void *addr, size_t size, size_t size_a,
- size_t size_b, bool committed, unsigned arena_ind);
-static bool extent_merge_default(void *addr_a, size_t size_a, void *addr_b,
- size_t size_b, bool committed, unsigned arena_ind);
-
-const extent_hooks_t extent_hooks_default = {
- extent_alloc_default,
- extent_dalloc_default,
- extent_commit_default,
- extent_decommit_default,
- extent_purge_default,
- extent_split_default,
- extent_merge_default
-};
-
/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
-
-static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_heap_t extent_heaps[NPSIZES],
- bool cache, extent_t *extent);
-
-/******************************************************************************/
-
-static void
-extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
-{
- size_t psz = extent_size_quantize_floor(extent_size_get(extent));
- pszind_t pind = psz2ind(psz);
- extent_heap_insert(&extent_heaps[pind], extent);
-}
-
-static void
-extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
-{
- size_t psz = extent_size_quantize_floor(extent_size_get(extent));
- pszind_t pind = psz2ind(psz);
- extent_heap_remove(&extent_heaps[pind], extent);
-}
-
-static extent_hooks_t
-extent_hooks_get_locked(arena_t *arena)
-{
-
- return (arena->extent_hooks);
-}
-
-extent_hooks_t
-extent_hooks_get(tsdn_t *tsdn, arena_t *arena)
-{
- extent_hooks_t extent_hooks;
-
- malloc_mutex_lock(tsdn, &arena->extents_mtx);
- extent_hooks = extent_hooks_get_locked(arena);
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-
- return (extent_hooks);
-}
-
-extent_hooks_t
-extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
- const extent_hooks_t *extent_hooks)
-{
- extent_hooks_t old_extent_hooks;
-
- malloc_mutex_lock(tsdn, &arena->extents_mtx);
- old_extent_hooks = arena->extent_hooks;
- /*
- * Copy each field atomically so that it is impossible for readers to
- * see partially updated pointers. There are places where readers only
- * need one hook function pointer (therefore no need to copy the
- * entirety of arena->extent_hooks), and stale reads do not affect
- * correctness, so they perform unlocked reads.
- */
-#define ATOMIC_COPY_HOOK(n) do { \
- union { \
- extent_##n##_t **n; \
- void **v; \
- } u; \
- u.n = &arena->extent_hooks.n; \
- atomic_write_p(u.v, extent_hooks->n); \
-} while (0)
- ATOMIC_COPY_HOOK(alloc);
- ATOMIC_COPY_HOOK(dalloc);
- ATOMIC_COPY_HOOK(commit);
- ATOMIC_COPY_HOOK(decommit);
- ATOMIC_COPY_HOOK(purge);
- ATOMIC_COPY_HOOK(split);
- ATOMIC_COPY_HOOK(merge);
-#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-
- return (old_extent_hooks);
-}
-
-static void
-extent_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, bool locked)
-{
- static const extent_hooks_t uninitialized_hooks =
- EXTENT_HOOKS_INITIALIZER;
-
- if (memcmp(extent_hooks, &uninitialized_hooks, sizeof(extent_hooks_t))
- == 0) {
- *extent_hooks = locked ? extent_hooks_get_locked(arena) :
- extent_hooks_get(tsdn, arena);
- }
-}
-
-static void
-extent_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks)
-{
-
- extent_hooks_assure_initialized_impl(tsdn, arena, extent_hooks, true);
-}
-
-static void
-extent_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks)
-{
-
- extent_hooks_assure_initialized_impl(tsdn, arena, extent_hooks, false);
-}
-
-static bool
-extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
- bool init_missing, rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
-{
-
- *r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree,
- (uintptr_t)extent_base_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_a == NULL)
- return (true);
- assert(*r_elm_a != NULL);
-
- if (extent_size_get(extent) > PAGE) {
- *r_elm_b = rtree_elm_acquire(tsdn, &extents_rtree,
- (uintptr_t)extent_last_get(extent), dependent,
- init_missing);
- if (!dependent && *r_elm_b == NULL)
- return (true);
- assert(*r_elm_b != NULL);
- } else
- *r_elm_b = NULL;
-
- return (false);
-}
-
-static void
-extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
- rtree_elm_t *elm_b, const extent_t *extent)
-{
-
- rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
- if (elm_b != NULL)
- rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent);
-}
-
-static void
-extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
-{
-
- rtree_elm_release(tsdn, &extents_rtree, elm_a);
- if (elm_b != NULL)
- rtree_elm_release(tsdn, &extents_rtree, elm_b);
-}
-
-static void
-chunk_interior_register(tsdn_t *tsdn, const extent_t *extent)
-{
- size_t i;
-
- assert(extent_slab_get(extent));
-
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_write(tsdn, &extents_rtree,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE), extent);
- }
-}
-
-static bool
-chunk_register(tsdn_t *tsdn, const extent_t *extent)
-{
- rtree_elm_t *elm_a, *elm_b;
-
- if (extent_rtree_acquire(tsdn, extent, false, true, &elm_a, &elm_b))
- return (true);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
- if (extent_slab_get(extent))
- chunk_interior_register(tsdn, extent);
- extent_rtree_release(tsdn, elm_a, elm_b);
-
- if (config_prof && opt_prof && extent_active_get(extent)) {
- size_t nadd = (extent_size_get(extent) == 0) ? 1 :
- extent_size_get(extent) / chunksize;
- size_t cur = atomic_add_z(&curchunks, nadd);
- size_t high = atomic_read_z(&highchunks);
- while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
- /*
- * Don't refresh cur, because it may have decreased
- * since this thread lost the highchunks update race.
- */
- high = atomic_read_z(&highchunks);
- }
- if (cur > high && prof_gdump_get_unlocked())
- prof_gdump(tsdn);
- }
-
- return (false);
-}
-
-static void
-chunk_interior_deregister(tsdn_t *tsdn, const extent_t *extent)
-{
- size_t i;
-
- assert(extent_slab_get(extent));
-
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_clear(tsdn, &extents_rtree,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE));
- }
-}
-
-static void
-chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
-{
- rtree_elm_t *elm_a, *elm_b;
-
- extent_rtree_acquire(tsdn, extent, true, false, &elm_a, &elm_b);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
- if (extent_slab_get(extent))
- chunk_interior_deregister(tsdn, extent);
- extent_rtree_release(tsdn, elm_a, elm_b);
-
- if (config_prof && opt_prof && extent_active_get(extent)) {
- size_t nsub = (extent_size_get(extent) == 0) ? 1 :
- extent_size_get(extent) / chunksize;
- assert(atomic_read_z(&curchunks) >= nsub);
- atomic_sub_z(&curchunks, nsub);
- }
-}
-
-/*
- * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
- * fits.
- */
-static extent_t *
-chunk_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
- size_t size)
-{
- pszind_t pind, i;
-
- pind = psz2ind(extent_size_quantize_ceil(size));
- for (i = pind; i < NPSIZES; i++) {
- extent_t *extent = extent_heap_first(&extent_heaps[i]);
- if (extent != NULL)
- return (extent);
- }
-
- return (NULL);
-}
-
-static void
-chunk_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- bool cache, extent_t *extent)
-{
-
- /*
- * Leak chunk after making sure its pages have already been purged, so
- * that this is only a virtual memory leak.
- */
- if (cache) {
- chunk_purge_wrapper(tsdn, arena, extent_hooks, extent, 0,
- extent_size_get(extent));
- }
- extent_dalloc(tsdn, arena, extent);
-}
-
-static extent_t *
-chunk_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
- size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
- bool slab)
-{
- extent_t *extent;
- size_t size, alloc_size, leadsize, trailsize;
-
- assert(new_addr == NULL || !slab);
- assert(pad == 0 || !slab);
-
- size = usize + pad;
- alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE);
- /* Beware size_t wrap-around. */
- if (alloc_size < usize)
- return (NULL);
- malloc_mutex_lock(tsdn, &arena->extents_mtx);
- extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
- if (new_addr != NULL) {
- rtree_elm_t *elm;
-
- elm = rtree_elm_acquire(tsdn, &extents_rtree,
- (uintptr_t)new_addr, false, false);
- if (elm != NULL) {
- extent = rtree_elm_read_acquired(tsdn, &extents_rtree,
- elm);
- if (extent != NULL && (extent_active_get(extent) ||
- extent_retained_get(extent) == cache))
- extent = NULL;
- rtree_elm_release(tsdn, &extents_rtree, elm);
- } else
- extent = NULL;
- } else
- extent = chunk_first_best_fit(arena, extent_heaps, alloc_size);
- if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
- size)) {
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
- return (NULL);
- }
- extent_heaps_remove(extent_heaps, extent);
- arena_chunk_cache_maybe_remove(arena, extent, cache);
-
- leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
- assert(new_addr == NULL || leadsize == 0);
- assert(extent_size_get(extent) >= leadsize + size);
- trailsize = extent_size_get(extent) - leadsize - size;
- if (extent_zeroed_get(extent))
- *zero = true;
- if (extent_committed_get(extent))
- *commit = true;
-
- /* Split the lead. */
- if (leadsize != 0) {
- extent_t *lead = extent;
- extent = chunk_split_wrapper(tsdn, arena, extent_hooks, lead,
- leadsize, leadsize, size + trailsize, usize + trailsize);
- if (extent == NULL) {
- chunk_leak(tsdn, arena, extent_hooks, cache, lead);
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
- return (NULL);
- }
- extent_heaps_insert(extent_heaps, lead);
- arena_chunk_cache_maybe_insert(arena, lead, cache);
- }
-
- /* Split the trail. */
- if (trailsize != 0) {
- extent_t *trail = chunk_split_wrapper(tsdn, arena, extent_hooks,
- extent, size, usize, trailsize, trailsize);
- if (trail == NULL) {
- chunk_leak(tsdn, arena, extent_hooks, cache, extent);
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
- return (NULL);
- }
- extent_heaps_insert(extent_heaps, trail);
- arena_chunk_cache_maybe_insert(arena, trail, cache);
- } else if (leadsize == 0) {
- /*
- * Splitting causes usize to be set as a side effect, but no
- * splitting occurred.
- */
- extent_usize_set(extent, usize);
- }
-
- if (!extent_committed_get(extent) &&
- extent_hooks->commit(extent_base_get(extent),
- extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
- chunk_record(tsdn, arena, extent_hooks, extent_heaps, cache,
- extent);
- return (NULL);
- }
-
- if (pad != 0)
- extent_addr_randomize(tsdn, extent, alignment);
- extent_active_set(extent, true);
- if (slab) {
- extent_slab_set(extent, slab);
- chunk_interior_register(tsdn, extent);
- }
-
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-
- if (*zero) {
- if (!extent_zeroed_get(extent)) {
- memset(extent_addr_get(extent), 0,
- extent_usize_get(extent));
- } else if (config_debug) {
- size_t i;
- size_t *p = (size_t *)(uintptr_t)
- extent_addr_get(extent);
-
- for (i = 0; i < usize / sizeof(size_t); i++)
- assert(p[i] == 0);
- }
- }
- return (extent);
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
- * advantage of this to avoid demanding zeroed chunks, but taking advantage of
- * them if they are returned.
- */
-static void *
-chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
-{
- void *ret;
-
- assert(size != 0);
- assert(alignment != 0);
-
- /* "primary" dss. */
- if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
- /* mmap. */
- if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
- NULL)
- return (ret);
- /* "secondary" dss. */
- if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
-
- /* All strategies for allocation failed. */
- return (NULL);
-}
-
-extent_t *
-chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
- bool slab)
-{
- extent_t *extent;
- bool commit;
-
- assert(usize + pad != 0);
- assert(alignment != 0);
-
- commit = true;
- extent = chunk_recycle(tsdn, arena, extent_hooks, arena->extents_cached,
- true, new_addr, usize, pad, alignment, zero, &commit, slab);
- if (extent == NULL)
- return (NULL);
- assert(commit);
- return (extent);
-}
-
-static arena_t *
-chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
-{
- arena_t *arena;
-
- arena = arena_get(tsdn, arena_ind, false);
- /*
- * The arena we're allocating on behalf of must have been initialized
- * already.
- */
- assert(arena != NULL);
- return (arena);
-}
-
-static void *
-extent_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit, unsigned arena_ind)
-{
- void *ret;
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = chunk_arena_get(tsdn, arena_ind);
- ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
- commit, arena->dss_prec);
- if (ret == NULL)
- return (NULL);
-
- return (ret);
-}
-
-static extent_t *
-chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
- bool *commit, bool slab)
-{
- extent_t *extent;
-
- assert(usize != 0);
- assert(alignment != 0);
-
- extent = chunk_recycle(tsdn, arena, extent_hooks,
- arena->extents_retained, false, new_addr, usize, pad, alignment,
- zero, commit, slab);
- if (extent != NULL && config_stats) {
- size_t size = usize + pad;
- arena->stats.retained -= size;
- }
-
- return (extent);
-}
-
-static extent_t *
-chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
- size_t alignment, bool *zero, bool *commit, bool slab)
-{
- extent_t *extent;
- size_t size;
- void *addr;
-
- size = usize + pad;
- extent = extent_alloc(tsdn, arena);
- if (extent == NULL)
- return (NULL);
- addr = extent_hooks->alloc(new_addr, size, alignment, zero, commit,
- arena->ind);
- if (addr == NULL) {
- extent_dalloc(tsdn, arena, extent);
- return (NULL);
- }
- extent_init(extent, arena, addr, size, usize, true, zero, commit, slab);
- if (pad != 0)
- extent_addr_randomize(tsdn, extent, alignment);
- if (chunk_register(tsdn, extent)) {
- chunk_leak(tsdn, arena, extent_hooks, false, extent);
- return (NULL);
- }
-
- return (extent);
-}
-
-extent_t *
-chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
- bool *commit, bool slab)
-{
- extent_t *extent;
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
-
- extent = chunk_alloc_retained(tsdn, arena, extent_hooks, new_addr,
- usize, pad, alignment, zero, commit, slab);
- if (extent == NULL) {
- extent = chunk_alloc_wrapper_hard(tsdn, arena, extent_hooks,
- new_addr, usize, pad, alignment, zero, commit, slab);
- }
-
- return (extent);
-}
-
-static bool
-chunk_can_coalesce(const extent_t *a, const extent_t *b)
-{
-
- if (extent_arena_get(a) != extent_arena_get(b))
- return (false);
- if (extent_active_get(a) != extent_active_get(b))
- return (false);
- if (extent_committed_get(a) != extent_committed_get(b))
- return (false);
- if (extent_retained_get(a) != extent_retained_get(b))
- return (false);
-
- return (true);
-}
-
-static void
-chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
-{
-
- if (!chunk_can_coalesce(a, b))
- return;
-
- extent_heaps_remove(extent_heaps, a);
- extent_heaps_remove(extent_heaps, b);
-
- arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
- arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
-
- if (chunk_merge_wrapper(tsdn, arena, extent_hooks, a, b)) {
- extent_heaps_insert(extent_heaps, a);
- extent_heaps_insert(extent_heaps, b);
- arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
- arena_chunk_cache_maybe_insert(extent_arena_get(b), b, cache);
- return;
- }
-
- extent_heaps_insert(extent_heaps, a);
- arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
-}
-
-static void
-chunk_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_heap_t extent_heaps[NPSIZES], bool cache, extent_t *extent)
-{
- extent_t *prev, *next;
-
- assert(!cache || !extent_zeroed_get(extent));
-
- malloc_mutex_lock(tsdn, &arena->extents_mtx);
- extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
-
- extent_usize_set(extent, 0);
- extent_active_set(extent, false);
- extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
- if (extent_slab_get(extent)) {
- chunk_interior_deregister(tsdn, extent);
- extent_slab_set(extent, false);
- }
-
- assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
- extent_heaps_insert(extent_heaps, extent);
- arena_chunk_cache_maybe_insert(arena, extent, cache);
-
- /* Try to coalesce forward. */
- next = rtree_read(tsdn, &extents_rtree,
- (uintptr_t)extent_past_get(extent), false);
- if (next != NULL) {
- chunk_try_coalesce(tsdn, arena, extent_hooks, extent, next,
- extent_heaps, cache);
- }
-
- /* Try to coalesce backward. */
- prev = rtree_read(tsdn, &extents_rtree,
- (uintptr_t)extent_before_get(extent), false);
- if (prev != NULL) {
- chunk_try_coalesce(tsdn, arena, extent_hooks, prev, extent,
- extent_heaps, cache);
- }
-
- malloc_mutex_unlock(tsdn, &arena->extents_mtx);
-}
-
-void
-chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *extent)
-{
-
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
-
- extent_addr_set(extent, extent_base_get(extent));
- extent_zeroed_set(extent, false);
-
- chunk_record(tsdn, arena, extent_hooks, arena->extents_cached, true,
- extent);
-}
-
-static bool
-extent_dalloc_default(void *addr, size_t size, bool committed,
- unsigned arena_ind)
-{
-
- if (!have_dss || !chunk_in_dss(tsdn_fetch(), addr))
- return (chunk_dalloc_mmap(addr, size));
- return (true);
-}
-
-void
-chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *extent)
-{
-
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
-
- extent_addr_set(extent, extent_base_get(extent));
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
- /* Try to deallocate. */
- if (!extent_hooks->dalloc(extent_base_get(extent),
- extent_size_get(extent), extent_committed_get(extent),
- arena->ind)) {
- chunk_deregister(tsdn, extent);
- extent_dalloc(tsdn, arena, extent);
- return;
- }
- /* Try to decommit; purge if that fails. */
- if (extent_committed_get(extent)) {
- extent_committed_set(extent,
- extent_hooks->decommit(extent_base_get(extent),
- extent_size_get(extent), 0, extent_size_get(extent),
- arena->ind));
- }
- extent_zeroed_set(extent, !extent_committed_get(extent) ||
- !extent_hooks->purge(extent_base_get(extent),
- extent_size_get(extent), 0, extent_size_get(extent), arena->ind));
-
- if (config_stats)
- arena->stats.retained += extent_size_get(extent);
-
- chunk_record(tsdn, arena, extent_hooks, arena->extents_retained, false,
- extent);
-}
-
-static bool
-extent_commit_default(void *addr, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length));
-}
-
-bool
-chunk_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *extent, size_t offset, size_t length)
-{
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
- return (extent_hooks->commit(extent_base_get(extent),
- extent_size_get(extent), offset, length, arena->ind));
-}
-
-static bool
-extent_decommit_default(void *addr, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length));
-}
-
-bool
-chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
- size_t length)
-{
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
- return (extent_hooks->decommit(extent_base_get(extent),
- extent_size_get(extent), offset, length, arena->ind));
-}
-
-static bool
-extent_purge_default(void *addr, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return (pages_purge((void *)((uintptr_t)addr + (uintptr_t)offset),
- length));
-}
-
-bool
-chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *extent, size_t offset, size_t length)
-{
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
- return (extent_hooks->purge(extent_base_get(extent),
- extent_size_get(extent), offset, length, arena->ind));
-}
-
-static bool
-extent_split_default(void *addr, size_t size, size_t size_a, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- if (!maps_coalesce)
- return (true);
- return (false);
-}
-
-extent_t *
-chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *extent, size_t size_a, size_t usize_a, size_t size_b,
- size_t usize_b)
-{
- extent_t *trail;
- rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
-
- assert(extent_size_get(extent) == size_a + size_b);
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
-
- trail = extent_alloc(tsdn, arena);
- if (trail == NULL)
- goto label_error_a;
-
- {
- extent_t lead;
-
- extent_init(&lead, arena, extent_addr_get(extent), size_a,
- usize_a, extent_active_get(extent),
- extent_zeroed_get(extent), extent_committed_get(extent),
- extent_slab_get(extent));
-
- if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
- &lead_elm_b))
- goto label_error_b;
- }
-
- extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
- size_a), size_b, usize_b, extent_active_get(extent),
- extent_zeroed_get(extent), extent_committed_get(extent),
- extent_slab_get(extent));
- if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
- &trail_elm_b))
- goto label_error_c;
-
- if (extent_hooks->split(extent_base_get(extent), size_a + size_b, size_a,
- size_b, extent_committed_get(extent), arena->ind))
- goto label_error_d;
-
- extent_size_set(extent, size_a);
- extent_usize_set(extent, usize_a);
-
- extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
- extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
-
- extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
- extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
-
- return (trail);
-label_error_d:
- extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
-label_error_c:
- extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
-label_error_b:
- extent_dalloc(tsdn, arena, trail);
-label_error_a:
- return (NULL);
-}
-
-static bool
-extent_merge_default(void *addr_a, size_t size_a, void *addr_b, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- if (!maps_coalesce)
- return (true);
- if (have_dss) {
- tsdn_t *tsdn = tsdn_fetch();
- if (chunk_in_dss(tsdn, addr_a) != chunk_in_dss(tsdn, addr_b))
- return (true);
- }
-
- return (false);
-}
-
-bool
-chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
- extent_t *a, extent_t *b)
-{
- rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
-
- extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
- if (extent_hooks->merge(extent_base_get(a), extent_size_get(a),
- extent_base_get(b), extent_size_get(b), extent_committed_get(a),
- arena->ind))
- return (true);
-
- /*
- * The rtree writes must happen while all the relevant elements are
- * owned, so the following code uses decomposed helper functions rather
- * than chunk_{,de}register() to do things in the right order.
- */
- extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
- extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
-
- if (a_elm_b != NULL) {
- rtree_elm_write_acquired(tsdn, &extents_rtree, a_elm_b, NULL);
- rtree_elm_release(tsdn, &extents_rtree, a_elm_b);
- }
- if (b_elm_b != NULL) {
- rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL);
- rtree_elm_release(tsdn, &extents_rtree, b_elm_a);
- } else
- b_elm_b = b_elm_a;
-
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
-
- extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
- extent_rtree_release(tsdn, a_elm_a, b_elm_b);
-
- extent_dalloc(tsdn, extent_arena_get(b), b);
-
- return (false);
-}
bool
chunk_boot(void)
@@ -959,29 +47,5 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
- if (have_dss && chunk_dss_boot())
- return (true);
-
return (false);
}
-
-void
-chunk_prefork(tsdn_t *tsdn)
-{
-
- chunk_dss_prefork(tsdn);
-}
-
-void
-chunk_postfork_parent(tsdn_t *tsdn)
-{
-
- chunk_dss_postfork_parent(tsdn);
-}
-
-void
-chunk_postfork_child(tsdn_t *tsdn)
-{
-
- chunk_dss_postfork_child(tsdn);
-}
diff --git a/src/ctl.c b/src/ctl.c
index 5ff2a42d..61f3aa1c 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1560,11 +1560,11 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
} else {
if (dss_prec != dss_prec_limit &&
- chunk_dss_prec_set(tsd_tsdn(tsd), dss_prec)) {
+ extent_dss_prec_set(tsd_tsdn(tsd), dss_prec)) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = chunk_dss_prec_get(tsd_tsdn(tsd));
+ dss_prec_old = extent_dss_prec_get(tsd_tsdn(tsd));
}
dss = dss_prec_names[dss_prec_old];
diff --git a/src/extent.c b/src/extent.c
index 3e62e3bc..9f3ddd95 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -6,6 +6,45 @@
rtree_t extents_rtree;
+static void *extent_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool extent_dalloc_default(void *addr, size_t size, bool committed,
+ unsigned arena_ind);
+static bool extent_commit_default(void *addr, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool extent_decommit_default(void *addr, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool extent_purge_default(void *addr, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool extent_split_default(void *addr, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool extent_merge_default(void *addr_a, size_t size_a, void *addr_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const extent_hooks_t extent_hooks_default = {
+ extent_alloc_default,
+ extent_dalloc_default,
+ extent_commit_default,
+ extent_decommit_default,
+ extent_purge_default,
+ extent_split_default,
+ extent_merge_default
+};
+
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void extent_record(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, extent_heap_t extent_heaps[NPSIZES],
+ bool cache, extent_t *extent);
+
/******************************************************************************/
extent_t *
@@ -34,6 +73,91 @@ extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
}
+static extent_hooks_t
+extent_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->extent_hooks);
+}
+
+extent_hooks_t
+extent_hooks_get(tsdn_t *tsdn, arena_t *arena)
+{
+ extent_hooks_t extent_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->extents_mtx);
+ extent_hooks = extent_hooks_get_locked(arena);
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+
+ return (extent_hooks);
+}
+
+extent_hooks_t
+extent_hooks_set(tsdn_t *tsdn, arena_t *arena,
+ const extent_hooks_t *extent_hooks)
+{
+ extent_hooks_t old_extent_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->extents_mtx);
+ old_extent_hooks = arena->extent_hooks;
+ /*
+ * Copy each field atomically so that it is impossible for readers to
+ * see partially updated pointers. There are places where readers only
+ * need one hook function pointer (therefore no need to copy the
+ * entirety of arena->extent_hooks), and stale reads do not affect
+ * correctness, so they perform unlocked reads.
+ */
+#define ATOMIC_COPY_HOOK(n) do { \
+ union { \
+ extent_##n##_t **n; \
+ void **v; \
+ } u; \
+ u.n = &arena->extent_hooks.n; \
+ atomic_write_p(u.v, extent_hooks->n); \
+} while (0)
+ ATOMIC_COPY_HOOK(alloc);
+ ATOMIC_COPY_HOOK(dalloc);
+ ATOMIC_COPY_HOOK(commit);
+ ATOMIC_COPY_HOOK(decommit);
+ ATOMIC_COPY_HOOK(purge);
+ ATOMIC_COPY_HOOK(split);
+ ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+
+ return (old_extent_hooks);
+}
+
+static void
+extent_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks, bool locked)
+{
+ static const extent_hooks_t uninitialized_hooks =
+ EXTENT_HOOKS_INITIALIZER;
+
+ if (memcmp(extent_hooks, &uninitialized_hooks, sizeof(extent_hooks_t))
+ == 0) {
+ *extent_hooks = locked ? extent_hooks_get_locked(arena) :
+ extent_hooks_get(tsdn, arena);
+ }
+}
+
+static void
+extent_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks)
+{
+
+ extent_hooks_assure_initialized_impl(tsdn, arena, extent_hooks, true);
+}
+
+static void
+extent_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t *extent_hooks)
+{
+
+ extent_hooks_assure_initialized_impl(tsdn, arena, extent_hooks, false);
+}
+
#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
#define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)
@@ -118,6 +242,787 @@ extent_ad_comp(const extent_t *a, const extent_t *b)
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
+static void
+extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+{
+ size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ pszind_t pind = psz2ind(psz);
+ extent_heap_insert(&extent_heaps[pind], extent);
+}
+
+static void
+extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
+{
+ size_t psz = extent_size_quantize_floor(extent_size_get(extent));
+ pszind_t pind = psz2ind(psz);
+ extent_heap_remove(&extent_heaps[pind], extent);
+}
+
+static bool
+extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
+ bool init_missing, rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
+{
+
+ *r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree,
+ (uintptr_t)extent_base_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL)
+ return (true);
+ assert(*r_elm_a != NULL);
+
+ if (extent_size_get(extent) > PAGE) {
+ *r_elm_b = rtree_elm_acquire(tsdn, &extents_rtree,
+ (uintptr_t)extent_last_get(extent), dependent,
+ init_missing);
+ if (!dependent && *r_elm_b == NULL)
+ return (true);
+ assert(*r_elm_b != NULL);
+ } else
+ *r_elm_b = NULL;
+
+ return (false);
+}
+
+static void
+extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
+ rtree_elm_t *elm_b, const extent_t *extent)
+{
+
+ rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent);
+ if (elm_b != NULL)
+ rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent);
+}
+
+static void
+extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
+{
+
+ rtree_elm_release(tsdn, &extents_rtree, elm_a);
+ if (elm_b != NULL)
+ rtree_elm_release(tsdn, &extents_rtree, elm_b);
+}
+
+static void
+extent_interior_register(tsdn_t *tsdn, const extent_t *extent)
+{
+ size_t i;
+
+ assert(extent_slab_get(extent));
+
+ for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_write(tsdn, &extents_rtree,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE), extent);
+ }
+}
+
+static bool
+extent_register(tsdn_t *tsdn, const extent_t *extent)
+{
+ rtree_elm_t *elm_a, *elm_b;
+
+ if (extent_rtree_acquire(tsdn, extent, false, true, &elm_a, &elm_b))
+ return (true);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
+ if (extent_slab_get(extent))
+ extent_interior_register(tsdn, extent);
+ extent_rtree_release(tsdn, elm_a, elm_b);
+
+ if (config_prof && opt_prof && extent_active_get(extent)) {
+ size_t nadd = (extent_size_get(extent) == 0) ? 1 :
+ extent_size_get(extent) / chunksize;
+ size_t cur = atomic_add_z(&curchunks, nadd);
+ size_t high = atomic_read_z(&highchunks);
+ while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highchunks update race.
+ */
+ high = atomic_read_z(&highchunks);
+ }
+ if (cur > high && prof_gdump_get_unlocked())
+ prof_gdump(tsdn);
+ }
+
+ return (false);
+}
+
+static void
+extent_interior_deregister(tsdn_t *tsdn, const extent_t *extent)
+{
+ size_t i;
+
+ assert(extent_slab_get(extent));
+
+ for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_clear(tsdn, &extents_rtree,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE));
+ }
+}
+
+static void
+extent_deregister(tsdn_t *tsdn, const extent_t *extent)
+{
+ rtree_elm_t *elm_a, *elm_b;
+
+ extent_rtree_acquire(tsdn, extent, true, false, &elm_a, &elm_b);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
+ if (extent_slab_get(extent))
+ extent_interior_deregister(tsdn, extent);
+ extent_rtree_release(tsdn, elm_a, elm_b);
+
+ if (config_prof && opt_prof && extent_active_get(extent)) {
+ size_t nsub = (extent_size_get(extent) == 0) ? 1 :
+ extent_size_get(extent) / chunksize;
+ assert(atomic_read_z(&curchunks) >= nsub);
+ atomic_sub_z(&curchunks, nsub);
+ }
+}
+
+/*
+ * Do first-best-fit extent selection, i.e. select the lowest extent that best
+ * fits.
+ */
+static extent_t *
+extent_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
+ size_t size)
+{
+ pszind_t pind, i;
+
+ pind = psz2ind(extent_size_quantize_ceil(size));
+ for (i = pind; i < NPSIZES; i++) {
+ extent_t *extent = extent_heap_first(&extent_heaps[i]);
+ if (extent != NULL)
+ return (extent);
+ }
+
+ return (NULL);
+}
+
+static void
+extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+ bool cache, extent_t *extent)
+{
+
+ /*
+ * Leak extent after making sure its pages have already been purged, so
+ * that this is only a virtual memory leak.
+ */
+ if (cache) {
+ extent_purge_wrapper(tsdn, arena, extent_hooks, extent, 0,
+ extent_size_get(extent));
+ }
+ extent_dalloc(tsdn, arena, extent);
+}
+
+static extent_t *
+extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+ extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
+ size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
+ bool slab)
+{
+ extent_t *extent;
+ size_t size, alloc_size, leadsize, trailsize;
+
+ assert(new_addr == NULL || !slab);
+ assert(pad == 0 || !slab);
+
+ size = usize + pad;
+ alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE);
+ /* Beware size_t wrap-around. */
+ if (alloc_size < usize)
+ return (NULL);
+ malloc_mutex_lock(tsdn, &arena->extents_mtx);
+ extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
+ if (new_addr != NULL) {
+ rtree_elm_t *elm;
+
+ elm = rtree_elm_acquire(tsdn, &extents_rtree,
+ (uintptr_t)new_addr, false, false);
+ if (elm != NULL) {
+ extent = rtree_elm_read_acquired(tsdn, &extents_rtree,
+ elm);
+ if (extent != NULL && (extent_active_get(extent) ||
+ extent_retained_get(extent) == cache))
+ extent = NULL;
+ rtree_elm_release(tsdn, &extents_rtree, elm);
+ } else
+ extent = NULL;
+ } else
+ extent = extent_first_best_fit(arena, extent_heaps, alloc_size);
+ if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
+ size)) {
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ return (NULL);
+ }
+ extent_heaps_remove(extent_heaps, extent);
+ arena_extent_cache_maybe_remove(arena, extent, cache);
+
+ leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
+ PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_size_get(extent) >= leadsize + size);
+ trailsize = extent_size_get(extent) - leadsize - size;
+ if (extent_zeroed_get(extent))
+ *zero = true;
+ if (extent_committed_get(extent))
+ *commit = true;
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ extent_t *lead = extent;
+ extent = extent_split_wrapper(tsdn, arena, extent_hooks, lead,
+ leadsize, leadsize, size + trailsize, usize + trailsize);
+ if (extent == NULL) {
+ extent_leak(tsdn, arena, extent_hooks, cache, lead);
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ return (NULL);
+ }
+ extent_heaps_insert(extent_heaps, lead);
+ arena_extent_cache_maybe_insert(arena, lead, cache);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = extent_split_wrapper(tsdn, arena,
+ extent_hooks, extent, size, usize, trailsize, trailsize);
+ if (trail == NULL) {
+ extent_leak(tsdn, arena, extent_hooks, cache, extent);
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ return (NULL);
+ }
+ extent_heaps_insert(extent_heaps, trail);
+ arena_extent_cache_maybe_insert(arena, trail, cache);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes usize to be set as a side effect, but no
+ * splitting occurred.
+ */
+ extent_usize_set(extent, usize);
+ }
+
+ if (!extent_committed_get(extent) &&
+ extent_hooks->commit(extent_base_get(extent),
+ extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+ extent_record(tsdn, arena, extent_hooks, extent_heaps, cache,
+ extent);
+ return (NULL);
+ }
+
+ if (pad != 0)
+ extent_addr_randomize(tsdn, extent, alignment);
+ extent_active_set(extent, true);
+ if (slab) {
+ extent_slab_set(extent, slab);
+ extent_interior_register(tsdn, extent);
+ }
+
+ malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+
+ if (*zero) {
+ if (!extent_zeroed_get(extent)) {
+ memset(extent_addr_get(extent), 0,
+ extent_usize_get(extent));
+ } else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)
+ extent_addr_get(extent);
+
+ for (i = 0; i < usize / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
+ }
+ return (extent);
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, but taking advantage of
+ * them if they are returned.
+ */
+/*
+ * Try the configured back ends in the order implied by dss_prec: with
+ * dss_prec_primary, sbrk (dss) is attempted before mmap; with
+ * dss_prec_secondary, after.  Returns NULL only if every back end fails.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
+{
+	void *ret;
+
+	assert(size != 0);
+	assert(alignment != 0);
+
+	/* "primary" dss. */
+	if (have_dss && dss_prec == dss_prec_primary && (ret =
+	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+	    commit)) != NULL)
+		return (ret);
+	/* mmap. */
+	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+	    != NULL)
+		return (ret);
+	/* "secondary" dss. */
+	if (have_dss && dss_prec == dss_prec_secondary && (ret =
+	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+	    commit)) != NULL)
+		return (ret);
+
+	/* All strategies for allocation failed. */
+	return (NULL);
+}
+
+/*
+ * Allocate an extent from the arena's cache of previously freed extents.
+ * Cached extents are always committed, hence the post-condition assertion.
+ * Returns NULL if no suitable cached extent is available.
+ */
+extent_t *
+extent_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
+    bool slab)
+{
+	extent_t *extent;
+	bool commit;
+
+	assert(usize + pad != 0);
+	assert(alignment != 0);
+
+	commit = true;
+	extent = extent_recycle(tsdn, arena, extent_hooks,
+	    arena->extents_cached, true, new_addr, usize, pad, alignment, zero,
+	    &commit, slab);
+	if (extent == NULL)
+		return (NULL);
+	assert(commit);
+	return (extent);
+}
+
+/*
+ * Default extent_hooks.alloc callback: map arena_ind back to its arena and
+ * allocate raw pages via extent_alloc_core(), honoring the arena's dss
+ * precedence.  Returns NULL on failure.
+ */
+static void *
+extent_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+	tsdn_t *tsdn;
+	arena_t *arena;
+
+	tsdn = tsdn_fetch();
+	arena = arena_get(tsdn, arena_ind, false);
+	/*
+	 * The arena we're allocating on behalf of must have been initialized
+	 * already.
+	 */
+	assert(arena != NULL);
+	/* NULL propagates naturally; no separate failure branch is needed. */
+	return (extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+	    commit, arena->dss_prec));
+}
+
+/*
+ * Try to reuse a previously retained extent (memory that was freed but, by
+ * policy, never unmapped).  On success the retained statistic is reduced by
+ * the full (usize + pad) size.  Returns NULL if nothing suitable is retained.
+ */
+static extent_t *
+extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
+    size_t alignment, bool *zero, bool *commit, bool slab)
+{
+	extent_t *extent;
+
+	assert(usize != 0);
+	assert(alignment != 0);
+
+	extent = extent_recycle(tsdn, arena, extent_hooks,
+	    arena->extents_retained, false, new_addr, usize, pad, alignment,
+	    zero, commit, slab);
+	if (extent != NULL && config_stats) {
+		size_t size = usize + pad;
+		arena->stats.retained -= size;
+	}
+
+	return (extent);
+}
+
+/*
+ * Slow path for extent_alloc_wrapper(): obtain brand new pages from the
+ * alloc hook and register the resulting extent in the extents rtree.
+ * On registration failure the mapping is leaked (extent_leak) rather than
+ * returned, since its rtree state is unknown.
+ */
+static extent_t *
+extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t *extent_hooks, void *new_addr, size_t usize, size_t pad,
+    size_t alignment, bool *zero, bool *commit, bool slab)
+{
+	extent_t *extent;
+	size_t size;
+	void *addr;
+
+	size = usize + pad;
+	extent = extent_alloc(tsdn, arena);
+	if (extent == NULL)
+		return (NULL);
+	addr = extent_hooks->alloc(new_addr, size, alignment, zero, commit,
+	    arena->ind);
+	if (addr == NULL) {
+		extent_dalloc(tsdn, arena, extent);
+		return (NULL);
+	}
+	extent_init(extent, arena, addr, size, usize, true, zero, commit, slab);
+	if (pad != 0)
+		extent_addr_randomize(tsdn, extent, alignment);
+	if (extent_register(tsdn, extent)) {
+		extent_leak(tsdn, arena, extent_hooks, false, extent);
+		return (NULL);
+	}
+
+	return (extent);
+}
+
+/*
+ * Allocate an extent, preferring retained memory over creating new mappings
+ * via the hooks.  Returns NULL if both strategies fail.
+ */
+extent_t *
+extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
+    bool *commit, bool slab)
+{
+	extent_t *extent;
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+
+	extent = extent_alloc_retained(tsdn, arena, extent_hooks, new_addr,
+	    usize, pad, alignment, zero, commit, slab);
+	if (extent == NULL) {
+		extent = extent_alloc_wrapper_hard(tsdn, arena, extent_hooks,
+		    new_addr, usize, pad, alignment, zero, commit, slab);
+	}
+
+	return (extent);
+}
+
+/*
+ * Two adjacent extents may be merged only if they belong to the same arena
+ * and agree in their active, committed, and retained state.
+ */
+static bool
+extent_can_coalesce(const extent_t *a, const extent_t *b)
+{
+
+	if (extent_arena_get(a) != extent_arena_get(b))
+		return (false);
+	if (extent_active_get(a) != extent_active_get(b))
+		return (false);
+	if (extent_committed_get(a) != extent_committed_get(b))
+		return (false);
+	if (extent_retained_get(a) != extent_retained_get(b))
+		return (false);
+
+	return (true);
+}
+
+/*
+ * Attempt to merge adjacent extents a and b (a precedes b in address space).
+ * Both are temporarily removed from the heaps and cache bookkeeping; if
+ * extent_merge_wrapper() fails (returns true) they are re-inserted unchanged,
+ * otherwise only the combined extent a is re-inserted.
+ */
+static void
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
+{
+
+	if (!extent_can_coalesce(a, b))
+		return;
+
+	extent_heaps_remove(extent_heaps, a);
+	extent_heaps_remove(extent_heaps, b);
+
+	arena_extent_cache_maybe_remove(extent_arena_get(a), a, cache);
+	arena_extent_cache_maybe_remove(extent_arena_get(b), b, cache);
+
+	/* Merge failed: restore both extents as they were. */
+	if (extent_merge_wrapper(tsdn, arena, extent_hooks, a, b)) {
+		extent_heaps_insert(extent_heaps, a);
+		extent_heaps_insert(extent_heaps, b);
+		arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
+		arena_extent_cache_maybe_insert(extent_arena_get(b), b, cache);
+		return;
+	}
+
+	extent_heaps_insert(extent_heaps, a);
+	arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
+}
+
+/*
+ * Return an extent to the given heaps (cached or retained), clearing its
+ * usize/active/slab state and attempting to coalesce it with the extents
+ * immediately preceding and following it in the address space.  Called with
+ * extents_mtx unlocked; acquires it for the duration.
+ */
+static void
+extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_heap_t extent_heaps[NPSIZES], bool cache, extent_t *extent)
+{
+	extent_t *prev, *next;
+
+	assert(!cache || !extent_zeroed_get(extent));
+
+	malloc_mutex_lock(tsdn, &arena->extents_mtx);
+	extent_hooks_assure_initialized_locked(tsdn, arena, extent_hooks);
+
+	extent_usize_set(extent, 0);
+	extent_active_set(extent, false);
+	/* Cached extents are never trusted to be zeroed. */
+	extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
+	if (extent_slab_get(extent)) {
+		extent_interior_deregister(tsdn, extent);
+		extent_slab_set(extent, false);
+	}
+
+	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
+	extent_heaps_insert(extent_heaps, extent);
+	arena_extent_cache_maybe_insert(arena, extent, cache);
+
+	/* Try to coalesce forward. */
+	next = rtree_read(tsdn, &extents_rtree,
+	    (uintptr_t)extent_past_get(extent), false);
+	if (next != NULL) {
+		extent_try_coalesce(tsdn, arena, extent_hooks, extent, next,
+		    extent_heaps, cache);
+	}
+
+	/* Try to coalesce backward. */
+	prev = rtree_read(tsdn, &extents_rtree,
+	    (uintptr_t)extent_before_get(extent), false);
+	if (prev != NULL) {
+		extent_try_coalesce(tsdn, arena, extent_hooks, prev, extent,
+		    extent_heaps, cache);
+	}
+
+	malloc_mutex_unlock(tsdn, &arena->extents_mtx);
+}
+
+/*
+ * Return an extent to the arena's cache for later reuse; the underlying
+ * mapping is kept.  The extent's address is reset to its base (undoing any
+ * randomization) and its zeroed flag is cleared.
+ */
+void
+extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_t *extent)
+{
+
+	assert(extent_base_get(extent) != NULL);
+	assert(extent_size_get(extent) != 0);
+
+	extent_addr_set(extent, extent_base_get(extent));
+	extent_zeroed_set(extent, false);
+
+	extent_record(tsdn, arena, extent_hooks, arena->extents_cached, true,
+	    extent);
+}
+
+/*
+ * Default extent_hooks.dalloc callback.  dss (sbrk) memory can never be
+ * unmapped, so return true (refuse) for dss extents; otherwise defer to
+ * munmap via extent_dalloc_mmap().  committed is unused by this
+ * implementation.
+ */
+static bool
+extent_dalloc_default(void *addr, size_t size, bool committed,
+    unsigned arena_ind)
+{
+
+	if (!have_dss || !extent_in_dss(tsdn_fetch(), addr))
+		return (extent_dalloc_mmap(addr, size));
+	return (true);
+}
+
+/*
+ * Permanently deallocate an extent via the hooks.  If the dalloc hook
+ * refuses (returns true), fall back to decommitting (or, failing that,
+ * purging) the pages and recording the extent as retained for later reuse.
+ */
+void
+extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t *extent_hooks, extent_t *extent)
+{
+
+	assert(extent_base_get(extent) != NULL);
+	assert(extent_size_get(extent) != 0);
+
+	/* Undo address randomization before handing pages back. */
+	extent_addr_set(extent, extent_base_get(extent));
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+	/* Try to deallocate. */
+	if (!extent_hooks->dalloc(extent_base_get(extent),
+	    extent_size_get(extent), extent_committed_get(extent),
+	    arena->ind)) {
+		extent_deregister(tsdn, extent);
+		extent_dalloc(tsdn, arena, extent);
+		return;
+	}
+	/* Try to decommit; purge if that fails. */
+	if (extent_committed_get(extent)) {
+		extent_committed_set(extent,
+		    extent_hooks->decommit(extent_base_get(extent),
+		    extent_size_get(extent), 0, extent_size_get(extent),
+		    arena->ind));
+	}
+	/* Decommitted or successfully purged memory counts as zeroed. */
+	extent_zeroed_set(extent, !extent_committed_get(extent) ||
+	    !extent_hooks->purge(extent_base_get(extent),
+	    extent_size_get(extent), 0, extent_size_get(extent), arena->ind));
+
+	if (config_stats)
+		arena->stats.retained += extent_size_get(extent);
+
+	extent_record(tsdn, arena, extent_hooks, arena->extents_retained, false,
+	    extent);
+}
+
+/* Default commit hook: commit [addr+offset, addr+offset+length). */
+static bool
+extent_commit_default(void *addr, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+	return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+	    length));
+}
+
+/* Commit a page range within extent via the hooks; returns true on failure. */
+bool
+extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
+    size_t length)
+{
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+	return (extent_hooks->commit(extent_base_get(extent),
+	    extent_size_get(extent), offset, length, arena->ind));
+}
+
+/* Default decommit hook: decommit [addr+offset, addr+offset+length). */
+static bool
+extent_decommit_default(void *addr, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+	return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+	    length));
+}
+
+/* Decommit a page range within extent via the hooks; true on failure. */
+bool
+extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t *extent_hooks, extent_t *extent, size_t offset,
+    size_t length)
+{
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+	return (extent_hooks->decommit(extent_base_get(extent),
+	    extent_size_get(extent), offset, length, arena->ind));
+}
+
+/*
+ * Default purge hook: discard the physical pages backing the page-aligned
+ * range [addr+offset, addr+offset+length) while keeping the mapping.
+ */
+static bool
+extent_purge_default(void *addr, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+	assert(addr != NULL);
+	assert((offset & PAGE_MASK) == 0);
+	assert(length != 0);
+	assert((length & PAGE_MASK) == 0);
+
+	return (pages_purge((void *)((uintptr_t)addr + (uintptr_t)offset),
+	    length));
+}
+
+/* Purge a page range within extent via the hooks; returns true on failure. */
+bool
+extent_purge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_t *extent, size_t offset, size_t length)
+{
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+	return (extent_hooks->purge(extent_base_get(extent),
+	    extent_size_get(extent), offset, length, arena->ind));
+}
+
+/*
+ * Default split hook: splitting is pure bookkeeping, but is refused (true)
+ * when the platform cannot coalesce mappings, since the pieces could never
+ * be merged back together.
+ */
+static bool
+extent_split_default(void *addr, size_t size, size_t size_a, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+	if (!maps_coalesce)
+		return (true);
+	return (false);
+}
+
+/*
+ * Split extent into a lead of (size_a, usize_a), which reuses the input
+ * extent_t, and a trail of (size_b, usize_b), whose newly allocated extent_t
+ * is returned.  Both halves' rtree mappings are updated while the relevant
+ * rtree elements are held, so concurrent lookups never observe a torn state.
+ * Returns NULL on failure, leaving the input extent unmodified.
+ */
+extent_t *
+extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_t *extent, size_t size_a, size_t usize_a, size_t size_b,
+    size_t usize_b)
+{
+	extent_t *trail;
+	rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
+
+	assert(extent_size_get(extent) == size_a + size_b);
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+
+	trail = extent_alloc(tsdn, arena);
+	if (trail == NULL)
+		goto label_error_a;
+
+	{
+		extent_t lead;
+
+		extent_init(&lead, arena, extent_addr_get(extent), size_a,
+		    usize_a, extent_active_get(extent),
+		    extent_zeroed_get(extent), extent_committed_get(extent),
+		    extent_slab_get(extent));
+
+		if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
+		    &lead_elm_b))
+			goto label_error_b;
+	}
+
+	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
+	    size_a), size_b, usize_b, extent_active_get(extent),
+	    extent_zeroed_get(extent), extent_committed_get(extent),
+	    extent_slab_get(extent));
+	if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
+	    &trail_elm_b))
+		goto label_error_c;
+
+	if (extent_hooks->split(extent_base_get(extent), size_a + size_b,
+	    size_a, size_b, extent_committed_get(extent), arena->ind))
+		goto label_error_d;
+
+	extent_size_set(extent, size_a);
+	extent_usize_set(extent, usize_a);
+
+	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
+	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);
+
+	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
+	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
+
+	return (trail);
+label_error_d:
+	/*
+	 * Release the trail elements here (NOT the lead elements, which are
+	 * released at label_error_c below); releasing the lead elements twice
+	 * would corrupt rtree state and leak the acquired trail elements.
+	 */
+	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
+label_error_c:
+	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
+label_error_b:
+	extent_dalloc(tsdn, arena, trail);
+label_error_a:
+	return (NULL);
+}
+
+/*
+ * Default merge hook: refuse (true) when mappings cannot coalesce on this
+ * platform, or when the two regions straddle the dss/mmap boundary (i.e.
+ * they came from different back ends).
+ */
+static bool
+extent_merge_default(void *addr_a, size_t size_a, void *addr_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+	if (!maps_coalesce)
+		return (true);
+	if (have_dss) {
+		tsdn_t *tsdn = tsdn_fetch();
+		if (extent_in_dss(tsdn, addr_a) != extent_in_dss(tsdn, addr_b))
+			return (true);
+	}
+
+	return (false);
+}
+
+/*
+ * Merge extent b into a (a immediately precedes b in the address space).
+ * Returns true on failure.  On success, b's extent_t is freed and the rtree
+ * is rewritten so the entire combined range maps to a.
+ */
+bool
+extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t *extent_hooks,
+    extent_t *a, extent_t *b)
+{
+	rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+
+	extent_hooks_assure_initialized(tsdn, arena, extent_hooks);
+	if (extent_hooks->merge(extent_base_get(a), extent_size_get(a),
+	    extent_base_get(b), extent_size_get(b), extent_committed_get(a),
+	    arena->ind))
+		return (true);
+
+	/*
+	 * The rtree writes must happen while all the relevant elements are
+	 * owned, so the following code uses decomposed helper functions rather
+	 * than extent_{,de}register() to do things in the right order.
+	 */
+	extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
+	extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
+
+	/* Clear the interior boundary mappings of the combined range. */
+	if (a_elm_b != NULL) {
+		rtree_elm_write_acquired(tsdn, &extents_rtree, a_elm_b, NULL);
+		rtree_elm_release(tsdn, &extents_rtree, a_elm_b);
+	}
+	if (b_elm_b != NULL) {
+		rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL);
+		rtree_elm_release(tsdn, &extents_rtree, b_elm_a);
+	} else
+		b_elm_b = b_elm_a;
+
+	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+	extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b));
+	/* The merged range is zeroed only if both inputs were. */
+	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
+	extent_rtree_release(tsdn, a_elm_a, b_elm_b);
+
+	extent_dalloc(tsdn, extent_arena_get(b), b);
+
+	return (false);
+}
+
bool
extent_boot(void)
{
@@ -126,5 +1031,29 @@ extent_boot(void)
LG_PAGE)))
return (true);
+ if (have_dss && extent_dss_boot())
+ return (true);
+
return (false);
}
+
+/* Acquire extent-module mutexes (currently only dss) before fork(). */
+void
+extent_prefork(tsdn_t *tsdn)
+{
+
+	extent_dss_prefork(tsdn);
+}
+
+/* Release extent-module mutexes in the parent after fork(). */
+void
+extent_postfork_parent(tsdn_t *tsdn)
+{
+
+	extent_dss_postfork_parent(tsdn);
+}
+
+/* Reinitialize extent-module mutexes in the child after fork(). */
+void
+extent_postfork_child(tsdn_t *tsdn)
+{
+
+	extent_dss_postfork_child(tsdn);
+}
diff --git a/src/chunk_dss.c b/src/extent_dss.c
similarity index 86%
rename from src/chunk_dss.c
rename to src/extent_dss.c
index 9fa4ad81..0e34a440 100644
--- a/src/chunk_dss.c
+++ b/src/extent_dss.c
@@ -1,4 +1,4 @@
-#define JEMALLOC_CHUNK_DSS_C_
+#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
@@ -29,7 +29,7 @@ static void *dss_max;
/******************************************************************************/
static void *
-chunk_dss_sbrk(intptr_t increment)
+extent_dss_sbrk(intptr_t increment)
{
#ifdef JEMALLOC_DSS
@@ -41,7 +41,7 @@ chunk_dss_sbrk(intptr_t increment)
}
dss_prec_t
-chunk_dss_prec_get(tsdn_t *tsdn)
+extent_dss_prec_get(tsdn_t *tsdn)
{
dss_prec_t ret;
@@ -54,7 +54,7 @@ chunk_dss_prec_get(tsdn_t *tsdn)
}
bool
-chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
+extent_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
{
if (!have_dss)
@@ -66,7 +66,7 @@ chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
}
void *
-chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit)
{
void *ret;
@@ -104,7 +104,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
break;
/* Get the current end of the DSS. */
- dss_max = chunk_dss_sbrk(0);
+ dss_max = extent_dss_sbrk(0);
/* Make sure the earlier condition still holds. */
if (new_addr != NULL && dss_max != new_addr)
@@ -128,7 +128,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
(uintptr_t)dss_next < (uintptr_t)dss_max)
break; /* Wrap-around. */
incr = pad_size + size;
- dss_prev = chunk_dss_sbrk(incr);
+ dss_prev = extent_dss_sbrk(incr);
if (dss_prev == (void *)-1)
break;
if (dss_prev == dss_max) {
@@ -138,7 +138,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if (pad_size != 0) {
extent_hooks_t extent_hooks =
EXTENT_HOOKS_INITIALIZER;
- chunk_dalloc_wrapper(tsdn, arena,
+ extent_dalloc_wrapper(tsdn, arena,
&extent_hooks, pad);
} else
extent_dalloc(tsdn, arena, pad);
@@ -157,15 +157,15 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
bool
-chunk_in_dss(tsdn_t *tsdn, void *chunk)
+extent_in_dss(tsdn_t *tsdn, void *addr)
{
bool ret;
cassert(have_dss);
malloc_mutex_lock(tsdn, &dss_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)dss_base
- && (uintptr_t)chunk < (uintptr_t)dss_max)
+ if ((uintptr_t)addr >= (uintptr_t)dss_base
+ && (uintptr_t)addr < (uintptr_t)dss_max)
ret = true;
else
ret = false;
@@ -175,14 +175,14 @@ chunk_in_dss(tsdn_t *tsdn, void *chunk)
}
bool
-chunk_dss_boot(void)
+extent_dss_boot(void)
{
cassert(have_dss);
if (malloc_mutex_init(&dss_mtx, "dss", WITNESS_RANK_DSS))
return (true);
- dss_base = chunk_dss_sbrk(0);
+ dss_base = extent_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
@@ -190,7 +190,7 @@ chunk_dss_boot(void)
}
void
-chunk_dss_prefork(tsdn_t *tsdn)
+extent_dss_prefork(tsdn_t *tsdn)
{
if (have_dss)
@@ -198,7 +198,7 @@ chunk_dss_prefork(tsdn_t *tsdn)
}
void
-chunk_dss_postfork_parent(tsdn_t *tsdn)
+extent_dss_postfork_parent(tsdn_t *tsdn)
{
if (have_dss)
@@ -206,7 +206,7 @@ chunk_dss_postfork_parent(tsdn_t *tsdn)
}
void
-chunk_dss_postfork_child(tsdn_t *tsdn)
+extent_dss_postfork_child(tsdn_t *tsdn)
{
if (have_dss)
diff --git a/src/chunk_mmap.c b/src/extent_mmap.c
similarity index 83%
rename from src/chunk_mmap.c
rename to src/extent_mmap.c
index e1ee26f4..0dd3247e 100644
--- a/src/chunk_mmap.c
+++ b/src/extent_mmap.c
@@ -1,10 +1,10 @@
-#define JEMALLOC_CHUNK_MMAP_C_
+#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
+extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t alloc_size;
@@ -30,7 +30,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
}
void *
-chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit)
{
void *ret;
@@ -58,7 +58,7 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
+ return (extent_alloc_mmap_slow(size, alignment, zero, commit));
}
assert(ret != NULL);
@@ -67,10 +67,10 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
}
bool
-chunk_dalloc_mmap(void *chunk, size_t size)
+extent_dalloc_mmap(void *addr, size_t size)
{
if (config_munmap)
- pages_unmap(chunk, size);
+ pages_unmap(addr, size);
return (!config_munmap);
}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 03e61df6..82d2e6b3 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1067,7 +1067,7 @@ malloc_conf_init(void)
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
- if (chunk_dss_prec_set(NULL,
+ if (extent_dss_prec_set(NULL,
i)) {
malloc_conf_error(
"Error setting dss",
@@ -2686,7 +2686,7 @@ _malloc_prefork(void)
}
}
base_prefork(tsd_tsdn(tsd));
- chunk_prefork(tsd_tsdn(tsd));
+ extent_prefork(tsd_tsdn(tsd));
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
arena_prefork3(tsd_tsdn(tsd), arena);
@@ -2715,7 +2715,7 @@ _malloc_postfork(void)
witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
- chunk_postfork_parent(tsd_tsdn(tsd));
+ extent_postfork_parent(tsd_tsdn(tsd));
base_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
@@ -2740,7 +2740,7 @@ jemalloc_postfork_child(void)
witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
- chunk_postfork_child(tsd_tsdn(tsd));
+ extent_postfork_child(tsd_tsdn(tsd));
base_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
diff --git a/src/large.c b/src/large.c
index ce8d32fb..60a0745e 100644
--- a/src/large.c
+++ b/src/large.c
@@ -28,13 +28,13 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
/*
- * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * Copy zero into is_zeroed and pass the copy to extent_alloc(), so that
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn)))
arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_large(tsdn,
+ if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL)
return (NULL);
@@ -82,10 +82,10 @@ large_dalloc_maybe_junk(tsdn_t *tsdn, void *ptr, size_t usize)
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
- * Only bother junk filling if the chunk isn't about to be
+ * Only bother junk filling if the extent isn't about to be
* unmapped.
*/
- if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
+ if (!config_munmap || (have_dss && extent_in_dss(tsdn, ptr)))
large_dalloc_junk(ptr, usize);
memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
@@ -103,7 +103,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
/* Split excess pages. */
if (diff != 0) {
- extent_t *trail = chunk_split_wrapper(tsdn, arena,
+ extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + large_pad, usize, diff,
diff);
if (trail == NULL)
@@ -114,10 +114,10 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
extent_usize_get(trail));
}
- arena_chunk_cache_dalloc(tsdn, arena, &extent_hooks, trail);
+ arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, trail);
}
- arena_chunk_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
return (false);
}
@@ -133,18 +133,18 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
size_t trailsize = usize - extent_usize_get(extent);
extent_t *trail;
- if ((trail = arena_chunk_cache_alloc(tsdn, arena, &extent_hooks,
+ if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail))
== NULL) {
bool commit = true;
- if ((trail = chunk_alloc_wrapper(tsdn, arena, &extent_hooks,
+ if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL)
return (true);
}
- if (chunk_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
- chunk_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
+ if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
return (true);
}
@@ -174,7 +174,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
- arena_chunk_ralloc_large_expand(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
return (false);
}
@@ -209,7 +209,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
}
/*
- * Avoid moving the allocation if the existing chunk size accommodates
+ * Avoid moving the allocation if the existing extent size accommodates
* the new size.
*/
if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
@@ -287,7 +287,7 @@ large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked)
large_dalloc_maybe_junk(tsdn, extent_addr_get(extent),
extent_usize_get(extent));
}
- arena_chunk_dalloc_large(tsdn, arena, extent, junked_locked);
+ arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);
if (!junked_locked)
arena_decay_tick(tsdn, arena);
diff --git a/test/integration/chunk.c b/test/integration/extent.c
similarity index 72%
rename from test/integration/chunk.c
rename to test/integration/extent.c
index 10c4ba77..15b96a00 100644
--- a/test/integration/chunk.c
+++ b/test/integration/extent.c
@@ -25,7 +25,7 @@ static bool did_merge;
#endif
void *
-chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+extent_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
@@ -38,86 +38,86 @@ chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
}
bool
-chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
+extent_dalloc(void *addr, size_t size, bool committed, unsigned arena_ind)
{
- TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n",
- __func__, chunk, size, committed ? "true" : "false", arena_ind);
+ TRACE_HOOK("%s(addr=%p, size=%zu, committed=%s, arena_ind=%u)\n",
+ __func__, addr, size, committed ? "true" : "false", arena_ind);
did_dalloc = true;
if (!do_dalloc)
return (true);
- return (old_hooks.dalloc(chunk, size, committed, arena_ind));
+ return (old_hooks.dalloc(addr, size, committed, arena_ind));
}
bool
-chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+extent_commit(void *addr, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
bool err;
- TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
- "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ TRACE_HOOK("%s(addr=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, addr, size, offset, length,
arena_ind);
- err = old_hooks.commit(chunk, size, offset, length, arena_ind);
+ err = old_hooks.commit(addr, size, offset, length, arena_ind);
did_commit = !err;
return (err);
}
bool
-chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+extent_decommit(void *addr, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
bool err;
- TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
- "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ TRACE_HOOK("%s(addr=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, addr, size, offset, length,
arena_ind);
if (!do_decommit)
return (true);
- err = old_hooks.decommit(chunk, size, offset, length, arena_ind);
+ err = old_hooks.decommit(addr, size, offset, length, arena_ind);
did_decommit = !err;
return (err);
}
bool
-chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+extent_purge(void *addr, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
- TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
- "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ TRACE_HOOK("%s(addr=%p, size=%zu, offset=%zu, length=%zu "
+ "arena_ind=%u)\n", __func__, addr, size, offset, length,
arena_ind);
did_purge = true;
- return (old_hooks.purge(chunk, size, offset, length, arena_ind));
+ return (old_hooks.purge(addr, size, offset, length, arena_ind));
}
bool
-chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+extent_split(void *addr, size_t size, size_t size_a, size_t size_b,
bool committed, unsigned arena_ind)
{
- TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
- "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
+ TRACE_HOOK("%s(addr=%p, size=%zu, size_a=%zu, size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, addr, size, size_a,
size_b, committed ? "true" : "false", arena_ind);
did_split = true;
- return (old_hooks.split(chunk, size, size_a, size_b, committed,
+ return (old_hooks.split(addr, size, size_a, size_b, committed,
arena_ind));
}
bool
-chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+extent_merge(void *addr_a, size_t size_a, void *addr_b, size_t size_b,
bool committed, unsigned arena_ind)
{
- TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
- "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
+ TRACE_HOOK("%s(addr_a=%p, size_a=%zu, addr_b=%p size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, addr_a, size_a, addr_b,
size_b, committed ? "true" : "false", arena_ind);
did_merge = true;
- return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
+ return (old_hooks.merge(addr_a, size_a, addr_b, size_b,
committed, arena_ind));
}
-TEST_BEGIN(test_chunk)
+TEST_BEGIN(test_extent)
{
void *p;
size_t old_size, new_size, large0, large1, large2, sz;
@@ -126,13 +126,13 @@ TEST_BEGIN(test_chunk)
size_t hooks_mib[3], purge_mib[3];
size_t hooks_miblen, purge_miblen;
extent_hooks_t new_hooks = {
- chunk_alloc,
- chunk_dalloc,
- chunk_commit,
- chunk_decommit,
- chunk_purge,
- chunk_split,
- chunk_merge
+ extent_alloc,
+ extent_dalloc,
+ extent_commit,
+ extent_decommit,
+ extent_purge,
+ extent_split,
+ extent_merge
};
bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
@@ -151,16 +151,16 @@ TEST_BEGIN(test_chunk)
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
&new_hooks, new_size), 0, "Unexpected extent_hooks error");
orig_hooks = old_hooks;
- assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
- assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+ assert_ptr_ne(old_hooks.alloc, extent_alloc, "Unexpected alloc error");
+ assert_ptr_ne(old_hooks.dalloc, extent_dalloc,
"Unexpected dalloc error");
- assert_ptr_ne(old_hooks.commit, chunk_commit,
+ assert_ptr_ne(old_hooks.commit, extent_commit,
"Unexpected commit error");
- assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+ assert_ptr_ne(old_hooks.decommit, extent_decommit,
"Unexpected decommit error");
- assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
- assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
- assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
+ assert_ptr_ne(old_hooks.purge, extent_purge, "Unexpected purge error");
+ assert_ptr_ne(old_hooks.split, extent_split, "Unexpected split error");
+ assert_ptr_ne(old_hooks.merge, extent_merge, "Unexpected merge error");
/* Get large size classes. */
sz = sizeof(size_t);
@@ -249,5 +249,5 @@ int
main(void)
{
- return (test(test_chunk));
+ return (test(test_extent));
}