Disentangle arena and extent locking.
Refactor arena and extent locking protocols such that arena and
extent locks are never held when calling into the extent_*_wrapper()
API. This requires extra care during purging since the arena lock no
longer protects the inner purging logic. It also requires extra care to
protect extents from being merged with adjacent extents.
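
The calling convention this converges on is visible in the slab allocation
path; the following is a simplified sketch of the pattern used throughout the
diff (the old code unlocked and re-locked arena->lock around the wrapper call,
whereas the new code asserts that no core locks are held instead):

    static extent_t *
    arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
        extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info) {
            bool zero = false;
            bool commit = true;

            /* Neither arena->lock nor any extent lock may be held here. */
            witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
            return extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
                bin_info->slab_size, 0, PAGE, &zero, &commit, true);
    }
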
Convert extent_t's 'active' flag to an enumerated 'state', so that
retained extents are explicitly marked as such, rather than depending on
ring linkage state.
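
The flag's replacement is a small enumeration (introduced in the
extent_structs.h hunk below); the comments here paraphrase the surrounding
diff:

    typedef enum {
            extent_state_active   = 0, /* In use; replaces e_active == true. */
            extent_state_dirty    = 1, /* Cached, potentially dirty pages. */
            extent_state_retained = 2  /* Kept for reuse, not returned to the OS. */
    } extent_state_t;
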
Refactor the extent collections (and their synchronization) for cached
and retained extents into extents_t. Incorporate LRU functionality to
support purging. Incorporate page count accounting, which replaces
arena->ndirty and arena->stats.retained.
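
Both arena->extents_cached and arena->extents_retained become instances of the
new container; this sketch is condensed from the extent_structs.h hunk below,
with per-field synchronization noted in the comments:

    /* Quantized collection of extents, with built-in LRU queue. */
    struct extents_s {
            malloc_mutex_t mtx;
            extent_heap_t  heaps[NPSIZES+1]; /* Per size class heaps (mtx). */
            extent_list_t  lru;              /* LRU of all extents in heaps (mtx). */
            size_t         npages;           /* Page sum over heaps (atomic). */
            extent_state_t state;            /* All stored extents share this state. */
    };

Purging draws its candidates via extents_evict(), and reads of arena->ndirty
become extents_npages_get(&arena->extents_cached).
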
Assert that no core locks are held when entering any internal
[de]allocation functions. This is in addition to existing assertions
that no locks are held when entering external [de]allocation functions.
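
The new WITNESS_RANK_CORE rank makes this checkable as a depth assertion; the
two forms below mirror the calls added throughout the diff:

    /* Entering an internal [de]allocation path: no core locks may be held. */
    witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

    /* Entering arena_purge_to_limit() with only arena->lock held. */
    witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
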
Audit and document synchronization protocols for all arena_t fields.
This fixes a potential deadlock due to recursive allocation during
gdump, in a similar fashion to b49c649bc1
(Fix lock order reversal during gdump.), but with a necessarily much
broader code impact.
commit d27f29b468
parent 1b6e43507e
@@ -13,22 +13,17 @@ extern ssize_t opt_decay_time;

extern const arena_bin_info_t arena_bin_info[NBINS];

extent_t *arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero);
void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, bool cache);
void arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, bool cache);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, bool locked);
void arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
extent_t *extent);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,

@@ -66,8 +66,8 @@ struct arena_decay_s {
/*
* Number of dirty pages at beginning of current epoch. During epoch
* advancement we use the delta between arena->decay.ndirty and
* arena->ndirty to determine how many dirty pages, if any, were
* generated.
* extents_npages_get(&arena->extents_cached) to determine how many
* dirty pages, if any, were generated.
*/
size_t nunpurged;
/*

@@ -98,8 +98,8 @@ struct arena_bin_s {
*/
extent_heap_t slabs_nonfull;

/* Ring sentinel used to track full slabs. */
extent_t slabs_full;
/* List used to track full slabs. */
extent_list_t slabs_full;

/* Bin statistics. */
malloc_bin_stats_t stats;
@@ -107,84 +107,97 @@ struct arena_bin_s {

struct arena_s {
/*
* Number of threads currently assigned to this arena, synchronized via
* atomic operations. Each thread has two distinct assignments, one for
* application-serving allocation, and the other for internal metadata
* allocation. Internal metadata must not be allocated from arenas
* explicitly created via the arenas.create mallctl, because the
* arena.<i>.reset mallctl indiscriminately discards all allocations for
* the affected arena.
* Number of threads currently assigned to this arena. Each thread has
* two distinct assignments, one for application-serving allocation, and
* the other for internal metadata allocation. Internal metadata must
* not be allocated from arenas explicitly created via the arenas.create
* mallctl, because the arena.<i>.reset mallctl indiscriminately
* discards all allocations for the affected arena.
*
* 0: Application allocation.
* 1: Internal metadata allocation.
*
* Synchronization: atomic.
*/
unsigned nthreads[2];

/*
* There are three classes of arena operations from a locking
* perspective:
* 1) Thread assignment (modifies nthreads) is synchronized via atomics.
* 2) Bin-related operations are protected by bin locks.
* 3) Extent-related operations are protected by this mutex.
* Synchronizes various arena operations, as indicated in field-specific
* comments.
*/
malloc_mutex_t lock;

/* Synchronization: lock. */
arena_stats_t stats;
/*
* List of tcaches for extant threads associated with this arena.
* Stats from these are merged incrementally, and at exit if
* opt_stats_print is enabled.
*
* Synchronization: lock.
*/
ql_head(tcache_t) tcache_ql;

/* Synchronization: lock. */
uint64_t prof_accumbytes;

/*
* PRNG state for cache index randomization of large allocation base
* pointers.
*
* Synchronization: atomic.
*/
size_t offset_state;

/* Extent serial number generator state. */
/*
* Extent serial number generator state.
*
* Synchronization: atomic.
*/
size_t extent_sn_next;

/* Synchronization: lock. */
dss_prec_t dss_prec;

/* True if a thread is currently executing arena_purge_to_limit(). */
bool purging;
/*
* 1/0 (true/false) if a thread is currently executing
* arena_purge_to_limit().
*
* Synchronization: atomic.
*/
unsigned purging;

/* Number of pages in active extents. */
/*
* Number of pages in active extents.
*
* Synchronization: atomic.
*/
size_t nactive;

/*
* Current count of pages within unused extents that are potentially
* dirty, and for which pages_purge_*() has not been called. By
* tracking this, we can institute a limit on how much dirty unused
* memory is mapped for each arena.
* Decay-based purging state.
*
* Synchronization: lock.
*/
size_t ndirty;

/* Decay-based purging state. */
arena_decay_t decay;

/* Extant large allocations. */
ql_head(extent_t) large;
/*
* Extant large allocations.
*
* Synchronization: large_mtx.
*/
extent_list_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;

/*
* Heaps of extents that were previously allocated. These are used when
* allocating extents, in an attempt to re-use address space.
* Collections of extents that were previously allocated. These are
* used when allocating extents, in an attempt to re-use address space.
*
* Synchronization: internal.
*/
extent_heap_t extents_cached[NPSIZES+1];
extent_heap_t extents_retained[NPSIZES+1];
/*
* Ring sentinel used to track unused dirty memory. Dirty memory is
* managed as an LRU of cached extents.
*/
extent_t extents_dirty;
/* Protects extents_{cached,retained,dirty}. */
malloc_mutex_t extents_mtx;
extents_t extents_cached;
extents_t extents_retained;

/*
* Next extent size class in a growing series to use when satisfying a

@@ -192,17 +205,31 @@ struct arena_s {
* the number of disjoint virtual memory ranges so that extent merging
* can be effective even if multiple arenas' extent allocation requests
* are highly interleaved.
*
* Synchronization: atomic.
*/
pszind_t extent_grow_next;

/* Cache of extent structures that were allocated via base_alloc(). */
ql_head(extent_t) extent_cache;
malloc_mutex_t extent_cache_mtx;
/*
* Freelist of extent structures that were allocated via base_alloc().
*
* Synchronization: extent_freelist_mtx.
*/
extent_list_t extent_freelist;
malloc_mutex_t extent_freelist_mtx;

/* bins is used to store heaps of free regions. */
/*
* bins is used to store heaps of free regions.
*
* Synchronization: internal.
*/
arena_bin_t bins[NBINS];

/* Base allocator, from which arena metadata are allocated. */
/*
* Base allocator, from which arena metadata are allocated.
*
* Synchronization: internal.
*/
base_t *base;
};
@@ -21,9 +21,13 @@ size_t extent_size_quantize_ceil(size_t size);

ph_proto(, extent_heap_, extent_heap_t, extent_t)

extent_t *extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state);
extent_state_t extents_state_get(const extents_t *extents);
size_t extents_npages_get(extents_t *extents);
extent_t *extents_evict(tsdn_t *tsdn, extents_t *extents, size_t npages_min);
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab);
@@ -12,8 +12,7 @@ void *extent_before_get(const extent_t *extent);
void *extent_last_get(const extent_t *extent);
void *extent_past_get(const extent_t *extent);
size_t extent_sn_get(const extent_t *extent);
bool extent_active_get(const extent_t *extent);
bool extent_retained_get(const extent_t *extent);
extent_state_t extent_state_get(const extent_t *extent);
bool extent_zeroed_get(const extent_t *extent);
bool extent_committed_get(const extent_t *extent);
bool extent_slab_get(const extent_t *extent);

@@ -26,16 +25,19 @@ void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_usize_set(extent_t *extent, size_t usize);
void extent_sn_set(extent_t *extent, size_t sn);
void extent_active_set(extent_t *extent, bool active);
void extent_state_set(extent_t *extent, extent_state_t state);
void extent_zeroed_set(extent_t *extent, bool zeroed);
void extent_committed_set(extent_t *extent, bool committed);
void extent_slab_set(extent_t *extent, bool slab);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr,
size_t size, size_t usize, size_t sn, bool active, bool zeroed,
size_t size, size_t usize, size_t sn, extent_state_t state, bool zeroed,
bool committed, bool slab);
void extent_ring_insert(extent_t *sentinel, extent_t *extent);
void extent_ring_remove(extent_t *extent);
void extent_list_init(extent_list_t *list);
extent_t *extent_list_first(const extent_list_t *list);
extent_t *extent_list_last(const extent_list_t *list);
void extent_list_append(extent_list_t *list, extent_t *extent);
void extent_list_remove(extent_list_t *list, extent_t *extent);
int extent_sn_comp(const extent_t *a, const extent_t *b);
int extent_ad_comp(const extent_t *a, const extent_t *b);
int extent_snad_comp(const extent_t *a, const extent_t *b);

@@ -103,14 +105,9 @@ extent_sn_get(const extent_t *extent) {
return extent->e_sn;
}

JEMALLOC_INLINE bool
extent_active_get(const extent_t *extent) {
return extent->e_active;
}

JEMALLOC_INLINE bool
extent_retained_get(const extent_t *extent) {
return (qr_next(extent, qr_link) == extent);
JEMALLOC_INLINE extent_state_t
extent_state_get(const extent_t *extent) {
return extent->e_state;
}

JEMALLOC_INLINE bool

@@ -191,8 +188,8 @@ extent_sn_set(extent_t *extent, size_t sn) {
}

JEMALLOC_INLINE void
extent_active_set(extent_t *extent, bool active) {
extent->e_active = active;
extent_state_set(extent_t *extent, extent_state_t state) {
extent->e_state = state;
}

JEMALLOC_INLINE void

@@ -217,7 +214,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {

JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
size_t usize, size_t sn, bool active, bool zeroed, bool committed,
size_t usize, size_t sn, extent_state_t state, bool zeroed, bool committed,
bool slab) {
assert(addr == PAGE_ADDR2BASE(addr) || !slab);

@@ -226,24 +223,39 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
extent_size_set(extent, size);
extent_usize_set(extent, usize);
extent_sn_set(extent, sn);
extent_active_set(extent, active);
extent_state_set(extent, state);
extent_zeroed_set(extent, zeroed);
extent_committed_set(extent, committed);
extent_slab_set(extent, slab);
if (config_prof) {
extent_prof_tctx_set(extent, NULL);
}
qr_new(extent, qr_link);
ql_elm_new(extent, ql_link);
}

JEMALLOC_INLINE void
extent_ring_insert(extent_t *sentinel, extent_t *extent) {
qr_meld(sentinel, extent, extent_t, qr_link);
extent_list_init(extent_list_t *list) {
ql_new(list);
}

JEMALLOC_INLINE extent_t *
extent_list_first(const extent_list_t *list) {
return ql_first(list);
}

JEMALLOC_INLINE extent_t *
extent_list_last(const extent_list_t *list) {
return ql_last(list, ql_link);
}

JEMALLOC_INLINE void
extent_ring_remove(extent_t *extent) {
qr_remove(extent, qr_link);
extent_list_append(extent_list_t *list, extent_t *extent) {
ql_tail_insert(list, extent, ql_link);
}

JEMALLOC_INLINE void
extent_list_remove(extent_list_t *list, extent_t *extent) {
ql_remove(list, extent, ql_link);
}

JEMALLOC_INLINE int
@@ -1,6 +1,12 @@
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

typedef enum {
extent_state_active = 0,
extent_state_dirty = 1,
extent_state_retained = 2
} extent_state_t;

/* Extent (span of pages). Use accessor functions for e_* fields. */
struct extent_s {
/* Arena from which this extent came, if any. */

@@ -32,8 +38,8 @@ struct extent_s {
*/
size_t e_sn;

/* True if extent is active (in use). */
bool e_active;
/* Extent state. */
extent_state_t e_state;

/*
* The zeroed flag is used by extent recycling code to track whether

@@ -67,18 +73,48 @@ struct extent_s {
};

/*
* Linkage for arena's extents_dirty and arena_bin_t's slabs_full rings.
* List linkage, used by a variety of lists:
* - arena_bin_t's slabs_full
* - extents_t's LRU
* - stashed dirty extents
* - arena's large allocations
* - arena's extent structure freelist
*/
qr(extent_t) qr_link;
ql_elm(extent_t) ql_link;

union {
/* Linkage for per size class sn/address-ordered heaps. */
phn(extent_t) ph_link;

/* Linkage for arena's large and extent_cache lists. */
ql_elm(extent_t) ql_link;
};
/* Linkage for per size class sn/address-ordered heaps. */
phn(extent_t) ph_link;
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
malloc_mutex_t mtx;

/*
* Quantized per size class heaps of extents.
*
* Synchronization: mtx.
*/
extent_heap_t heaps[NPSIZES+1];

/*
* LRU of all extents in heaps.
*
* Synchronization: mtx.
*/
extent_list_t lru;

/*
* Page sum for all extents in heaps.
*
* Synchronization: atomic.
*/
size_t npages;

/* All stored extents must be in the same state. */
extent_state_t state;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H

typedef struct extent_s extent_t;
typedef struct extents_s extents_t;

#define EXTENT_HOOKS_INITIALIZER NULL
@@ -979,6 +979,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
narenas_auto);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_internal && likely(ret != NULL)) {

@@ -1004,6 +1005,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
narenas_auto);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);

@@ -1042,7 +1044,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
if (extent == NULL) {
return 0;
}
assert(extent_active_get(extent));
assert(extent_state_get(extent) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

@@ -1056,6 +1058,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
narenas_auto);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
ptr));

@@ -1073,6 +1076,7 @@ idalloc(tsd_t *tsd, extent_t *extent, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
tcache_t *tcache, bool slow_path) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
}

@@ -1080,6 +1084,7 @@ JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
void *p;
size_t usize, copysize;

@@ -1117,6 +1122,7 @@ iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {

@@ -1144,6 +1150,7 @@ ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero) {
assert(ptr != NULL);
assert(size != 0);
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
@@ -17,7 +17,8 @@ extern large_dalloc_maybe_junk_t *large_dalloc_maybe_junk;
void large_dalloc_junk(void *ptr, size_t usize);
void large_dalloc_maybe_junk(void *ptr, size_t usize);
#endif
void large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
@@ -25,11 +25,9 @@ arena_destroy
arena_dss_prec_get
arena_dss_prec_set
arena_extent_alloc_large
arena_extent_cache_alloc
arena_extent_cache_dalloc
arena_extent_cache_maybe_insert
arena_extent_cache_maybe_remove
arena_extent_dalloc_large
arena_extent_dalloc_large_finish
arena_extent_dalloc_large_prep
arena_extent_ralloc_large_expand
arena_extent_ralloc_large_shrink
arena_extent_sn_next

@@ -141,15 +139,12 @@ ctl_postfork_parent
ctl_prefork
decay_ticker_get
dss_prec_names
extent_active_get
extent_active_set
extent_ad_comp
extent_addr_get
extent_addr_randomize
extent_addr_set
extent_alloc
extent_alloc_cache
extent_alloc_cache_locked
extent_alloc_dss
extent_alloc_mmap
extent_alloc_wrapper

@@ -184,6 +179,10 @@ extent_hooks_set
extent_in_dss
extent_init
extent_last_get
extent_list_append
extent_list_first
extent_list_last
extent_list_remove
extent_lookup
extent_merge_wrapper
extent_past_get

@@ -191,9 +190,6 @@ extent_prof_tctx_get
extent_prof_tctx_set
extent_purge_forced_wrapper
extent_purge_lazy_wrapper
extent_retained_get
extent_ring_insert
extent_ring_remove
extent_size_get
extent_size_quantize_ceil
extent_size_quantize_floor

@@ -207,11 +203,20 @@ extent_sn_get
extent_sn_set
extent_snad_comp
extent_split_wrapper
extent_state_get
extent_state_set
extent_usize_get
extent_usize_set
extent_zeroed_get
extent_zeroed_set
extents_evict
extents_init
extents_npages_get
extents_prefork
extents_postfork_child
extents_postfork_parent
extents_rtree
extents_state_get
ffs_llu
ffs_lu
ffs_u

@@ -255,9 +260,10 @@ jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_dalloc
large_dalloc_finish
large_dalloc_junk
large_dalloc_junked_locked
large_dalloc_maybe_junk
large_dalloc_prep_junked_locked
large_malloc
large_palloc
large_prof_tctx_get
@@ -70,9 +70,14 @@ struct malloc_large_stats_s {
size_t curlextents;
};

/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
* requests.
*/
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/* Number of bytes currently mapped, excluding retained memory. */
size_t mapped; /* Derived. */

/*
* Number of bytes currently retained as a side effect of munmap() being

@@ -80,7 +85,7 @@ struct arena_stats_s {
* always decommitted or purged), but they are excluded from the mapped
* statistic (above).
*/
size_t retained;
size_t retained; /* Derived. */

/*
* Total number of purge sweeps, total number of madvise calls made,

@@ -91,9 +96,9 @@ struct arena_stats_s {
uint64_t nmadvise;
uint64_t purged;

size_t base;
size_t base; /* Derived. */
size_t internal; /* Protected via atomic_*_zu(). */
size_t resident;
size_t resident; /* Derived. */

size_t allocated_large;
uint64_t nmalloc_large;

@@ -101,7 +106,7 @@ struct arena_stats_s {
uint64_t nrequests_large;

/* Number of bytes cached in tcache associated with this arena. */
size_t tcache_bytes;
size_t tcache_bytes; /* Derived. */

/* One element for each large size class. */
malloc_large_stats_t lstats[NSIZES - NBINS];
@@ -26,9 +26,17 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
#define WITNESS_RANK_PROF_TDATA 7U
#define WITNESS_RANK_PROF_GCTX 8U

/*
* Used as an argument to witness_depth_to_rank() in order to validate depth
* excluding non-core locks with lower ranks. Since the rank argument to
* witness_depth_to_rank() is inclusive rather than exclusive, this definition
* can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 9U

#define WITNESS_RANK_ARENA 9U
#define WITNESS_RANK_ARENA_EXTENTS 10U
#define WITNESS_RANK_ARENA_EXTENT_CACHE 11U
#define WITNESS_RANK_EXTENTS 10U
#define WITNESS_RANK_EXTENT_FREELIST 11U

#define WITNESS_RANK_RTREE_ELM 12U
#define WITNESS_RANK_RTREE 13U
src/arena.c: 377 changed lines
@@ -37,75 +37,13 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,

/******************************************************************************/

static size_t
arena_extent_dirty_npages(const extent_t *extent) {
return (extent_size_get(extent) >> LG_PAGE);
}

static extent_t *
arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab) {
bool commit = true;

malloc_mutex_assert_owner(tsdn, &arena->lock);

return extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
pad, alignment, zero, &commit, slab);
}

extent_t *
arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
size_t alignment, bool *zero) {
extent_t *extent;

malloc_mutex_lock(tsdn, &arena->lock);
extent = arena_extent_cache_alloc_locked(tsdn, arena, r_extent_hooks,
new_addr, size, 0, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);

return extent;
}

static void
arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
malloc_mutex_assert_owner(tsdn, &arena->lock);

extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
arena_maybe_purge(tsdn, arena);
}

void
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent) {
malloc_mutex_lock(tsdn, &arena->lock);
arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent);
malloc_mutex_unlock(tsdn, &arena->lock);
}
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

void
arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool cache) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

if (cache) {
extent_ring_insert(&arena->extents_dirty, extent);
arena->ndirty += arena_extent_dirty_npages(extent);
}
}

void
arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool dirty) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

if (dirty) {
extent_ring_remove(extent);
assert(arena->ndirty >= arena_extent_dirty_npages(extent));
arena->ndirty -= arena_extent_dirty_npages(extent);
}
extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
arena_purge(tsdn, arena, false);
}

JEMALLOC_INLINE_C void *
@@ -180,13 +118,13 @@ arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
arena->nactive += add_pages;
atomic_add_zu(&arena->nactive, add_pages);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
assert(arena->nactive >= sub_pages);
arena->nactive -= sub_pages;
assert(atomic_read_zu(&arena->nactive) >= sub_pages);
atomic_sub_zu(&arena->nactive, sub_pages);
}

static void

@@ -269,6 +207,8 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
extent_t *extent;
bool commit = true;

witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

extent = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, usize,
large_pad, alignment, zero, &commit, false);
if (extent == NULL) {

@@ -291,6 +231,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
extent_t *extent;
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

malloc_mutex_lock(tsdn, &arena->lock);

/* Optimistically update stats. */

@@ -300,9 +242,11 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
}
arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);

extent = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks,
NULL, usize, large_pad, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);

bool commit = true;
extent = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL, usize,
large_pad, alignment, zero, &commit, false);
if (extent == NULL) {
extent = arena_extent_alloc_large_hard(tsdn, arena,
&extent_hooks, usize, alignment, zero);

@@ -312,10 +256,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
}

void
arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool locked) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

if (!locked) {
malloc_mutex_lock(tsdn, &arena->lock);
} else {

@@ -326,12 +268,17 @@ arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
extent_usize_get(extent));
arena->stats.mapped -= extent_size_get(extent);
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);

arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
if (!locked) {
malloc_mutex_unlock(tsdn, &arena->lock);
}
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
extent_t *extent) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
extent_dalloc_cache(tsdn, arena, &extent_hooks, extent);
}

void
@@ -414,8 +361,9 @@ arena_decay_backlog_npages_limit(const arena_t *arena) {

static void
arena_decay_backlog_update_last(arena_t *arena) {
size_t ndirty_delta = (arena->ndirty > arena->decay.nunpurged) ?
arena->ndirty - arena->decay.nunpurged : 0;
size_t ndirty = extents_npages_get(&arena->extents_cached);
size_t ndirty_delta = (ndirty > arena->decay.nunpurged) ? ndirty -
arena->decay.nunpurged : 0;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}

@@ -468,10 +416,15 @@ static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) {
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);

if (arena->ndirty > ndirty_limit) {
if (extents_npages_get(&arena->extents_cached) > ndirty_limit) {
arena_purge_to_limit(tsdn, arena, ndirty_limit);
}
arena->decay.nunpurged = arena->ndirty;
/*
* There may be concurrent ndirty fluctuation between the purge above
* and the nunpurged update below, but this is inconsequential to decay
* machinery correctness.
*/
arena->decay.nunpurged = extents_npages_get(&arena->extents_cached);
}

static void

@@ -492,7 +445,7 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) {
nstime_update(&arena->decay.epoch);
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay.nunpurged = arena->ndirty;
arena->decay.nunpurged = extents_npages_get(&arena->extents_cached);
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

@@ -540,9 +493,9 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
return false;
}

static void
arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
nstime_t time;
void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_owner(tsdn, &arena->lock);

/* Purge all or nothing if the option is disabled. */
if (arena->decay.time <= 0) {

@@ -552,6 +505,7 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
return;
}

nstime_t time;
nstime_init(&time, 0);
nstime_update(&time);
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
@@ -583,95 +537,40 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
}
}

void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_assert_owner(tsdn, &arena->lock);

/* Don't recursively purge. */
if (arena->purging) {
return;
}

arena_maybe_purge_helper(tsdn, arena);
}

static size_t
arena_dirty_count(tsdn_t *tsdn, arena_t *arena) {
extent_t *extent;
size_t ndirty = 0;

malloc_mutex_lock(tsdn, &arena->extents_mtx);

for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
&arena->extents_dirty; extent = qr_next(extent, qr_link)) {
ndirty += extent_size_get(extent) >> LG_PAGE;
}

malloc_mutex_unlock(tsdn, &arena->extents_mtx);

return ndirty;
}

static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
size_t ndirty_limit, extent_t *purge_extents_sentinel) {
extent_t *extent, *next;
size_t nstashed = 0;

malloc_mutex_lock(tsdn, &arena->extents_mtx);
size_t ndirty_limit, extent_list_t *purge_extents) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

/* Stash extents according to ndirty_limit. */
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
&arena->extents_dirty; extent = next) {
size_t npages;
bool zero, commit;
UNUSED extent_t *textent;

npages = extent_size_get(extent) >> LG_PAGE;
if (arena->ndirty - (nstashed + npages) < ndirty_limit) {
break;
}

next = qr_next(extent, qr_link);
/* Allocate. */
zero = false;
commit = false;
textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0, PAGE,
&zero, &commit, false);
assert(textent == extent);
assert(zero == extent_zeroed_get(extent));
extent_ring_remove(extent);
extent_ring_insert(purge_extents_sentinel, extent);

nstashed += npages;
size_t nstashed = 0;
for (extent_t *extent = extents_evict(tsdn, &arena->extents_cached,
ndirty_limit); extent != NULL; extent = extents_evict(tsdn,
&arena->extents_cached, ndirty_limit)) {
extent_list_append(purge_extents, extent);
nstashed += extent_size_get(extent) >> LG_PAGE;
}

malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return nstashed;
}

static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel) {
extent_hooks_t **r_extent_hooks, extent_list_t *purge_extents) {
UNUSED size_t nmadvise;
size_t npurged;
extent_t *extent, *next;

if (config_stats) {
nmadvise = 0;
}
npurged = 0;

for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
purge_extents_sentinel; extent = next) {
for (extent_t *extent = extent_list_first(purge_extents); extent !=
NULL; extent = extent_list_first(purge_extents)) {
if (config_stats) {
nmadvise++;
}
npurged += extent_size_get(extent) >> LG_PAGE;

next = qr_next(extent, qr_link);
extent_ring_remove(extent);
extent_list_remove(purge_extents, extent);
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
}

@@ -684,43 +583,44 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
}

/*
* ndirty_limit: Purge as many dirty extents as possible without violating the
* invariant: (arena->ndirty >= ndirty_limit)
* ndirty_limit: Purge as many dirty extents as possible without violating the
* invariant: (extents_npages_get(&arena->extents_cached) >= ndirty_limit)
*/
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
malloc_mutex_assert_owner(tsdn, &arena->lock);

if (atomic_cas_u(&arena->purging, 0, 1)) {
return;
}

extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t npurge, npurged;
extent_t purge_extents_sentinel;
extent_list_t purge_extents;

arena->purging = true;
extent_list_init(&purge_extents);

/*
* Calls to arena_dirty_count() are disabled even for debug builds
* because overhead grows nonlinearly as memory usage increases.
*/
if (false && config_debug) {
size_t ndirty = arena_dirty_count(tsdn, arena);
assert(ndirty == arena->ndirty);
}
extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, 0, false, false,
false, false);
malloc_mutex_unlock(tsdn, &arena->lock);

npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
&purge_extents_sentinel);
&purge_extents);
if (npurge == 0) {
malloc_mutex_lock(tsdn, &arena->lock);
goto label_return;
}
npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
&purge_extents_sentinel);
&purge_extents);
assert(npurged == npurge);

malloc_mutex_lock(tsdn, &arena->lock);

if (config_stats) {
arena->stats.npurge++;
}

label_return:
arena->purging = false;
atomic_write_u(&arena->purging, 0);
}

void
@@ -737,9 +637,14 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) {
static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
size_t npages = extent_size_get(slab) >> LG_PAGE;

arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, slab);
extent_dalloc_cache(tsdn, arena, &extent_hooks, slab);

arena_nactive_sub(arena, npages);
malloc_mutex_lock(tsdn, &arena->lock);
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
}

static void

@@ -768,19 +673,16 @@ arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
static void
arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab) {
assert(extent_slab_data_get(slab)->nfree == 0);
extent_ring_insert(&bin->slabs_full, slab);
extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(extent_t *slab) {
extent_ring_remove(slab);
arena_bin_slabs_full_remove(arena_bin_t *bin, extent_t *slab) {
extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
unsigned i;
extent_t *extent;

/*
* Locking in this function is unintuitive. The caller guarantees that
* no concurrent operations are happening in this arena, but there are

@@ -797,8 +699,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {

/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
for (extent = ql_last(&arena->large, ql_link); extent != NULL; extent =
ql_last(&arena->large, ql_link)) {

for (extent_t *extent = extent_list_first(&arena->large); extent !=
NULL; extent = extent_list_first(&arena->large)) {
void *ptr = extent_base_get(extent);
size_t usize;

@@ -819,10 +722,8 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);

/* Bins. */
for (i = 0; i < NBINS; i++) {
for (unsigned i = 0; i < NBINS; i++) {
extent_t *slab;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);

@@ -839,10 +740,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
for (slab = qr_next(&bin->slabs_full, qr_link); slab !=
&bin->slabs_full; slab = qr_next(&bin->slabs_full,
qr_link)) {
arena_bin_slabs_full_remove(slab);
for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
slab = extent_list_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);

@@ -854,17 +754,12 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}

assert(!arena->purging);
arena->nactive = 0;

malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
assert(atomic_read_u(&arena->purging) == 0);
atomic_write_zu(&arena->nactive, 0);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t i;

/*
* Iterate over the retained extents and blindly attempt to deallocate
* them. This gives the extent allocator underlying the extent hooks an

@@ -876,15 +771,11 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
* dss for arenas to be destroyed), or provide custom extent hooks that
* either unmap retained extents or track them for later use.
*/
for (i = 0; i < sizeof(arena->extents_retained)/sizeof(extent_heap_t);
i++) {
extent_heap_t *extents = &arena->extents_retained[i];
extent_t *extent;

while ((extent = extent_heap_remove_first(extents)) != NULL) {
extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks,
extent);
}
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
for (extent_t *extent = extents_evict(tsdn, &arena->extents_retained,
0); extent != NULL; extent = extents_evict(tsdn,
&arena->extents_retained, 0)) {
extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks, extent);
}
}

@@ -899,7 +790,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
* extents, so only retained extents may remain.
*/
assert(arena->ndirty == 0);
assert(extents_npages_get(&arena->extents_cached) == 0);

/* Attempt to deallocate retained memory. */
arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -929,12 +820,12 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
extent_t *slab;
bool zero, commit;

witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

zero = false;
commit = true;
malloc_mutex_unlock(tsdn, &arena->lock);
slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
malloc_mutex_lock(tsdn, &arena->lock);

return slab;
}

@@ -942,13 +833,13 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
const arena_bin_info_t *bin_info) {
extent_t *slab;
arena_slab_data_t *slab_data;
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);

extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
bool zero = false;

slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, true);
bool commit = true;
extent_t *slab = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
if (slab == NULL) {
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
bin_info);

@@ -958,10 +849,12 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
}
assert(extent_slab_get(slab));

malloc_mutex_lock(tsdn, &arena->lock);

arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

/* Initialize slab internals. */
slab_data = extent_slab_data_get(slab);
arena_slab_data_t *slab_data = extent_slab_data_get(slab);
slab_data->binind = binind;
slab_data->nfree = bin_info->nregs;
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);

@@ -969,6 +862,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
if (config_stats) {
arena->stats.mapped += extent_size_get(slab);
}
malloc_mutex_unlock(tsdn, &arena->lock);

return slab;
}

@@ -991,9 +885,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
/* Allocate a new slab. */
malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(tsdn, &arena->lock);
slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/
malloc_mutex_lock(tsdn, &bin->lock);
if (slab != NULL) {

@@ -1317,7 +1209,7 @@ arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) {
* into the non-full slabs heap.
*/
if (bin_info->nregs == 1) {
arena_bin_slabs_full_remove(slab);
arena_bin_slabs_full_remove(bin, slab);
} else {
arena_bin_slabs_nonfull_remove(bin, slab);
}

@@ -1331,9 +1223,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,

malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
malloc_mutex_lock(tsdn, &arena->lock);
arena_slab_dalloc(tsdn, arena, slab);
malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats) {

@@ -1385,7 +1275,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_dissociate_bin_slab(slab, bin);
arena_dalloc_bin_slab(tsdn, arena, slab, bin);
} else if (slab_data->nfree == 1 && slab != bin->slabcur) {
arena_bin_slabs_full_remove(slab);
arena_bin_slabs_full_remove(bin, slab);
arena_bin_lower_slab(tsdn, arena, slab, bin);
}

@@ -1554,8 +1444,8 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*decay_time = arena->decay.time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
*nactive += atomic_read_zu(&arena->nactive);
*ndirty += extents_npages_get(&arena->extents_cached);
}

void

@@ -1585,14 +1475,15 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
&base_mapped);

astats->mapped += base_mapped + arena->stats.mapped;
astats->retained += arena->stats.retained;
astats->retained += (extents_npages_get(&arena->extents_retained) <<
LG_PAGE);
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->base += base_allocated;
astats->internal += arena_internal_get(arena);
astats->resident += base_resident + (((arena->nactive + arena->ndirty)
<< LG_PAGE));
astats->resident += base_resident + (((atomic_read_zu(&arena->nactive) +
extents_npages_get(&arena->extents_cached)) << LG_PAGE));
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;

@@ -1709,28 +1600,22 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {

arena->dss_prec = extent_dss_prec_get();

arena->purging = false;
arena->nactive = 0;
arena->ndirty = 0;
atomic_write_u(&arena->purging, 0);
atomic_write_zu(&arena->nactive, 0);

arena_decay_init(arena, arena_decay_time_default_get());

ql_new(&arena->large);
extent_list_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE)) {
goto label_error;
}

for (i = 0; i < NPSIZES+1; i++) {
extent_heap_new(&arena->extents_cached[i]);
extent_heap_new(&arena->extents_retained[i]);
if (extents_init(tsdn, &arena->extents_cached, extent_state_dirty)) {
goto label_error;
}

extent_init(&arena->extents_dirty, arena, NULL, 0, 0, 0, false, false,
false, false);

if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
WITNESS_RANK_ARENA_EXTENTS)) {
if (extents_init(tsdn, &arena->extents_retained,
extent_state_retained)) {
goto label_error;
}

@@ -1738,9 +1623,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena->extent_grow_next = psz2ind(HUGEPAGE);
}

ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
WITNESS_RANK_ARENA_EXTENT_CACHE)) {
extent_list_init(&arena->extent_freelist);
if (malloc_mutex_init(&arena->extent_freelist_mtx, "extent_freelist",
WITNESS_RANK_EXTENT_FREELIST)) {
goto label_error;
}

@@ -1753,8 +1638,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
bin->slabcur = NULL;
extent_heap_new(&bin->slabs_nonfull);
extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
false, false, false);
extent_list_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
@@ -1782,12 +1666,13 @@ arena_prefork0(tsdn_t *tsdn, arena_t *arena) {

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extents_mtx);
extents_prefork(tsdn, &arena->extents_cached);
extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
malloc_mutex_prefork(tsdn, &arena->extent_freelist_mtx);
}

void

@@ -1810,8 +1695,9 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
}
base_postfork_parent(tsdn, arena->base);
malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extent_freelist_mtx);
extents_postfork_parent(tsdn, &arena->extents_cached);
extents_postfork_parent(tsdn, &arena->extents_retained);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

@@ -1824,7 +1710,8 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
}
base_postfork_child(tsdn, arena->base);
malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extent_freelist_mtx);
extents_postfork_child(tsdn, &arena->extents_cached);
extents_postfork_child(tsdn, &arena->extents_retained);
malloc_mutex_postfork_child(tsdn, &arena->lock);
}

@@ -87,7 +87,8 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
sn = *extent_sn_next;
(*extent_sn_next)++;

extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
extent_init(extent, NULL, addr, size, 0, sn, extent_state_active, true,
true, false);
}

static void *

@@ -104,7 +105,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
assert(extent_size_get(extent) >= *gap_size + size);
extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
extent_sn_get(extent), true, true, true, false);
extent_sn_get(extent), extent_state_active, true, true, false);
return ret;
}
src/extent.c: 628 changed lines (diff suppressed because it is too large)
@ -143,7 +143,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if (gap_size != 0) {
extent_init(gap, arena, gap_addr, gap_size,
gap_size, arena_extent_sn_next(arena),
false, false, true, false);
extent_state_active, false, true, false);
}
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)max_cur ||
@ -180,7 +180,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_t extent;

extent_init(&extent, arena, ret, size,
size, 0, true, false, true, false);
size, 0, extent_state_active, false,
true, false);
if (extent_purge_forced_wrapper(tsdn,
arena, &extent_hooks, &extent, 0,
size)) {
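Note: in the second hunk above, a temporary extent_t is built on the stack purely to describe the address range handed to extent_purge_forced_wrapper(); only the descriptor's initialization changes (explicit state instead of a flag). A generic sketch of that "descriptor only for the hook call" pattern, with made-up names (range_t, purge_hook_t, purge_range):

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	void *addr;
	size_t size;
} range_t;

/* Hook returns true on failure, mirroring the style of the code above. */
typedef bool (*purge_hook_t)(const range_t *range);

bool
purge_range(purge_hook_t purge, void *addr, size_t size) {
	range_t range;

	if (purge == NULL) {
		return true;
	}
	/* The descriptor exists only for the duration of the hook call. */
	range.addr = addr;
	range.size = size;
	return purge(&range);
}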
46 src/large.c
@ -40,8 +40,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,

/* Insert extent into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->large, extent, ql_link);
extent_list_append(&arena->large, extent);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
prof_idump(tsdn);
@ -138,19 +137,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool zero) {
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
bool is_zeroed_trail = false;
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t trailsize = usize - extent_usize_get(extent);
extent_t *trail;

if (extent_hooks->merge == NULL) {
return true;
}

if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
NULL) {
bool commit = true;
bool is_zeroed_trail = false;
bool commit = true;
extent_t *trail;
if ((trail = extent_alloc_cache(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE, &is_zeroed_trail,
&commit, false)) == NULL) {
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL) {
@ -291,32 +290,39 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
* independent of these considerations.
*/
static void
large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) {
arena_t *arena;

arena = extent_arena_get(extent);
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool junked_locked) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
ql_remove(&arena->large, extent, ql_link);
extent_list_remove(&arena->large, extent);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
if (!junked_locked) {
large_dalloc_maybe_junk(extent_addr_get(extent),
extent_usize_get(extent));
}
arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);
arena_extent_dalloc_large_prep(tsdn, arena, extent, junked_locked);
}

if (!junked_locked) {
arena_decay_tick(tsdn, arena);
}
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
arena_extent_dalloc_large_finish(tsdn, arena, extent);
}

void
large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_impl(tsdn, extent, true);
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
large_dalloc_impl(tsdn, extent, false);
arena_t *arena = extent_arena_get(extent);
large_dalloc_prep_impl(tsdn, arena, extent, false);
large_dalloc_finish_impl(tsdn, arena, extent);
arena_decay_tick(tsdn, arena);
}

size_t
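Note: the hunks above split large deallocation into a prep step (unlink the extent from arena->large under large_mtx, optionally junk-fill) and a finish step (hand the extent back to the extent layer), so the finish work runs with no other lock held. A self-contained sketch of that two-phase shape with invented names (obj_t, pool_t, obj_dalloc_prep/finish), not the jemalloc implementation:

#include <pthread.h>
#include <stdlib.h>

typedef struct obj_s {
	struct obj_s *prev, *next;  /* intrusive doubly-linked list */
	void *payload;
} obj_t;

typedef struct {
	pthread_mutex_t lock;
	obj_t head;                 /* sentinel for the live-object list */
} pool_t;

void
obj_dalloc_prep(pool_t *pool, obj_t *obj) {
	/* Phase 1: bookkeeping under the lock; free nothing yet. */
	pthread_mutex_lock(&pool->lock);
	obj->prev->next = obj->next;
	obj->next->prev = obj->prev;
	pthread_mutex_unlock(&pool->lock);
}

void
obj_dalloc_finish(obj_t *obj) {
	/* Phase 2: release memory with no pool lock held. */
	free(obj->payload);
	free(obj);
}

void
obj_dalloc(pool_t *pool, obj_t *obj) {
	obj_dalloc_prep(pool, obj);
	obj_dalloc_finish(obj);
}

A caller that already holds some other lock can run only the prep phase there and defer finish until after it unlocks, which is exactly what the tcache flush path below does.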
31 src/tcache.c
@ -170,17 +170,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache) {
arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;

assert(binind < nhbins);
assert(rem <= tbin->ncached);

arena = arena_choose(tsd, NULL);
arena_t *arena = arena_choose(tsd, NULL);
assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
unsigned nflush = tbin->ncached - rem;
while (nflush > 0) {
/* Lock the arena associated with the first object. */
extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
arena_t *locked_arena = extent_arena_get(extent);
@ -189,7 +187,17 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
if (config_prof) {
idump = false;
}

malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == locked_arena) {
large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
extent);
}
}
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@ -205,14 +213,15 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tbin->tstats.nrequests = 0;
}
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = *(tbin->avail - 1 - i);
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);

unsigned ndeferred = 0;
for (unsigned i = 0; i < nflush; i++) {
void *ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
extent = iealloc(tsd_tsdn(tsd), ptr);
if (extent_arena_get(extent) == locked_arena) {
large_dalloc_junked_locked(tsd_tsdn(tsd),
extent);
large_dalloc_finish(tsd_tsdn(tsd), extent);
} else {
/*
* This object was allocated via a different
@ -224,12 +233,12 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ndeferred++;
}
}
malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
if (config_prof && idump) {
prof_idump(tsd_tsdn(tsd));
}
arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
ndeferred);
nflush = ndeferred;
}
if (config_stats && !merged_stats) {
/*
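Note: the rewritten tcache_bin_flush_large() above now makes two passes per iteration: while the owning arena's lock is held it only runs the prep step for objects that belong to that arena, then it drops the lock and runs the finish step, deferring foreign-arena objects to a later iteration. A standalone sketch of that loop shape, with placeholder names (owner_t, item_t, flush_prep/finish) that are not jemalloc's:

#include <pthread.h>
#include <stddef.h>

typedef struct {
	pthread_mutex_t lock;
} owner_t;

typedef struct {
	owner_t *owner;
	int prepped;
	int finished;
} item_t;

void
flush_prep(item_t *it) {   /* caller must hold it->owner->lock */
	it->prepped = 1;
}

void
flush_finish(item_t *it) { /* caller must hold no locks */
	it->finished = 1;
}

void
flush_all(item_t **items, size_t nitems) {
	size_t nflush = nitems;

	while (nflush > 0) {
		owner_t *locked = items[0]->owner;
		size_t ndeferred = 0;

		/* Pass 1: under the owner's lock, prep only its own items. */
		pthread_mutex_lock(&locked->lock);
		for (size_t i = 0; i < nflush; i++) {
			if (items[i]->owner == locked) {
				flush_prep(items[i]);
			}
		}
		pthread_mutex_unlock(&locked->lock);

		/* Pass 2: finish those items unlocked; compact the rest. */
		for (size_t i = 0; i < nflush; i++) {
			if (items[i]->owner == locked) {
				flush_finish(items[i]);
			} else {
				items[ndeferred++] = items[i];
			}
		}
		nflush = ndeferred;
	}
}

Deferring the finish step until after the unlock is what keeps the extent-layer work out of the scope of the foreign arena's lock.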
@ -63,7 +63,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
if (extent == NULL) {
return 0;
}
if (!extent_active_get(extent)) {
if (extent_state_get(extent) != extent_state_active) {
return 0;
}
@ -8,8 +8,8 @@ TEST_BEGIN(test_arena_slab_regind) {
extent_t slab;
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
extent_init(&slab, NULL, mallocx(bin_info->slab_size,
MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0, true,
false, true, true);
MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0,
extent_state_active, false, true, true);
assert_ptr_not_null(extent_addr_get(&slab),
"Unexpected malloc() failure");
for (regind = 0; regind < bin_info->nregs; regind++) {
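Note: the test above builds a fake slab descriptor (now initialized with extent_state_active) and checks region indexing for every regind. As a reference point, the quantity being tested is simply the byte offset of a pointer within the slab divided by the region size; jemalloc's arena_slab_regind() computes the same value without a hardware divide. The helper below (slab_regind_ref) is an invented naive reference, not part of the library:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

size_t
slab_regind_ref(const void *slab_addr, size_t reg_size, const void *ptr) {
	size_t diff = (uintptr_t)ptr - (uintptr_t)slab_addr;

	assert(reg_size > 0);
	return diff / reg_size;
}

A test in this style can allocate a slab-sized buffer and assert that the optimized index of the first byte of each region equals the loop's regind.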