Disentangle arena and extent locking.
Refactor arena and extent locking protocols such that arena and
extent locks are never held when calling into the extent_*_wrapper()
API. This requires extra care during purging since the arena lock no
longer protects the inner purging logic. It also requires extra care to
protect extents from being merged with adjacent extents.
Convert extent_t's 'active' flag to an enumerated 'state', so that
retained extents are explicitly marked as such, rather than depending on
ring linkage state.
Refactor the extent collections (and their synchronization) for cached
and retained extents into extents_t. Incorporate LRU functionality to
support purging. Incorporate page count accounting, which replaces
arena->ndirty and arena->stats.retained.
Assert that no core locks are held when entering any internal
[de]allocation functions. This is in addition to existing assertions
that no locks are held when entering external [de]allocation functions.
Audit and document synchronization protocols for all arena_t fields.
This fixes a potential deadlock due to recursive allocation during
gdump, in a similar fashion to b49c649bc1
(Fix lock order reversal during gdump.), but with a necessarily much
broader code impact.
This commit is contained in:
parent
1b6e43507e
commit
d27f29b468
@ -13,22 +13,17 @@ extern ssize_t opt_decay_time;
|
|||||||
|
|
||||||
extern const arena_bin_info_t arena_bin_info[NBINS];
|
extern const arena_bin_info_t arena_bin_info[NBINS];
|
||||||
|
|
||||||
extent_t *arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
|
|
||||||
size_t alignment, bool *zero);
|
|
||||||
void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
|
void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_hooks_t **r_extent_hooks, extent_t *extent);
|
extent_hooks_t **r_extent_hooks, extent_t *extent);
|
||||||
void arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_t *extent, bool cache);
|
|
||||||
void arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_t *extent, bool cache);
|
|
||||||
#ifdef JEMALLOC_JET
|
#ifdef JEMALLOC_JET
|
||||||
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
|
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
|
||||||
#endif
|
#endif
|
||||||
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
|
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
|
||||||
size_t usize, size_t alignment, bool *zero);
|
size_t usize, size_t alignment, bool *zero);
|
||||||
void arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,
|
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_t *extent, bool locked);
|
extent_t *extent, bool locked);
|
||||||
|
void arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
|
||||||
|
extent_t *extent);
|
||||||
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
|
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_t *extent, size_t oldsize);
|
extent_t *extent, size_t oldsize);
|
||||||
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
|
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
|
||||||
|
@ -66,8 +66,8 @@ struct arena_decay_s {
|
|||||||
/*
|
/*
|
||||||
* Number of dirty pages at beginning of current epoch. During epoch
|
* Number of dirty pages at beginning of current epoch. During epoch
|
||||||
* advancement we use the delta between arena->decay.ndirty and
|
* advancement we use the delta between arena->decay.ndirty and
|
||||||
* arena->ndirty to determine how many dirty pages, if any, were
|
* extents_npages_get(&arena->extents_cached) to determine how many
|
||||||
* generated.
|
* dirty pages, if any, were generated.
|
||||||
*/
|
*/
|
||||||
size_t nunpurged;
|
size_t nunpurged;
|
||||||
/*
|
/*
|
||||||
@ -98,8 +98,8 @@ struct arena_bin_s {
|
|||||||
*/
|
*/
|
||||||
extent_heap_t slabs_nonfull;
|
extent_heap_t slabs_nonfull;
|
||||||
|
|
||||||
/* Ring sentinel used to track full slabs. */
|
/* List used to track full slabs. */
|
||||||
extent_t slabs_full;
|
extent_list_t slabs_full;
|
||||||
|
|
||||||
/* Bin statistics. */
|
/* Bin statistics. */
|
||||||
malloc_bin_stats_t stats;
|
malloc_bin_stats_t stats;
|
||||||
@ -107,84 +107,97 @@ struct arena_bin_s {
|
|||||||
|
|
||||||
struct arena_s {
|
struct arena_s {
|
||||||
/*
|
/*
|
||||||
* Number of threads currently assigned to this arena, synchronized via
|
* Number of threads currently assigned to this arena. Each thread has
|
||||||
* atomic operations. Each thread has two distinct assignments, one for
|
* two distinct assignments, one for application-serving allocation, and
|
||||||
* application-serving allocation, and the other for internal metadata
|
* the other for internal metadata allocation. Internal metadata must
|
||||||
* allocation. Internal metadata must not be allocated from arenas
|
* not be allocated from arenas explicitly created via the arenas.create
|
||||||
* explicitly created via the arenas.create mallctl, because the
|
* mallctl, because the arena.<i>.reset mallctl indiscriminately
|
||||||
* arena.<i>.reset mallctl indiscriminately discards all allocations for
|
* discards all allocations for the affected arena.
|
||||||
* the affected arena.
|
|
||||||
*
|
*
|
||||||
* 0: Application allocation.
|
* 0: Application allocation.
|
||||||
* 1: Internal metadata allocation.
|
* 1: Internal metadata allocation.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
*/
|
*/
|
||||||
unsigned nthreads[2];
|
unsigned nthreads[2];
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There are three classes of arena operations from a locking
|
* Synchronizes various arena operations, as indicated in field-specific
|
||||||
* perspective:
|
* comments.
|
||||||
* 1) Thread assignment (modifies nthreads) is synchronized via atomics.
|
|
||||||
* 2) Bin-related operations are protected by bin locks.
|
|
||||||
* 3) Extent-related operations are protected by this mutex.
|
|
||||||
*/
|
*/
|
||||||
malloc_mutex_t lock;
|
malloc_mutex_t lock;
|
||||||
|
|
||||||
|
/* Synchronization: lock. */
|
||||||
arena_stats_t stats;
|
arena_stats_t stats;
|
||||||
/*
|
/*
|
||||||
* List of tcaches for extant threads associated with this arena.
|
* List of tcaches for extant threads associated with this arena.
|
||||||
* Stats from these are merged incrementally, and at exit if
|
* Stats from these are merged incrementally, and at exit if
|
||||||
* opt_stats_print is enabled.
|
* opt_stats_print is enabled.
|
||||||
|
*
|
||||||
|
* Synchronization: lock.
|
||||||
*/
|
*/
|
||||||
ql_head(tcache_t) tcache_ql;
|
ql_head(tcache_t) tcache_ql;
|
||||||
|
|
||||||
|
/* Synchronization: lock. */
|
||||||
uint64_t prof_accumbytes;
|
uint64_t prof_accumbytes;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* PRNG state for cache index randomization of large allocation base
|
* PRNG state for cache index randomization of large allocation base
|
||||||
* pointers.
|
* pointers.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
*/
|
*/
|
||||||
size_t offset_state;
|
size_t offset_state;
|
||||||
|
|
||||||
/* Extent serial number generator state. */
|
/*
|
||||||
|
* Extent serial number generator state.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
|
*/
|
||||||
size_t extent_sn_next;
|
size_t extent_sn_next;
|
||||||
|
|
||||||
|
/* Synchronization: lock. */
|
||||||
dss_prec_t dss_prec;
|
dss_prec_t dss_prec;
|
||||||
|
|
||||||
/* True if a thread is currently executing arena_purge_to_limit(). */
|
/*
|
||||||
bool purging;
|
* 1/0 (true/false) if a thread is currently executing
|
||||||
|
* arena_purge_to_limit().
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
|
*/
|
||||||
|
unsigned purging;
|
||||||
|
|
||||||
/* Number of pages in active extents. */
|
/*
|
||||||
|
* Number of pages in active extents.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
|
*/
|
||||||
size_t nactive;
|
size_t nactive;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Current count of pages within unused extents that are potentially
|
* Decay-based purging state.
|
||||||
* dirty, and for which pages_purge_*() has not been called. By
|
*
|
||||||
* tracking this, we can institute a limit on how much dirty unused
|
* Synchronization: lock.
|
||||||
* memory is mapped for each arena.
|
|
||||||
*/
|
*/
|
||||||
size_t ndirty;
|
|
||||||
|
|
||||||
/* Decay-based purging state. */
|
|
||||||
arena_decay_t decay;
|
arena_decay_t decay;
|
||||||
|
|
||||||
/* Extant large allocations. */
|
/*
|
||||||
ql_head(extent_t) large;
|
* Extant large allocations.
|
||||||
|
*
|
||||||
|
* Synchronization: large_mtx.
|
||||||
|
*/
|
||||||
|
extent_list_t large;
|
||||||
/* Synchronizes all large allocation/update/deallocation. */
|
/* Synchronizes all large allocation/update/deallocation. */
|
||||||
malloc_mutex_t large_mtx;
|
malloc_mutex_t large_mtx;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Heaps of extents that were previously allocated. These are used when
|
* Collections of extents that were previously allocated. These are
|
||||||
* allocating extents, in an attempt to re-use address space.
|
* used when allocating extents, in an attempt to re-use address space.
|
||||||
|
*
|
||||||
|
* Synchronization: internal.
|
||||||
*/
|
*/
|
||||||
extent_heap_t extents_cached[NPSIZES+1];
|
extents_t extents_cached;
|
||||||
extent_heap_t extents_retained[NPSIZES+1];
|
extents_t extents_retained;
|
||||||
/*
|
|
||||||
* Ring sentinel used to track unused dirty memory. Dirty memory is
|
|
||||||
* managed as an LRU of cached extents.
|
|
||||||
*/
|
|
||||||
extent_t extents_dirty;
|
|
||||||
/* Protects extents_{cached,retained,dirty}. */
|
|
||||||
malloc_mutex_t extents_mtx;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Next extent size class in a growing series to use when satisfying a
|
* Next extent size class in a growing series to use when satisfying a
|
||||||
@ -192,17 +205,31 @@ struct arena_s {
|
|||||||
* the number of disjoint virtual memory ranges so that extent merging
|
* the number of disjoint virtual memory ranges so that extent merging
|
||||||
* can be effective even if multiple arenas' extent allocation requests
|
* can be effective even if multiple arenas' extent allocation requests
|
||||||
* are highly interleaved.
|
* are highly interleaved.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
*/
|
*/
|
||||||
pszind_t extent_grow_next;
|
pszind_t extent_grow_next;
|
||||||
|
|
||||||
/* Cache of extent structures that were allocated via base_alloc(). */
|
/*
|
||||||
ql_head(extent_t) extent_cache;
|
* Freelist of extent structures that were allocated via base_alloc().
|
||||||
malloc_mutex_t extent_cache_mtx;
|
*
|
||||||
|
* Synchronization: extent_freelist_mtx.
|
||||||
|
*/
|
||||||
|
extent_list_t extent_freelist;
|
||||||
|
malloc_mutex_t extent_freelist_mtx;
|
||||||
|
|
||||||
/* bins is used to store heaps of free regions. */
|
/*
|
||||||
|
* bins is used to store heaps of free regions.
|
||||||
|
*
|
||||||
|
* Synchronization: internal.
|
||||||
|
*/
|
||||||
arena_bin_t bins[NBINS];
|
arena_bin_t bins[NBINS];
|
||||||
|
|
||||||
/* Base allocator, from which arena metadata are allocated. */
|
/*
|
||||||
|
* Base allocator, from which arena metadata are allocated.
|
||||||
|
*
|
||||||
|
* Synchronization: internal.
|
||||||
|
*/
|
||||||
base_t *base;
|
base_t *base;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -21,9 +21,13 @@ size_t extent_size_quantize_ceil(size_t size);
|
|||||||
|
|
||||||
ph_proto(, extent_heap_, extent_heap_t, extent_t)
|
ph_proto(, extent_heap_, extent_heap_t, extent_t)
|
||||||
|
|
||||||
extent_t *extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
|
bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state);
|
||||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
|
extent_state_t extents_state_get(const extents_t *extents);
|
||||||
size_t alignment, bool *zero, bool *commit, bool slab);
|
size_t extents_npages_get(extents_t *extents);
|
||||||
|
extent_t *extents_evict(tsdn_t *tsdn, extents_t *extents, size_t npages_min);
|
||||||
|
void extents_prefork(tsdn_t *tsdn, extents_t *extents);
|
||||||
|
void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
|
||||||
|
void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
|
||||||
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
|
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
|
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
|
||||||
size_t alignment, bool *zero, bool *commit, bool slab);
|
size_t alignment, bool *zero, bool *commit, bool slab);
|
||||||
|
@ -12,8 +12,7 @@ void *extent_before_get(const extent_t *extent);
|
|||||||
void *extent_last_get(const extent_t *extent);
|
void *extent_last_get(const extent_t *extent);
|
||||||
void *extent_past_get(const extent_t *extent);
|
void *extent_past_get(const extent_t *extent);
|
||||||
size_t extent_sn_get(const extent_t *extent);
|
size_t extent_sn_get(const extent_t *extent);
|
||||||
bool extent_active_get(const extent_t *extent);
|
extent_state_t extent_state_get(const extent_t *extent);
|
||||||
bool extent_retained_get(const extent_t *extent);
|
|
||||||
bool extent_zeroed_get(const extent_t *extent);
|
bool extent_zeroed_get(const extent_t *extent);
|
||||||
bool extent_committed_get(const extent_t *extent);
|
bool extent_committed_get(const extent_t *extent);
|
||||||
bool extent_slab_get(const extent_t *extent);
|
bool extent_slab_get(const extent_t *extent);
|
||||||
@ -26,16 +25,19 @@ void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
|
|||||||
void extent_size_set(extent_t *extent, size_t size);
|
void extent_size_set(extent_t *extent, size_t size);
|
||||||
void extent_usize_set(extent_t *extent, size_t usize);
|
void extent_usize_set(extent_t *extent, size_t usize);
|
||||||
void extent_sn_set(extent_t *extent, size_t sn);
|
void extent_sn_set(extent_t *extent, size_t sn);
|
||||||
void extent_active_set(extent_t *extent, bool active);
|
void extent_state_set(extent_t *extent, extent_state_t state);
|
||||||
void extent_zeroed_set(extent_t *extent, bool zeroed);
|
void extent_zeroed_set(extent_t *extent, bool zeroed);
|
||||||
void extent_committed_set(extent_t *extent, bool committed);
|
void extent_committed_set(extent_t *extent, bool committed);
|
||||||
void extent_slab_set(extent_t *extent, bool slab);
|
void extent_slab_set(extent_t *extent, bool slab);
|
||||||
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
|
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
|
||||||
void extent_init(extent_t *extent, arena_t *arena, void *addr,
|
void extent_init(extent_t *extent, arena_t *arena, void *addr,
|
||||||
size_t size, size_t usize, size_t sn, bool active, bool zeroed,
|
size_t size, size_t usize, size_t sn, extent_state_t state, bool zeroed,
|
||||||
bool committed, bool slab);
|
bool committed, bool slab);
|
||||||
void extent_ring_insert(extent_t *sentinel, extent_t *extent);
|
void extent_list_init(extent_list_t *list);
|
||||||
void extent_ring_remove(extent_t *extent);
|
extent_t *extent_list_first(const extent_list_t *list);
|
||||||
|
extent_t *extent_list_last(const extent_list_t *list);
|
||||||
|
void extent_list_append(extent_list_t *list, extent_t *extent);
|
||||||
|
void extent_list_remove(extent_list_t *list, extent_t *extent);
|
||||||
int extent_sn_comp(const extent_t *a, const extent_t *b);
|
int extent_sn_comp(const extent_t *a, const extent_t *b);
|
||||||
int extent_ad_comp(const extent_t *a, const extent_t *b);
|
int extent_ad_comp(const extent_t *a, const extent_t *b);
|
||||||
int extent_snad_comp(const extent_t *a, const extent_t *b);
|
int extent_snad_comp(const extent_t *a, const extent_t *b);
|
||||||
@ -103,14 +105,9 @@ extent_sn_get(const extent_t *extent) {
|
|||||||
return extent->e_sn;
|
return extent->e_sn;
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE bool
|
JEMALLOC_INLINE extent_state_t
|
||||||
extent_active_get(const extent_t *extent) {
|
extent_state_get(const extent_t *extent) {
|
||||||
return extent->e_active;
|
return extent->e_state;
|
||||||
}
|
|
||||||
|
|
||||||
JEMALLOC_INLINE bool
|
|
||||||
extent_retained_get(const extent_t *extent) {
|
|
||||||
return (qr_next(extent, qr_link) == extent);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE bool
|
JEMALLOC_INLINE bool
|
||||||
@ -191,8 +188,8 @@ extent_sn_set(extent_t *extent, size_t sn) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_INLINE void
|
||||||
extent_active_set(extent_t *extent, bool active) {
|
extent_state_set(extent_t *extent, extent_state_t state) {
|
||||||
extent->e_active = active;
|
extent->e_state = state;
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_INLINE void
|
||||||
@ -217,7 +214,7 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
|
|||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_INLINE void
|
||||||
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
||||||
size_t usize, size_t sn, bool active, bool zeroed, bool committed,
|
size_t usize, size_t sn, extent_state_t state, bool zeroed, bool committed,
|
||||||
bool slab) {
|
bool slab) {
|
||||||
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
|
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
|
||||||
|
|
||||||
@ -226,24 +223,39 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
|
|||||||
extent_size_set(extent, size);
|
extent_size_set(extent, size);
|
||||||
extent_usize_set(extent, usize);
|
extent_usize_set(extent, usize);
|
||||||
extent_sn_set(extent, sn);
|
extent_sn_set(extent, sn);
|
||||||
extent_active_set(extent, active);
|
extent_state_set(extent, state);
|
||||||
extent_zeroed_set(extent, zeroed);
|
extent_zeroed_set(extent, zeroed);
|
||||||
extent_committed_set(extent, committed);
|
extent_committed_set(extent, committed);
|
||||||
extent_slab_set(extent, slab);
|
extent_slab_set(extent, slab);
|
||||||
if (config_prof) {
|
if (config_prof) {
|
||||||
extent_prof_tctx_set(extent, NULL);
|
extent_prof_tctx_set(extent, NULL);
|
||||||
}
|
}
|
||||||
qr_new(extent, qr_link);
|
ql_elm_new(extent, ql_link);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_INLINE void
|
||||||
extent_ring_insert(extent_t *sentinel, extent_t *extent) {
|
extent_list_init(extent_list_t *list) {
|
||||||
qr_meld(sentinel, extent, extent_t, qr_link);
|
ql_new(list);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_INLINE extent_t *
|
||||||
|
extent_list_first(const extent_list_t *list) {
|
||||||
|
return ql_first(list);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_INLINE extent_t *
|
||||||
|
extent_list_last(const extent_list_t *list) {
|
||||||
|
return ql_last(list, ql_link);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE void
|
JEMALLOC_INLINE void
|
||||||
extent_ring_remove(extent_t *extent) {
|
extent_list_append(extent_list_t *list, extent_t *extent) {
|
||||||
qr_remove(extent, qr_link);
|
ql_tail_insert(list, extent, ql_link);
|
||||||
|
}
|
||||||
|
|
||||||
|
JEMALLOC_INLINE void
|
||||||
|
extent_list_remove(extent_list_t *list, extent_t *extent) {
|
||||||
|
ql_remove(list, extent, ql_link);
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE int
|
JEMALLOC_INLINE int
|
||||||
|
@ -1,6 +1,12 @@
|
|||||||
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
||||||
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
extent_state_active = 0,
|
||||||
|
extent_state_dirty = 1,
|
||||||
|
extent_state_retained = 2
|
||||||
|
} extent_state_t;
|
||||||
|
|
||||||
/* Extent (span of pages). Use accessor functions for e_* fields. */
|
/* Extent (span of pages). Use accessor functions for e_* fields. */
|
||||||
struct extent_s {
|
struct extent_s {
|
||||||
/* Arena from which this extent came, if any. */
|
/* Arena from which this extent came, if any. */
|
||||||
@ -32,8 +38,8 @@ struct extent_s {
|
|||||||
*/
|
*/
|
||||||
size_t e_sn;
|
size_t e_sn;
|
||||||
|
|
||||||
/* True if extent is active (in use). */
|
/* Extent state. */
|
||||||
bool e_active;
|
extent_state_t e_state;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The zeroed flag is used by extent recycling code to track whether
|
* The zeroed flag is used by extent recycling code to track whether
|
||||||
@ -67,18 +73,48 @@ struct extent_s {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Linkage for arena's extents_dirty and arena_bin_t's slabs_full rings.
|
* List linkage, used by a variety of lists:
|
||||||
|
* - arena_bin_t's slabs_full
|
||||||
|
* - extents_t's LRU
|
||||||
|
* - stashed dirty extents
|
||||||
|
* - arena's large allocations
|
||||||
|
* - arena's extent structure freelist
|
||||||
*/
|
*/
|
||||||
qr(extent_t) qr_link;
|
ql_elm(extent_t) ql_link;
|
||||||
|
|
||||||
union {
|
/* Linkage for per size class sn/address-ordered heaps. */
|
||||||
/* Linkage for per size class sn/address-ordered heaps. */
|
phn(extent_t) ph_link;
|
||||||
phn(extent_t) ph_link;
|
|
||||||
|
|
||||||
/* Linkage for arena's large and extent_cache lists. */
|
|
||||||
ql_elm(extent_t) ql_link;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
typedef ql_head(extent_t) extent_list_t;
|
||||||
typedef ph(extent_t) extent_heap_t;
|
typedef ph(extent_t) extent_heap_t;
|
||||||
|
|
||||||
|
/* Quantized collection of extents, with built-in LRU queue. */
|
||||||
|
struct extents_s {
|
||||||
|
malloc_mutex_t mtx;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Quantized per size class heaps of extents.
|
||||||
|
*
|
||||||
|
* Synchronization: mtx.
|
||||||
|
*/
|
||||||
|
extent_heap_t heaps[NPSIZES+1];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* LRU of all extents in heaps.
|
||||||
|
*
|
||||||
|
* Synchronization: mtx.
|
||||||
|
*/
|
||||||
|
extent_list_t lru;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Page sum for all extents in heaps.
|
||||||
|
*
|
||||||
|
* Synchronization: atomic.
|
||||||
|
*/
|
||||||
|
size_t npages;
|
||||||
|
|
||||||
|
/* All stored extents must be in the same state. */
|
||||||
|
extent_state_t state;
|
||||||
|
};
|
||||||
|
|
||||||
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
|
#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
|
#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
|
||||||
|
|
||||||
typedef struct extent_s extent_t;
|
typedef struct extent_s extent_t;
|
||||||
|
typedef struct extents_s extents_t;
|
||||||
|
|
||||||
#define EXTENT_HOOKS_INITIALIZER NULL
|
#define EXTENT_HOOKS_INITIALIZER NULL
|
||||||
|
|
||||||
|
@ -979,6 +979,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
|
|||||||
assert(!is_internal || tcache == NULL);
|
assert(!is_internal || tcache == NULL);
|
||||||
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
|
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
|
||||||
narenas_auto);
|
narenas_auto);
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
|
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
|
||||||
if (config_stats && is_internal && likely(ret != NULL)) {
|
if (config_stats && is_internal && likely(ret != NULL)) {
|
||||||
@ -1004,6 +1005,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
|
|||||||
assert(!is_internal || tcache == NULL);
|
assert(!is_internal || tcache == NULL);
|
||||||
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
|
assert(!is_internal || arena == NULL || arena_ind_get(arena) <
|
||||||
narenas_auto);
|
narenas_auto);
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
|
ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
|
||||||
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
|
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
|
||||||
@ -1042,7 +1044,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
|
|||||||
if (extent == NULL) {
|
if (extent == NULL) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
assert(extent_active_get(extent));
|
assert(extent_state_get(extent) == extent_state_active);
|
||||||
/* Only slab members should be looked up via interior pointers. */
|
/* Only slab members should be looked up via interior pointers. */
|
||||||
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
|
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
|
||||||
|
|
||||||
@ -1056,6 +1058,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
|
|||||||
assert(!is_internal || tcache == NULL);
|
assert(!is_internal || tcache == NULL);
|
||||||
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
|
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
|
||||||
narenas_auto);
|
narenas_auto);
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
if (config_stats && is_internal) {
|
if (config_stats && is_internal) {
|
||||||
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
|
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, extent,
|
||||||
ptr));
|
ptr));
|
||||||
@ -1073,6 +1076,7 @@ idalloc(tsd_t *tsd, extent_t *extent, void *ptr) {
|
|||||||
JEMALLOC_ALWAYS_INLINE void
|
JEMALLOC_ALWAYS_INLINE void
|
||||||
isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
|
isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
|
||||||
tcache_t *tcache, bool slow_path) {
|
tcache_t *tcache, bool slow_path) {
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
|
arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1080,6 +1084,7 @@ JEMALLOC_ALWAYS_INLINE void *
|
|||||||
iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
|
iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
|
||||||
size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
|
size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
|
||||||
arena_t *arena) {
|
arena_t *arena) {
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
void *p;
|
void *p;
|
||||||
size_t usize, copysize;
|
size_t usize, copysize;
|
||||||
|
|
||||||
@ -1117,6 +1122,7 @@ iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
|
|||||||
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
|
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
|
||||||
assert(ptr != NULL);
|
assert(ptr != NULL);
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||||
!= 0) {
|
!= 0) {
|
||||||
@ -1144,6 +1150,7 @@ ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
|
|||||||
size_t extra, size_t alignment, bool zero) {
|
size_t extra, size_t alignment, bool zero) {
|
||||||
assert(ptr != NULL);
|
assert(ptr != NULL);
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
|
||||||
!= 0) {
|
!= 0) {
|
||||||
|
@ -17,7 +17,8 @@ extern large_dalloc_maybe_junk_t *large_dalloc_maybe_junk;
|
|||||||
void large_dalloc_junk(void *ptr, size_t usize);
|
void large_dalloc_junk(void *ptr, size_t usize);
|
||||||
void large_dalloc_maybe_junk(void *ptr, size_t usize);
|
void large_dalloc_maybe_junk(void *ptr, size_t usize);
|
||||||
#endif
|
#endif
|
||||||
void large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
|
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
|
||||||
|
void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
|
||||||
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
|
void large_dalloc(tsdn_t *tsdn, extent_t *extent);
|
||||||
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
|
size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
|
||||||
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
|
prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
|
||||||
|
@ -25,11 +25,9 @@ arena_destroy
|
|||||||
arena_dss_prec_get
|
arena_dss_prec_get
|
||||||
arena_dss_prec_set
|
arena_dss_prec_set
|
||||||
arena_extent_alloc_large
|
arena_extent_alloc_large
|
||||||
arena_extent_cache_alloc
|
|
||||||
arena_extent_cache_dalloc
|
arena_extent_cache_dalloc
|
||||||
arena_extent_cache_maybe_insert
|
arena_extent_dalloc_large_finish
|
||||||
arena_extent_cache_maybe_remove
|
arena_extent_dalloc_large_prep
|
||||||
arena_extent_dalloc_large
|
|
||||||
arena_extent_ralloc_large_expand
|
arena_extent_ralloc_large_expand
|
||||||
arena_extent_ralloc_large_shrink
|
arena_extent_ralloc_large_shrink
|
||||||
arena_extent_sn_next
|
arena_extent_sn_next
|
||||||
@ -141,15 +139,12 @@ ctl_postfork_parent
|
|||||||
ctl_prefork
|
ctl_prefork
|
||||||
decay_ticker_get
|
decay_ticker_get
|
||||||
dss_prec_names
|
dss_prec_names
|
||||||
extent_active_get
|
|
||||||
extent_active_set
|
|
||||||
extent_ad_comp
|
extent_ad_comp
|
||||||
extent_addr_get
|
extent_addr_get
|
||||||
extent_addr_randomize
|
extent_addr_randomize
|
||||||
extent_addr_set
|
extent_addr_set
|
||||||
extent_alloc
|
extent_alloc
|
||||||
extent_alloc_cache
|
extent_alloc_cache
|
||||||
extent_alloc_cache_locked
|
|
||||||
extent_alloc_dss
|
extent_alloc_dss
|
||||||
extent_alloc_mmap
|
extent_alloc_mmap
|
||||||
extent_alloc_wrapper
|
extent_alloc_wrapper
|
||||||
@ -184,6 +179,10 @@ extent_hooks_set
|
|||||||
extent_in_dss
|
extent_in_dss
|
||||||
extent_init
|
extent_init
|
||||||
extent_last_get
|
extent_last_get
|
||||||
|
extent_list_append
|
||||||
|
extent_list_first
|
||||||
|
extent_list_last
|
||||||
|
extent_list_remove
|
||||||
extent_lookup
|
extent_lookup
|
||||||
extent_merge_wrapper
|
extent_merge_wrapper
|
||||||
extent_past_get
|
extent_past_get
|
||||||
@ -191,9 +190,6 @@ extent_prof_tctx_get
|
|||||||
extent_prof_tctx_set
|
extent_prof_tctx_set
|
||||||
extent_purge_forced_wrapper
|
extent_purge_forced_wrapper
|
||||||
extent_purge_lazy_wrapper
|
extent_purge_lazy_wrapper
|
||||||
extent_retained_get
|
|
||||||
extent_ring_insert
|
|
||||||
extent_ring_remove
|
|
||||||
extent_size_get
|
extent_size_get
|
||||||
extent_size_quantize_ceil
|
extent_size_quantize_ceil
|
||||||
extent_size_quantize_floor
|
extent_size_quantize_floor
|
||||||
@ -207,11 +203,20 @@ extent_sn_get
|
|||||||
extent_sn_set
|
extent_sn_set
|
||||||
extent_snad_comp
|
extent_snad_comp
|
||||||
extent_split_wrapper
|
extent_split_wrapper
|
||||||
|
extent_state_get
|
||||||
|
extent_state_set
|
||||||
extent_usize_get
|
extent_usize_get
|
||||||
extent_usize_set
|
extent_usize_set
|
||||||
extent_zeroed_get
|
extent_zeroed_get
|
||||||
extent_zeroed_set
|
extent_zeroed_set
|
||||||
|
extents_evict
|
||||||
|
extents_init
|
||||||
|
extents_npages_get
|
||||||
|
extents_prefork
|
||||||
|
extents_postfork_child
|
||||||
|
extents_postfork_parent
|
||||||
extents_rtree
|
extents_rtree
|
||||||
|
extents_state_get
|
||||||
ffs_llu
|
ffs_llu
|
||||||
ffs_lu
|
ffs_lu
|
||||||
ffs_u
|
ffs_u
|
||||||
@ -255,9 +260,10 @@ jemalloc_postfork_child
|
|||||||
jemalloc_postfork_parent
|
jemalloc_postfork_parent
|
||||||
jemalloc_prefork
|
jemalloc_prefork
|
||||||
large_dalloc
|
large_dalloc
|
||||||
|
large_dalloc_finish
|
||||||
large_dalloc_junk
|
large_dalloc_junk
|
||||||
large_dalloc_junked_locked
|
|
||||||
large_dalloc_maybe_junk
|
large_dalloc_maybe_junk
|
||||||
|
large_dalloc_prep_junked_locked
|
||||||
large_malloc
|
large_malloc
|
||||||
large_palloc
|
large_palloc
|
||||||
large_prof_tctx_get
|
large_prof_tctx_get
|
||||||
|
@ -70,9 +70,14 @@ struct malloc_large_stats_s {
|
|||||||
size_t curlextents;
|
size_t curlextents;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Arena stats. Note that fields marked "derived" are not directly maintained
|
||||||
|
* within the arena code; rather their values are derived during stats merge
|
||||||
|
* requests.
|
||||||
|
*/
|
||||||
struct arena_stats_s {
|
struct arena_stats_s {
|
||||||
/* Number of bytes currently mapped. */
|
/* Number of bytes currently mapped, excluding retained memory. */
|
||||||
size_t mapped;
|
size_t mapped; /* Derived. */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Number of bytes currently retained as a side effect of munmap() being
|
* Number of bytes currently retained as a side effect of munmap() being
|
||||||
@ -80,7 +85,7 @@ struct arena_stats_s {
|
|||||||
* always decommitted or purged), but they are excluded from the mapped
|
* always decommitted or purged), but they are excluded from the mapped
|
||||||
* statistic (above).
|
* statistic (above).
|
||||||
*/
|
*/
|
||||||
size_t retained;
|
size_t retained; /* Derived. */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Total number of purge sweeps, total number of madvise calls made,
|
* Total number of purge sweeps, total number of madvise calls made,
|
||||||
@ -91,9 +96,9 @@ struct arena_stats_s {
|
|||||||
uint64_t nmadvise;
|
uint64_t nmadvise;
|
||||||
uint64_t purged;
|
uint64_t purged;
|
||||||
|
|
||||||
size_t base;
|
size_t base; /* Derived. */
|
||||||
size_t internal; /* Protected via atomic_*_zu(). */
|
size_t internal; /* Protected via atomic_*_zu(). */
|
||||||
size_t resident;
|
size_t resident; /* Derived. */
|
||||||
|
|
||||||
size_t allocated_large;
|
size_t allocated_large;
|
||||||
uint64_t nmalloc_large;
|
uint64_t nmalloc_large;
|
||||||
@ -101,7 +106,7 @@ struct arena_stats_s {
|
|||||||
uint64_t nrequests_large;
|
uint64_t nrequests_large;
|
||||||
|
|
||||||
/* Number of bytes cached in tcache associated with this arena. */
|
/* Number of bytes cached in tcache associated with this arena. */
|
||||||
size_t tcache_bytes;
|
size_t tcache_bytes; /* Derived. */
|
||||||
|
|
||||||
/* One element for each large size class. */
|
/* One element for each large size class. */
|
||||||
malloc_large_stats_t lstats[NSIZES - NBINS];
|
malloc_large_stats_t lstats[NSIZES - NBINS];
|
||||||
|
@ -26,9 +26,17 @@ typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
|
|||||||
#define WITNESS_RANK_PROF_TDATA 7U
|
#define WITNESS_RANK_PROF_TDATA 7U
|
||||||
#define WITNESS_RANK_PROF_GCTX 8U
|
#define WITNESS_RANK_PROF_GCTX 8U
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Used as an argument to witness_depth_to_rank() in order to validate depth
|
||||||
|
* excluding non-core locks with lower ranks. Since the rank argument to
|
||||||
|
* witness_depth_to_rank() is inclusive rather than exclusive, this definition
|
||||||
|
* can have the same value as the minimally ranked core lock.
|
||||||
|
*/
|
||||||
|
#define WITNESS_RANK_CORE 9U
|
||||||
|
|
||||||
#define WITNESS_RANK_ARENA 9U
|
#define WITNESS_RANK_ARENA 9U
|
||||||
#define WITNESS_RANK_ARENA_EXTENTS 10U
|
#define WITNESS_RANK_EXTENTS 10U
|
||||||
#define WITNESS_RANK_ARENA_EXTENT_CACHE 11U
|
#define WITNESS_RANK_EXTENT_FREELIST 11U
|
||||||
|
|
||||||
#define WITNESS_RANK_RTREE_ELM 12U
|
#define WITNESS_RANK_RTREE_ELM 12U
|
||||||
#define WITNESS_RANK_RTREE 13U
|
#define WITNESS_RANK_RTREE 13U
|
||||||
|
377
src/arena.c
377
src/arena.c
@ -37,75 +37,13 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
|
|||||||
|
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
|
|
||||||
static size_t
|
|
||||||
arena_extent_dirty_npages(const extent_t *extent) {
|
|
||||||
return (extent_size_get(extent) >> LG_PAGE);
|
|
||||||
}
|
|
||||||
|
|
||||||
static extent_t *
|
|
||||||
arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
|
|
||||||
size_t alignment, bool *zero, bool slab) {
|
|
||||||
bool commit = true;
|
|
||||||
|
|
||||||
malloc_mutex_assert_owner(tsdn, &arena->lock);
|
|
||||||
|
|
||||||
return extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
|
|
||||||
pad, alignment, zero, &commit, slab);
|
|
||||||
}
|
|
||||||
|
|
||||||
extent_t *
|
|
||||||
arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_hooks_t **r_extent_hooks, void *new_addr, size_t size,
|
|
||||||
size_t alignment, bool *zero) {
|
|
||||||
extent_t *extent;
|
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
|
||||||
extent = arena_extent_cache_alloc_locked(tsdn, arena, r_extent_hooks,
|
|
||||||
new_addr, size, 0, alignment, zero, false);
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
|
||||||
|
|
||||||
return extent;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void
|
|
||||||
arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
|
|
||||||
extent_hooks_t **r_extent_hooks, extent_t *extent) {
|
|
||||||
malloc_mutex_assert_owner(tsdn, &arena->lock);
|
|
||||||
|
|
||||||
extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
|
|
||||||
arena_maybe_purge(tsdn, arena);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
|
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_hooks_t **r_extent_hooks, extent_t *extent) {
|
extent_hooks_t **r_extent_hooks, extent_t *extent) {
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent);
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
|
||||||
arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
arena_purge(tsdn, arena, false);
|
||||||
bool cache) {
|
|
||||||
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
|
|
||||||
|
|
||||||
if (cache) {
|
|
||||||
extent_ring_insert(&arena->extents_dirty, extent);
|
|
||||||
arena->ndirty += arena_extent_dirty_npages(extent);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
|
||||||
bool dirty) {
|
|
||||||
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
|
|
||||||
|
|
||||||
if (dirty) {
|
|
||||||
extent_ring_remove(extent);
|
|
||||||
assert(arena->ndirty >= arena_extent_dirty_npages(extent));
|
|
||||||
arena->ndirty -= arena_extent_dirty_npages(extent);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_INLINE_C void *
|
JEMALLOC_INLINE_C void *
|
||||||
@ -180,13 +118,13 @@ arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
arena_nactive_add(arena_t *arena, size_t add_pages) {
|
arena_nactive_add(arena_t *arena, size_t add_pages) {
|
||||||
arena->nactive += add_pages;
|
atomic_add_zu(&arena->nactive, add_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
|
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
|
||||||
assert(arena->nactive >= sub_pages);
|
assert(atomic_read_zu(&arena->nactive) >= sub_pages);
|
||||||
arena->nactive -= sub_pages;
|
atomic_sub_zu(&arena->nactive, sub_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -269,6 +207,8 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
|
|||||||
extent_t *extent;
|
extent_t *extent;
|
||||||
bool commit = true;
|
bool commit = true;
|
||||||
|
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
extent = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, usize,
|
extent = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, usize,
|
||||||
large_pad, alignment, zero, &commit, false);
|
large_pad, alignment, zero, &commit, false);
|
||||||
if (extent == NULL) {
|
if (extent == NULL) {
|
||||||
@ -291,6 +231,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
|||||||
extent_t *extent;
|
extent_t *extent;
|
||||||
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
|
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
|
||||||
|
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
malloc_mutex_lock(tsdn, &arena->lock);
|
||||||
|
|
||||||
/* Optimistically update stats. */
|
/* Optimistically update stats. */
|
||||||
@ -300,9 +242,11 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
|||||||
}
|
}
|
||||||
arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
|
arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);
|
||||||
|
|
||||||
extent = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks,
|
|
||||||
NULL, usize, large_pad, alignment, zero, false);
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||||
|
|
||||||
|
bool commit = true;
|
||||||
|
extent = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL, usize,
|
||||||
|
large_pad, alignment, zero, &commit, false);
|
||||||
if (extent == NULL) {
|
if (extent == NULL) {
|
||||||
extent = arena_extent_alloc_large_hard(tsdn, arena,
|
extent = arena_extent_alloc_large_hard(tsdn, arena,
|
||||||
&extent_hooks, usize, alignment, zero);
|
&extent_hooks, usize, alignment, zero);
|
||||||
@ -312,10 +256,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
|||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
||||||
bool locked) {
|
bool locked) {
|
||||||
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
|
|
||||||
|
|
||||||
if (!locked) {
|
if (!locked) {
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
malloc_mutex_lock(tsdn, &arena->lock);
|
||||||
} else {
|
} else {
|
||||||
@ -326,12 +268,17 @@ arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
|||||||
extent_usize_get(extent));
|
extent_usize_get(extent));
|
||||||
arena->stats.mapped -= extent_size_get(extent);
|
arena->stats.mapped -= extent_size_get(extent);
|
||||||
}
|
}
|
||||||
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
|
|
||||||
|
|
||||||
arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent);
|
|
||||||
if (!locked) {
|
if (!locked) {
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||||
}
|
}
|
||||||
|
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
|
||||||
|
extent_t *extent) {
|
||||||
|
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
|
||||||
|
extent_dalloc_cache(tsdn, arena, &extent_hooks, extent);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@ -414,8 +361,9 @@ arena_decay_backlog_npages_limit(const arena_t *arena) {
|
|||||||
|
|
||||||
static void
|
static void
|
||||||
arena_decay_backlog_update_last(arena_t *arena) {
|
arena_decay_backlog_update_last(arena_t *arena) {
|
||||||
size_t ndirty_delta = (arena->ndirty > arena->decay.nunpurged) ?
|
size_t ndirty = extents_npages_get(&arena->extents_cached);
|
||||||
arena->ndirty - arena->decay.nunpurged : 0;
|
size_t ndirty_delta = (ndirty > arena->decay.nunpurged) ? ndirty -
|
||||||
|
arena->decay.nunpurged : 0;
|
||||||
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
|
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -468,10 +416,15 @@ static void
|
|||||||
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) {
|
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) {
|
||||||
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
|
size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
|
||||||
|
|
||||||
if (arena->ndirty > ndirty_limit) {
|
if (extents_npages_get(&arena->extents_cached) > ndirty_limit) {
|
||||||
arena_purge_to_limit(tsdn, arena, ndirty_limit);
|
arena_purge_to_limit(tsdn, arena, ndirty_limit);
|
||||||
}
|
}
|
||||||
arena->decay.nunpurged = arena->ndirty;
|
/*
|
||||||
|
* There may be concurrent ndirty fluctuation between the purge above
|
||||||
|
* and the nunpurged update below, but this is inconsequential to decay
|
||||||
|
* machinery correctness.
|
||||||
|
*/
|
||||||
|
arena->decay.nunpurged = extents_npages_get(&arena->extents_cached);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -492,7 +445,7 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) {
|
|||||||
nstime_update(&arena->decay.epoch);
|
nstime_update(&arena->decay.epoch);
|
||||||
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
|
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
|
||||||
arena_decay_deadline_init(arena);
|
arena_decay_deadline_init(arena);
|
||||||
arena->decay.nunpurged = arena->ndirty;
|
arena->decay.nunpurged = extents_npages_get(&arena->extents_cached);
|
||||||
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
|
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -540,9 +493,9 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
void
|
||||||
arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
|
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
|
||||||
nstime_t time;
|
malloc_mutex_assert_owner(tsdn, &arena->lock);
|
||||||
|
|
||||||
/* Purge all or nothing if the option is disabled. */
|
/* Purge all or nothing if the option is disabled. */
|
||||||
if (arena->decay.time <= 0) {
|
if (arena->decay.time <= 0) {
|
||||||
@ -552,6 +505,7 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nstime_t time;
|
||||||
nstime_init(&time, 0);
|
nstime_init(&time, 0);
|
||||||
nstime_update(&time);
|
nstime_update(&time);
|
||||||
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
|
if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
|
||||||
@ -583,95 +537,40 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
|
|
||||||
malloc_mutex_assert_owner(tsdn, &arena->lock);
|
|
||||||
|
|
||||||
/* Don't recursively purge. */
|
|
||||||
if (arena->purging) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
arena_maybe_purge_helper(tsdn, arena);
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t
|
|
||||||
arena_dirty_count(tsdn_t *tsdn, arena_t *arena) {
|
|
||||||
extent_t *extent;
|
|
||||||
size_t ndirty = 0;
|
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->extents_mtx);
|
|
||||||
|
|
||||||
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
|
|
||||||
&arena->extents_dirty; extent = qr_next(extent, qr_link)) {
|
|
||||||
ndirty += extent_size_get(extent) >> LG_PAGE;
|
|
||||||
}
|
|
||||||
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
|
|
||||||
|
|
||||||
return ndirty;
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
|
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
|
||||||
size_t ndirty_limit, extent_t *purge_extents_sentinel) {
|
size_t ndirty_limit, extent_list_t *purge_extents) {
|
||||||
extent_t *extent, *next;
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
|
||||||
size_t nstashed = 0;
|
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->extents_mtx);
|
|
||||||
|
|
||||||
/* Stash extents according to ndirty_limit. */
|
/* Stash extents according to ndirty_limit. */
|
||||||
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
|
size_t nstashed = 0;
|
||||||
&arena->extents_dirty; extent = next) {
|
for (extent_t *extent = extents_evict(tsdn, &arena->extents_cached,
|
||||||
size_t npages;
|
ndirty_limit); extent != NULL; extent = extents_evict(tsdn,
|
||||||
bool zero, commit;
|
&arena->extents_cached, ndirty_limit)) {
|
||||||
UNUSED extent_t *textent;
|
extent_list_append(purge_extents, extent);
|
||||||
|
nstashed += extent_size_get(extent) >> LG_PAGE;
|
||||||
npages = extent_size_get(extent) >> LG_PAGE;
|
|
||||||
if (arena->ndirty - (nstashed + npages) < ndirty_limit) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
next = qr_next(extent, qr_link);
|
|
||||||
/* Allocate. */
|
|
||||||
zero = false;
|
|
||||||
commit = false;
|
|
||||||
textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
|
|
||||||
extent_base_get(extent), extent_size_get(extent), 0, PAGE,
|
|
||||||
&zero, &commit, false);
|
|
||||||
assert(textent == extent);
|
|
||||||
assert(zero == extent_zeroed_get(extent));
|
|
||||||
extent_ring_remove(extent);
|
|
||||||
extent_ring_insert(purge_extents_sentinel, extent);
|
|
||||||
|
|
||||||
nstashed += npages;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
|
|
||||||
return nstashed;
|
return nstashed;
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
|
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
|
||||||
extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel) {
|
extent_hooks_t **r_extent_hooks, extent_list_t *purge_extents) {
|
||||||
UNUSED size_t nmadvise;
|
UNUSED size_t nmadvise;
|
||||||
size_t npurged;
|
size_t npurged;
|
||||||
extent_t *extent, *next;
|
|
||||||
|
|
||||||
if (config_stats) {
|
if (config_stats) {
|
||||||
nmadvise = 0;
|
nmadvise = 0;
|
||||||
}
|
}
|
||||||
npurged = 0;
|
npurged = 0;
|
||||||
|
|
||||||
for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
|
for (extent_t *extent = extent_list_first(purge_extents); extent !=
|
||||||
purge_extents_sentinel; extent = next) {
|
NULL; extent = extent_list_first(purge_extents)) {
|
||||||
if (config_stats) {
|
if (config_stats) {
|
||||||
nmadvise++;
|
nmadvise++;
|
||||||
}
|
}
|
||||||
npurged += extent_size_get(extent) >> LG_PAGE;
|
npurged += extent_size_get(extent) >> LG_PAGE;
|
||||||
|
extent_list_remove(purge_extents, extent);
|
||||||
next = qr_next(extent, qr_link);
|
|
||||||
extent_ring_remove(extent);
|
|
||||||
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
|
extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -684,43 +583,44 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ndirty_limit: Purge as many dirty extents as possible without violating the
|
* ndirty_limit: Purge as many dirty extents as possible without violating the
|
||||||
* invariant: (arena->ndirty >= ndirty_limit)
|
* invariant: (extents_npages_get(&arena->extents_cached) >= ndirty_limit)
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) {
|
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) {
|
||||||
|
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
|
||||||
|
malloc_mutex_assert_owner(tsdn, &arena->lock);
|
||||||
|
|
||||||
|
if (atomic_cas_u(&arena->purging, 0, 1)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
|
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
|
||||||
size_t npurge, npurged;
|
size_t npurge, npurged;
|
||||||
extent_t purge_extents_sentinel;
|
extent_list_t purge_extents;
|
||||||
|
|
||||||
arena->purging = true;
|
extent_list_init(&purge_extents);
|
||||||
|
|
||||||
/*
|
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||||
* Calls to arena_dirty_count() are disabled even for debug builds
|
|
||||||
* because overhead grows nonlinearly as memory usage increases.
|
|
||||||
*/
|
|
||||||
if (false && config_debug) {
|
|
||||||
size_t ndirty = arena_dirty_count(tsdn, arena);
|
|
||||||
assert(ndirty == arena->ndirty);
|
|
||||||
}
|
|
||||||
extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, 0, false, false,
|
|
||||||
false, false);
|
|
||||||
|
|
||||||
npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
|
npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit,
|
||||||
&purge_extents_sentinel);
|
&purge_extents);
|
||||||
if (npurge == 0) {
|
if (npurge == 0) {
|
||||||
|
malloc_mutex_lock(tsdn, &arena->lock);
|
||||||
goto label_return;
|
goto label_return;
|
||||||
}
|
}
|
||||||
npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
|
npurged = arena_purge_stashed(tsdn, arena, &extent_hooks,
|
||||||
&purge_extents_sentinel);
|
&purge_extents);
|
||||||
assert(npurged == npurge);
|
assert(npurged == npurge);
|
||||||
|
|
||||||
|
malloc_mutex_lock(tsdn, &arena->lock);
|
||||||
|
|
||||||
if (config_stats) {
|
if (config_stats) {
|
||||||
arena->stats.npurge++;
|
arena->stats.npurge++;
|
||||||
}
|
}
|
||||||
|
|
||||||
label_return:
|
label_return:
|
||||||
arena->purging = false;
|
atomic_write_u(&arena->purging, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -737,9 +637,14 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) {
 
 static void
 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
     extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+    size_t npages = extent_size_get(slab) >> LG_PAGE;
 
-    arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
-    arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, slab);
+    extent_dalloc_cache(tsdn, arena, &extent_hooks, slab);
+
+    arena_nactive_sub(arena, npages);
+    malloc_mutex_lock(tsdn, &arena->lock);
+    arena_maybe_purge(tsdn, arena);
+    malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
 static void
@@ -768,19 +673,16 @@ arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
 static void
 arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab) {
     assert(extent_slab_data_get(slab)->nfree == 0);
-    extent_ring_insert(&bin->slabs_full, slab);
+    extent_list_append(&bin->slabs_full, slab);
 }
 
 static void
-arena_bin_slabs_full_remove(extent_t *slab) {
-    extent_ring_remove(slab);
+arena_bin_slabs_full_remove(arena_bin_t *bin, extent_t *slab) {
+    extent_list_remove(&bin->slabs_full, slab);
 }
 
 void
 arena_reset(tsd_t *tsd, arena_t *arena) {
-    unsigned i;
-    extent_t *extent;
-
     /*
      * Locking in this function is unintuitive.  The caller guarantees that
      * no concurrent operations are happening in this arena, but there are
@@ -797,8 +699,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 
     /* Large allocations. */
     malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-    for (extent = ql_last(&arena->large, ql_link); extent != NULL; extent =
-        ql_last(&arena->large, ql_link)) {
+    for (extent_t *extent = extent_list_first(&arena->large); extent !=
+        NULL; extent = extent_list_first(&arena->large)) {
         void *ptr = extent_base_get(extent);
         size_t usize;
 
@@ -819,10 +722,8 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
     }
     malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 
-    malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-
     /* Bins. */
-    for (i = 0; i < NBINS; i++) {
+    for (unsigned i = 0; i < NBINS; i++) {
         extent_t *slab;
         arena_bin_t *bin = &arena->bins[i];
         malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
@@ -839,10 +740,9 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
             arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
             malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
         }
-        for (slab = qr_next(&bin->slabs_full, qr_link); slab !=
-            &bin->slabs_full; slab = qr_next(&bin->slabs_full,
-            qr_link)) {
-            arena_bin_slabs_full_remove(slab);
+        for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+            slab = extent_list_first(&bin->slabs_full)) {
+            arena_bin_slabs_full_remove(bin, slab);
             malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
             arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
             malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
@@ -854,17 +754,12 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
         malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
     }
 
-    assert(!arena->purging);
-    arena->nactive = 0;
-
-    malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+    assert(atomic_read_u(&arena->purging) == 0);
+    atomic_write_zu(&arena->nactive, 0);
 }
 
 static void
 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
-    extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-    size_t i;
-
     /*
      * Iterate over the retained extents and blindly attempt to deallocate
      * them.  This gives the extent allocator underlying the extent hooks an
@@ -876,15 +771,11 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
      * dss for arenas to be destroyed), or provide custom extent hooks that
      * either unmap retained extents or track them for later use.
      */
-    for (i = 0; i < sizeof(arena->extents_retained)/sizeof(extent_heap_t);
-        i++) {
-        extent_heap_t *extents = &arena->extents_retained[i];
-        extent_t *extent;
-
-        while ((extent = extent_heap_remove_first(extents)) != NULL) {
-            extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks,
-                extent);
-        }
+    extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+    for (extent_t *extent = extents_evict(tsdn, &arena->extents_retained,
+        0); extent != NULL; extent = extents_evict(tsdn,
+        &arena->extents_retained, 0)) {
+        extent_dalloc_wrapper_try(tsdn, arena, &extent_hooks, extent);
     }
 }
 
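The teardown above stops draining per-size heaps and instead evicts repeatedly from a single extents container until it is empty. A self-contained sketch of that eviction-loop shape over an intrusive LRU list follows; the types and the `extents_evict_sketch` helper are illustrative, not jemalloc's actual `extents_evict` signature:

    #include <stddef.h>

    typedef struct extent_s extent_t;
    struct extent_s {
        extent_t *prev, *next;  /* Intrusive LRU linkage. */
        size_t npages;
    };

    typedef struct {
        extent_t *lru_head;     /* Oldest extent first. */
        size_t npages;          /* Page-count accounting. */
    } extents_t;

    /* Pop the LRU extent, keeping the page count in sync. */
    static extent_t *
    extents_evict_sketch(extents_t *extents) {
        extent_t *extent = extents->lru_head;
        if (extent == NULL) {
            return NULL;
        }
        extents->lru_head = extent->next;
        if (extent->next != NULL) {
            extent->next->prev = NULL;
        }
        extent->next = NULL;
        extents->npages -= extent->npages;
        return extent;
    }

    static void
    destroy_retained_sketch(extents_t *retained) {
        /* Same loop shape as arena_destroy_retained() above. */
        for (extent_t *extent = extents_evict_sketch(retained); extent != NULL;
            extent = extents_evict_sketch(retained)) {
            /* extent_dalloc_wrapper_try(...) would go here. */
        }
    }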
@@ -899,7 +790,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
      * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
      * extents, so only retained extents may remain.
      */
-    assert(arena->ndirty == 0);
+    assert(extents_npages_get(&arena->extents_cached) == 0);
 
     /* Attempt to deallocate retained memory. */
     arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -929,12 +820,12 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
     extent_t *slab;
     bool zero, commit;
 
+    witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
+
     zero = false;
     commit = true;
-    malloc_mutex_unlock(tsdn, &arena->lock);
     slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
         bin_info->slab_size, 0, PAGE, &zero, &commit, true);
-    malloc_mutex_lock(tsdn, &arena->lock);
 
     return slab;
 }
@@ -942,13 +833,13 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
 static extent_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
     const arena_bin_info_t *bin_info) {
-    extent_t *slab;
-    arena_slab_data_t *slab_data;
+    witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
+
     extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
     bool zero = false;
-
-    slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
-        bin_info->slab_size, 0, PAGE, &zero, true);
+    bool commit = true;
+    extent_t *slab = extent_alloc_cache(tsdn, arena, &extent_hooks, NULL,
+        bin_info->slab_size, 0, PAGE, &zero, &commit, true);
     if (slab == NULL) {
         slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
             bin_info);
@@ -958,10 +849,12 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
     }
     assert(extent_slab_get(slab));
 
+    malloc_mutex_lock(tsdn, &arena->lock);
+
     arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
 
     /* Initialize slab internals. */
-    slab_data = extent_slab_data_get(slab);
+    arena_slab_data_t *slab_data = extent_slab_data_get(slab);
     slab_data->binind = binind;
     slab_data->nfree = bin_info->nregs;
     bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);
@@ -969,6 +862,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
     if (config_stats) {
         arena->stats.mapped += extent_size_get(slab);
     }
+    malloc_mutex_unlock(tsdn, &arena->lock);
 
     return slab;
 }
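The `witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0)` calls added above enforce the commit's core invariant: slab allocation may no longer be entered with any core lock held, so the arena lock is taken only briefly afterwards for accounting. A toy version of that kind of depth assertion, tracking per-thread lock depth with a thread-local counter; these are hypothetical names, and jemalloc's witness machinery is considerably richer:

    #include <assert.h>
    #include <pthread.h>

    static _Thread_local unsigned core_lock_depth;

    static void
    core_lock(pthread_mutex_t *mtx) {
        pthread_mutex_lock(mtx);
        core_lock_depth++;
    }

    static void
    core_unlock(pthread_mutex_t *mtx) {
        core_lock_depth--;
        pthread_mutex_unlock(mtx);
    }

    /* Models witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0). */
    static void
    assert_no_core_locks_held(void) {
        assert(core_lock_depth == 0);
    }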
@@ -991,9 +885,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
     /* Allocate a new slab. */
     malloc_mutex_unlock(tsdn, &bin->lock);
     /******************************/
-    malloc_mutex_lock(tsdn, &arena->lock);
     slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
-    malloc_mutex_unlock(tsdn, &arena->lock);
     /********************************/
     malloc_mutex_lock(tsdn, &bin->lock);
     if (slab != NULL) {
@@ -1317,7 +1209,7 @@ arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) {
          * into the non-full slabs heap.
          */
         if (bin_info->nregs == 1) {
-            arena_bin_slabs_full_remove(slab);
+            arena_bin_slabs_full_remove(bin, slab);
         } else {
             arena_bin_slabs_nonfull_remove(bin, slab);
         }
@@ -1331,9 +1223,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 
     malloc_mutex_unlock(tsdn, &bin->lock);
     /******************************/
-    malloc_mutex_lock(tsdn, &arena->lock);
     arena_slab_dalloc(tsdn, arena, slab);
-    malloc_mutex_unlock(tsdn, &arena->lock);
     /****************************/
     malloc_mutex_lock(tsdn, &bin->lock);
     if (config_stats) {
@@ -1385,7 +1275,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
         arena_dissociate_bin_slab(slab, bin);
         arena_dalloc_bin_slab(tsdn, arena, slab, bin);
     } else if (slab_data->nfree == 1 && slab != bin->slabcur) {
-        arena_bin_slabs_full_remove(slab);
+        arena_bin_slabs_full_remove(bin, slab);
         arena_bin_lower_slab(tsdn, arena, slab, bin);
     }
 
@@ -1554,8 +1444,8 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
     *nthreads += arena_nthreads_get(arena, false);
     *dss = dss_prec_names[arena->dss_prec];
     *decay_time = arena->decay.time;
-    *nactive += arena->nactive;
-    *ndirty += arena->ndirty;
+    *nactive += atomic_read_zu(&arena->nactive);
+    *ndirty += extents_npages_get(&arena->extents_cached);
 }
 
 void
@@ -1585,14 +1475,15 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
         &base_mapped);
 
     astats->mapped += base_mapped + arena->stats.mapped;
-    astats->retained += arena->stats.retained;
+    astats->retained += (extents_npages_get(&arena->extents_retained) <<
+        LG_PAGE);
     astats->npurge += arena->stats.npurge;
     astats->nmadvise += arena->stats.nmadvise;
     astats->purged += arena->stats.purged;
     astats->base += base_allocated;
     astats->internal += arena_internal_get(arena);
-    astats->resident += base_resident + (((arena->nactive + arena->ndirty)
-        << LG_PAGE));
+    astats->resident += base_resident + (((atomic_read_zu(&arena->nactive) +
+        extents_npages_get(&arena->extents_cached)) << LG_PAGE));
     astats->allocated_large += arena->stats.allocated_large;
     astats->nmalloc_large += arena->stats.nmalloc_large;
     astats->ndalloc_large += arena->stats.ndalloc_large;
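With `nactive` read via `atomic_read_zu()` and dirty/retained page counts derived from `extents_npages_get()`, the stats paths above no longer need the arena lock to observe these counters; they take a racy but coherent snapshot instead. A minimal C11 equivalent of that lock-free counter pattern, with illustrative names:

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t nactive;   /* Models arena->nactive. */

    static void
    nactive_add(size_t npages) {
        atomic_fetch_add_explicit(&nactive, npages, memory_order_relaxed);
    }

    static void
    nactive_sub(size_t npages) {
        atomic_fetch_sub_explicit(&nactive, npages, memory_order_relaxed);
    }

    /* Stats readers snapshot the counter without any lock. */
    static size_t
    nactive_read(void) {
        return atomic_load_explicit(&nactive, memory_order_relaxed);
    }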
@@ -1709,28 +1600,22 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 
     arena->dss_prec = extent_dss_prec_get();
 
-    arena->purging = false;
-    arena->nactive = 0;
-    arena->ndirty = 0;
+    atomic_write_u(&arena->purging, 0);
+    atomic_write_zu(&arena->nactive, 0);
 
     arena_decay_init(arena, arena_decay_time_default_get());
 
-    ql_new(&arena->large);
+    extent_list_init(&arena->large);
     if (malloc_mutex_init(&arena->large_mtx, "arena_large",
         WITNESS_RANK_ARENA_LARGE)) {
         goto label_error;
     }
 
-    for (i = 0; i < NPSIZES+1; i++) {
-        extent_heap_new(&arena->extents_cached[i]);
-        extent_heap_new(&arena->extents_retained[i]);
-    }
-
-    extent_init(&arena->extents_dirty, arena, NULL, 0, 0, 0, false, false,
-        false, false);
-
-    if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
-        WITNESS_RANK_ARENA_EXTENTS)) {
+    if (extents_init(tsdn, &arena->extents_cached, extent_state_dirty)) {
+        goto label_error;
+    }
+    if (extents_init(tsdn, &arena->extents_retained,
+        extent_state_retained)) {
         goto label_error;
     }
 
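Per the commit message, the `extents_init()` calls above replace the per-size `extent_heap_t` arrays with an extents container that bundles its own synchronization, LRU linkage for purging, and page-count accounting (superseding `arena->ndirty` and `arena->stats.retained`). A rough sketch of such a container's shape; the field names are illustrative guesses at the pattern, not jemalloc's actual definition:

    #include <pthread.h>
    #include <stddef.h>

    typedef enum {              /* Replaces the old boolean 'active' flag. */
        extent_state_active,
        extent_state_dirty,
        extent_state_retained
    } extent_state_t;

    #define NPSIZES_SKETCH 200  /* Placeholder for jemalloc's NPSIZES. */

    typedef struct extent_s extent_t;
    typedef struct { extent_t *first, *last; } extent_list_t;
    typedef struct { extent_t *root; } extent_heap_t;

    typedef struct {
        /* Own mutex, so callers need not hold arena->lock. */
        pthread_mutex_t mtx;
        /* Size-segregated heaps for best-fit allocation. */
        extent_heap_t   heaps[NPSIZES_SKETCH + 1];
        /* LRU of contained extents, purged in eviction order. */
        extent_list_t   lru;
        /* Page count, readable without taking mtx. */
        size_t          npages;
        /* All contained extents share this state (dirty or retained). */
        extent_state_t  state;
    } extents_t;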
@@ -1738,9 +1623,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
         arena->extent_grow_next = psz2ind(HUGEPAGE);
     }
 
-    ql_new(&arena->extent_cache);
-    if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
-        WITNESS_RANK_ARENA_EXTENT_CACHE)) {
+    extent_list_init(&arena->extent_freelist);
+    if (malloc_mutex_init(&arena->extent_freelist_mtx, "extent_freelist",
+        WITNESS_RANK_EXTENT_FREELIST)) {
         goto label_error;
     }
 
@@ -1753,8 +1638,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
         }
         bin->slabcur = NULL;
         extent_heap_new(&bin->slabs_nonfull);
-        extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false,
-            false, false, false);
+        extent_list_init(&bin->slabs_full);
         if (config_stats) {
             memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
         }
@@ -1782,12 +1666,13 @@ arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
-    malloc_mutex_prefork(tsdn, &arena->extents_mtx);
+    extents_prefork(tsdn, &arena->extents_cached);
+    extents_prefork(tsdn, &arena->extents_retained);
 }
 
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
-    malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
+    malloc_mutex_prefork(tsdn, &arena->extent_freelist_mtx);
 }
 
 void
@@ -1810,8 +1695,9 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
         malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
     }
     base_postfork_parent(tsdn, arena->base);
-    malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
-    malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx);
+    malloc_mutex_postfork_parent(tsdn, &arena->extent_freelist_mtx);
+    extents_postfork_parent(tsdn, &arena->extents_cached);
+    extents_postfork_parent(tsdn, &arena->extents_retained);
     malloc_mutex_postfork_parent(tsdn, &arena->lock);
 }
 
@@ -1824,7 +1710,8 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
         malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
     }
     base_postfork_child(tsdn, arena->base);
-    malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
-    malloc_mutex_postfork_child(tsdn, &arena->extents_mtx);
+    malloc_mutex_postfork_child(tsdn, &arena->extent_freelist_mtx);
+    extents_postfork_child(tsdn, &arena->extents_cached);
+    extents_postfork_child(tsdn, &arena->extents_retained);
     malloc_mutex_postfork_child(tsdn, &arena->lock);
 }
@@ -87,7 +87,8 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
     sn = *extent_sn_next;
     (*extent_sn_next)++;
 
-    extent_init(extent, NULL, addr, size, 0, sn, true, true, true, false);
+    extent_init(extent, NULL, addr, size, 0, sn, extent_state_active, true,
+        true, false);
 }
 
 static void *
@@ -104,7 +105,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
     assert(extent_size_get(extent) >= *gap_size + size);
     extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
         *gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
-        extent_sn_get(extent), true, true, true, false);
+        extent_sn_get(extent), extent_state_active, true, true, false);
     return ret;
 }
 
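Both hunks above make the same mechanical change: `extent_init()`'s boolean 'active' argument becomes an explicit state, so `true` at that position turns into `extent_state_active`. Carrying an enumeration instead of a bare bool is what lets retained extents be marked as such rather than inferred from ring linkage. A before/after sketch of such a signature migration, with a simplified argument list rather than the full jemalloc prototype:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum {
        extent_state_active,
        extent_state_dirty,
        extent_state_retained
    } extent_state_t;

    /* Before: three states collapsed into one ambiguous flag. */
    void extent_init_old(void *addr, size_t size, bool active);

    /* After: every extent carries an explicit, assertable state. */
    void extent_init_new(void *addr, size_t size, extent_state_t state);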
src/extent.c: 628 lines changed (diff suppressed because it is too large)
@@ -143,7 +143,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
             if (gap_size != 0) {
                 extent_init(gap, arena, gap_addr, gap_size,
                     gap_size, arena_extent_sn_next(arena),
-                    false, false, true, false);
+                    extent_state_active, false, true, false);
             }
             dss_next = (void *)((uintptr_t)ret + size);
             if ((uintptr_t)ret < (uintptr_t)max_cur ||
@@ -180,7 +180,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
                     extent_t extent;
 
                     extent_init(&extent, arena, ret, size,
-                        size, 0, true, false, true, false);
+                        size, 0, extent_state_active, false,
+                        true, false);
                     if (extent_purge_forced_wrapper(tsdn,
                         arena, &extent_hooks, &extent, 0,
                         size)) {
src/large.c: 46 lines changed
@@ -40,8 +40,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 
     /* Insert extent into large. */
     malloc_mutex_lock(tsdn, &arena->large_mtx);
-    ql_elm_new(extent, ql_link);
-    ql_tail_insert(&arena->large, extent, ql_link);
+    extent_list_append(&arena->large, extent);
     malloc_mutex_unlock(tsdn, &arena->large_mtx);
     if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
         prof_idump(tsdn);
@@ -138,19 +137,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
     bool zero) {
     arena_t *arena = extent_arena_get(extent);
     size_t oldusize = extent_usize_get(extent);
-    bool is_zeroed_trail = false;
     extent_hooks_t *extent_hooks = extent_hooks_get(arena);
     size_t trailsize = usize - extent_usize_get(extent);
-    extent_t *trail;
 
     if (extent_hooks->merge == NULL) {
         return true;
     }
 
-    if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
-        extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
-        NULL) {
-        bool commit = true;
+    bool is_zeroed_trail = false;
+    bool commit = true;
+    extent_t *trail;
+    if ((trail = extent_alloc_cache(tsdn, arena, &extent_hooks,
+        extent_past_get(extent), trailsize, 0, CACHELINE, &is_zeroed_trail,
+        &commit, false)) == NULL) {
         if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
             extent_past_get(extent), trailsize, 0, CACHELINE,
             &is_zeroed_trail, &commit, false)) == NULL) {
@@ -291,32 +290,39 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
  * independent of these considerations.
  */
 static void
-large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) {
-    arena_t *arena;
-
-    arena = extent_arena_get(extent);
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    bool junked_locked) {
     malloc_mutex_lock(tsdn, &arena->large_mtx);
-    ql_remove(&arena->large, extent, ql_link);
+    extent_list_remove(&arena->large, extent);
     malloc_mutex_unlock(tsdn, &arena->large_mtx);
     if (!junked_locked) {
         large_dalloc_maybe_junk(extent_addr_get(extent),
             extent_usize_get(extent));
     }
-    arena_extent_dalloc_large(tsdn, arena, extent, junked_locked);
+    arena_extent_dalloc_large_prep(tsdn, arena, extent, junked_locked);
+}
 
-    if (!junked_locked) {
-        arena_decay_tick(tsdn, arena);
-    }
+static void
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+    arena_extent_dalloc_large_finish(tsdn, arena, extent);
 }
 
 void
-large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) {
-    large_dalloc_impl(tsdn, extent, true);
+large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
+    large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
+}
+
+void
+large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
+    large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
 }
 
 void
 large_dalloc(tsdn_t *tsdn, extent_t *extent) {
-    large_dalloc_impl(tsdn, extent, false);
+    arena_t *arena = extent_arena_get(extent);
+    large_dalloc_prep_impl(tsdn, arena, extent, false);
+    large_dalloc_finish_impl(tsdn, arena, extent);
+    arena_decay_tick(tsdn, arena);
 }
 
 size_t
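Splitting `large_dalloc_impl()` into prep and finish halves is what enables the tcache change below: the list unlink and junk fill (prep) can run while the caller holds an arena lock, while the extent teardown (finish), which calls into the extent_*_wrapper() API, runs only after that lock is dropped. A condensed sketch of the resulting call protocol, using pthread stand-ins and illustrative names:

    #include <pthread.h>

    typedef struct { int dummy; } extent_t;

    static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Phase 1: bookkeeping only; safe under arena_lock. */
    static void large_dalloc_prep_sketch(extent_t *extent) { (void)extent; }
    /* Phase 2: returns memory via extent hooks; needs NO core locks. */
    static void large_dalloc_finish_sketch(extent_t *extent) { (void)extent; }

    static void
    dalloc_one(extent_t *extent) {
        pthread_mutex_lock(&arena_lock);
        large_dalloc_prep_sketch(extent);   /* Under the lock. */
        pthread_mutex_unlock(&arena_lock);
        large_dalloc_finish_sketch(extent); /* After the lock. */
    }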
src/tcache.c: 31 lines changed
@@ -170,17 +170,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 void
 tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache) {
-    arena_t *arena;
-    void *ptr;
-    unsigned i, nflush, ndeferred;
     bool merged_stats = false;
 
     assert(binind < nhbins);
     assert(rem <= tbin->ncached);
 
-    arena = arena_choose(tsd, NULL);
+    arena_t *arena = arena_choose(tsd, NULL);
     assert(arena != NULL);
-    for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+    unsigned nflush = tbin->ncached - rem;
+    while (nflush > 0) {
         /* Lock the arena associated with the first object. */
         extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
         arena_t *locked_arena = extent_arena_get(extent);
@@ -189,7 +187,17 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
         if (config_prof) {
             idump = false;
         }
+
         malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+        for (unsigned i = 0; i < nflush; i++) {
+            void *ptr = *(tbin->avail - 1 - i);
+            assert(ptr != NULL);
+            extent = iealloc(tsd_tsdn(tsd), ptr);
+            if (extent_arena_get(extent) == locked_arena) {
+                large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
+                    extent);
+            }
+        }
         if ((config_prof || config_stats) && locked_arena == arena) {
             if (config_prof) {
                 idump = arena_prof_accum_locked(arena,
@@ -205,14 +213,15 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
                 tbin->tstats.nrequests = 0;
             }
         }
-        ndeferred = 0;
-        for (i = 0; i < nflush; i++) {
-            ptr = *(tbin->avail - 1 - i);
+        malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+
+        unsigned ndeferred = 0;
+        for (unsigned i = 0; i < nflush; i++) {
+            void *ptr = *(tbin->avail - 1 - i);
             assert(ptr != NULL);
             extent = iealloc(tsd_tsdn(tsd), ptr);
             if (extent_arena_get(extent) == locked_arena) {
-                large_dalloc_junked_locked(tsd_tsdn(tsd),
-                    extent);
+                large_dalloc_finish(tsd_tsdn(tsd), extent);
             } else {
                 /*
                  * This object was allocated via a different
@@ -224,12 +233,12 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
                 ndeferred++;
             }
         }
-        malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
         if (config_prof && idump) {
             prof_idump(tsd_tsdn(tsd));
         }
         arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
             ndeferred);
+        nflush = ndeferred;
     }
     if (config_stats && !merged_stats) {
         /*
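tcache_bin_flush_large() thus becomes a two-pass loop: pass one, under the locked arena's lock, preps every object belonging to that arena and merges stats; pass two, after unlocking, finishes those deallocations and defers foreign objects to the next iteration. A compact model of that structure; `owner_of()` and the fixed-size batch are stand-ins for the real tbin machinery:

    #include <pthread.h>
    #include <stddef.h>

    enum { BATCH = 8 };

    typedef struct {
        pthread_mutex_t lock;
    } arena_sketch_t;

    static arena_sketch_t *owner_of(void *ptr);  /* Stub ownership lookup. */
    static void prep_locked(void *ptr);          /* Requires owner lock. */
    static void finish_unlocked(void *ptr);      /* Requires no locks. */

    static void
    flush_batch(void *ptrs[BATCH], size_t nflush) {
        while (nflush > 0) {
            arena_sketch_t *locked_arena = owner_of(ptrs[0]);

            /* Pass 1: prep everything owned by locked_arena. */
            pthread_mutex_lock(&locked_arena->lock);
            for (size_t i = 0; i < nflush; i++) {
                if (owner_of(ptrs[i]) == locked_arena) {
                    prep_locked(ptrs[i]);
                }
            }
            pthread_mutex_unlock(&locked_arena->lock);

            /* Pass 2: finish those; defer the rest to the next round. */
            size_t ndeferred = 0;
            for (size_t i = 0; i < nflush; i++) {
                if (owner_of(ptrs[i]) == locked_arena) {
                    finish_unlocked(ptrs[i]);
                } else {
                    ptrs[ndeferred++] = ptrs[i];
                }
            }
            nflush = ndeferred;
        }
    }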
@@ -63,7 +63,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
     if (extent == NULL) {
         return 0;
     }
-    if (!extent_active_get(extent)) {
+    if (extent_state_get(extent) != extent_state_active) {
         return 0;
     }
 
@@ -8,8 +8,8 @@ TEST_BEGIN(test_arena_slab_regind) {
     extent_t slab;
     const arena_bin_info_t *bin_info = &arena_bin_info[binind];
     extent_init(&slab, NULL, mallocx(bin_info->slab_size,
-        MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0, true,
-        false, true, true);
+        MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, 0, 0,
+        extent_state_active, false, true, true);
     assert_ptr_not_null(extent_addr_get(&slab),
         "Unexpected malloc() failure");
     for (regind = 0; regind < bin_info->nregs; regind++) {