2017-01-11 10:06:31 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
|
|
|
|
#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
|
2010-01-17 01:53:50 +08:00
|
|
|
|
2017-05-24 03:28:19 +08:00
|
|
|
#include "jemalloc/internal/mutex.h"
|
2017-05-24 05:56:24 +08:00
|
|
|
#include "jemalloc/internal/mutex_pool.h"
|
2017-04-25 09:05:15 +08:00
|
|
|
#include "jemalloc/internal/pages.h"
|
2017-04-20 03:48:50 +08:00
|
|
|
#include "jemalloc/internal/prng.h"
|
2017-04-11 08:11:33 +08:00
|
|
|
#include "jemalloc/internal/ql.h"
|
2017-12-15 04:46:39 +08:00
|
|
|
#include "jemalloc/internal/sc.h"
|
2017-05-31 01:45:37 +08:00
|
|
|
#include "jemalloc/internal/sz.h"
|
2017-04-11 08:11:33 +08:00
|
|
|
|
2017-05-16 05:23:51 +08:00
|
|
|
/*
 * Acquire the pool mutex associated with this extent (selected within
 * extent_mutex_pool by hashing the extent's address).
 */
static inline void
extent_lock(tsdn_t *tsdn, extent_t *extent) {
	assert(extent != NULL);
	mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
|
|
|
|
|
|
|
|
/* Release the pool mutex previously acquired via extent_lock(). */
static inline void
extent_unlock(tsdn_t *tsdn, extent_t *extent) {
	assert(extent != NULL);
	mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
}
|
|
|
|
|
|
|
|
/*
 * Acquire the pool mutexes for two extents; mutex_pool_lock2 handles the
 * case where both extents hash to the same mutex.
 */
static inline void
extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
	assert(extent1 != NULL && extent2 != NULL);
	mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
	    (uintptr_t)extent2);
}
|
|
|
|
|
|
|
|
/* Release both pool mutexes previously acquired via extent_lock2(). */
static inline void
extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
	assert(extent1 != NULL && extent2 != NULL);
	mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
	    (uintptr_t)extent2);
}
|
|
|
|
|
2018-12-04 10:30:58 +08:00
|
|
|
static inline unsigned
|
|
|
|
extent_arena_ind_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
unsigned arena_ind = (unsigned)((extent->e_bits &
|
|
|
|
EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
|
2017-05-14 06:20:48 +08:00
|
|
|
assert(arena_ind < MALLOCX_ARENA_LIMIT);
|
2018-12-04 10:30:58 +08:00
|
|
|
|
|
|
|
return arena_ind;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline arena_t *
|
|
|
|
extent_arena_get(const extent_t *extent) {
|
|
|
|
unsigned arena_ind = extent_arena_ind_get(extent);
|
|
|
|
|
2017-04-05 06:12:24 +08:00
|
|
|
return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline szind_t
|
2017-03-17 08:57:52 +08:00
|
|
|
extent_szind_get_maybe_invalid(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
|
|
|
|
EXTENT_BITS_SZIND_SHIFT);
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(szind <= SC_NSIZES);
|
2017-03-25 06:22:26 +08:00
|
|
|
return szind;
|
2017-03-14 08:36:57 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline szind_t
|
2017-03-17 08:57:52 +08:00
|
|
|
extent_szind_get(const extent_t *extent) {
|
|
|
|
szind_t szind = extent_szind_get_maybe_invalid(extent);
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(szind < SC_NSIZES); /* Never call when "invalid". */
|
2017-03-17 08:57:52 +08:00
|
|
|
return szind;
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline size_t
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_usize_get(const extent_t *extent) {
|
2017-05-31 01:45:37 +08:00
|
|
|
return sz_index2size(extent_szind_get(extent));
|
2016-05-28 09:57:15 +08:00
|
|
|
}
|
|
|
|
|
2018-11-13 07:56:04 +08:00
|
|
|
static inline unsigned
|
|
|
|
extent_binshard_get(const extent_t *extent) {
|
|
|
|
unsigned binshard = (unsigned)((extent->e_bits &
|
|
|
|
EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
|
|
|
|
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
|
|
|
return binshard;
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline size_t
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_sn_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
|
|
|
|
EXTENT_BITS_SN_SHIFT);
|
2016-11-16 05:07:53 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline extent_state_t
|
2017-01-30 13:57:14 +08:00
|
|
|
extent_state_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
|
|
|
|
EXTENT_BITS_STATE_SHIFT);
|
2016-05-17 04:25:18 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline bool
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_zeroed_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
|
|
|
|
EXTENT_BITS_ZEROED_SHIFT);
|
Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on
the "arena.<i>.chunk.{alloc,dalloc,purge}" mallctls. The chunk hooks
allow control over chunk allocation/deallocation, decommit/commit,
purging, and splitting/merging, such that the application can rely on
jemalloc's internal chunk caching and retaining functionality, yet
implement a variety of chunk management mechanisms and policies.
Merge the chunks_[sz]ad_{mmap,dss} red-black trees into
chunks_[sz]ad_retained. This slightly reduces how hard jemalloc tries
to honor the dss precedence setting; prior to this change the precedence
setting was also consulted when recycling chunks.
Fix chunk purging. Don't purge chunks in arena_purge_stashed(); instead
deallocate them in arena_unstash_purged(), so that the dirty memory
linkage remains valid until after the last time it is used.
This resolves #176 and #201.
2015-07-28 23:28:19 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline bool
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_committed_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
|
|
|
|
EXTENT_BITS_COMMITTED_SHIFT);
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2017-09-19 08:25:57 +08:00
|
|
|
static inline bool
|
|
|
|
extent_dumpable_get(const extent_t *extent) {
|
|
|
|
return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
|
|
|
|
EXTENT_BITS_DUMPABLE_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline bool
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_slab_get(const extent_t *extent) {
|
2017-03-25 06:22:26 +08:00
|
|
|
return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
|
|
|
|
EXTENT_BITS_SLAB_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline unsigned
|
2017-03-28 07:41:47 +08:00
|
|
|
extent_nfree_get(const extent_t *extent) {
|
|
|
|
assert(extent_slab_get(extent));
|
|
|
|
return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
|
|
|
|
EXTENT_BITS_NFREE_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void *
|
2017-03-25 06:22:26 +08:00
|
|
|
extent_base_get(const extent_t *extent) {
|
|
|
|
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
|
|
|
|
!extent_slab_get(extent));
|
|
|
|
return PAGE_ADDR2BASE(extent->e_addr);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void *
|
2017-03-25 06:22:26 +08:00
|
|
|
extent_addr_get(const extent_t *extent) {
|
|
|
|
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
|
|
|
|
!extent_slab_get(extent));
|
|
|
|
return extent->e_addr;
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Size in bytes; the low bits of e_size_esn hold the esn instead. */
static inline size_t
extent_size_get(const extent_t *extent) {
	return (extent->e_size_esn & EXTENT_SIZE_MASK);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Extent serial number, stored in the low bits of e_size_esn. */
static inline size_t
extent_esn_get(const extent_t *extent) {
	return (extent->e_size_esn & EXTENT_ESN_MASK);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Base-extent size (separate field; see extent_binit()). */
static inline size_t
extent_bsize_get(const extent_t *extent) {
	return extent->e_bsize;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void *
|
2017-03-25 06:22:26 +08:00
|
|
|
extent_before_get(const extent_t *extent) {
|
|
|
|
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void *
|
2017-03-25 06:22:26 +08:00
|
|
|
extent_last_get(const extent_t *extent) {
|
|
|
|
return (void *)((uintptr_t)extent_base_get(extent) +
|
|
|
|
extent_size_get(extent) - PAGE);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void *
|
2017-03-25 06:22:26 +08:00
|
|
|
extent_past_get(const extent_t *extent) {
|
|
|
|
return (void *)((uintptr_t)extent_base_get(extent) +
|
|
|
|
extent_size_get(extent));
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Mutable access to per-slab metadata; extent must be a slab. */
static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
	assert(extent_slab_get(extent));
	return &extent->e_slab_data;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Read-only counterpart of extent_slab_data_get(). */
static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
	assert(extent_slab_get(extent));
	return &extent->e_slab_data;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Load the profiling tctx with acquire ordering, pairing with the
 * release store in extent_prof_tctx_set().
 */
static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
	return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
	    ATOMIC_ACQUIRE);
}
|
|
|
|
|
2018-07-06 01:56:33 +08:00
|
|
|
/* Allocation timestamp recorded for profiling (returned by value). */
static inline nstime_t
extent_prof_alloc_time_get(const extent_t *extent) {
	return extent->e_alloc_time;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_arena_set(extent_t *extent, arena_t *arena) {
|
2017-03-25 06:22:26 +08:00
|
|
|
unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
|
|
|
|
MALLOCX_ARENA_BITS) - 1);
|
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
|
|
|
|
((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2018-11-13 07:56:04 +08:00
|
|
|
static inline void
|
|
|
|
extent_binshard_set(extent_t *extent, unsigned binshard) {
|
|
|
|
/* The assertion assumes szind is set already. */
|
|
|
|
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
|
|
|
|
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Set the extent's start address. */
static inline void
extent_addr_set(extent_t *extent, void *addr) {
	extent->e_addr = addr;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * For sub-page alignments, offset the extent's start address by a random
 * multiple of the cacheline-ceiling'ed alignment within the first page.
 * Uses the per-thread PRNG state when tsd is available; otherwise falls
 * back to the arena's shared offset_state (atomic variant).
 */
static inline void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
	assert(extent_base_get(extent) == extent_addr_get(extent));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r;
		if (!tsdn_null(tsdn)) {
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_offset_statep_get(tsd), lg_range);
		} else {
			/* No tsd: use the arena-wide PRNG state. */
			r = prng_lg_range_zu(
			    &extent_arena_get(extent)->offset_state,
			    lg_range, true);
		}
		/* Scale r back up so the offset preserves alignment. */
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
		    extent->e_addr);
	}
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_size_set(extent_t *extent, size_t size) {
|
2017-04-17 12:51:26 +08:00
|
|
|
assert((size & ~EXTENT_SIZE_MASK) == 0);
|
|
|
|
extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-04-17 12:51:26 +08:00
|
|
|
extent_esn_set(extent_t *extent, size_t esn) {
|
|
|
|
extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
|
|
|
|
EXTENT_ESN_MASK);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Set the base-extent size (see extent_binit()). */
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
	extent->e_bsize = bsize;
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-03-14 08:36:57 +08:00
|
|
|
extent_szind_set(extent_t *extent, szind_t szind) {
|
2017-12-15 04:46:39 +08:00
|
|
|
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
|
|
|
|
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
|
2016-05-28 15:17:28 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-03-28 07:41:47 +08:00
|
|
|
extent_nfree_set(extent_t *extent, unsigned nfree) {
|
|
|
|
assert(extent_slab_get(extent));
|
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
|
|
|
|
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
|
2018-11-13 07:56:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
|
|
|
|
/* The assertion assumes szind is set already. */
|
|
|
|
assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
|
|
|
|
extent->e_bits = (extent->e_bits &
|
|
|
|
(~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
|
|
|
|
((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
|
|
|
|
((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
|
2017-03-28 07:41:47 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Increment nfree by adding directly into its bit field; assumes the
 * count cannot overflow into the neighboring fields.
 */
static inline void
extent_nfree_inc(extent_t *extent) {
	assert(extent_slab_get(extent));
	extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Decrement nfree by subtracting directly from its bit field; assumes
 * the count is nonzero so no borrow crosses field boundaries.
 */
static inline void
extent_nfree_dec(extent_t *extent) {
	assert(extent_slab_get(extent));
	extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
}
|
|
|
|
|
2018-10-30 06:09:21 +08:00
|
|
|
/*
 * Subtract n from nfree directly within its bit field; assumes
 * n <= current nfree so no borrow crosses field boundaries.
 */
static inline void
extent_nfree_sub(extent_t *extent, uint64_t n) {
	assert(extent_slab_get(extent));
	extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_sn_set(extent_t *extent, size_t sn) {
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
|
|
|
|
((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
|
2016-11-16 05:07:53 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-30 13:57:14 +08:00
|
|
|
extent_state_set(extent_t *extent, extent_state_t state) {
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
|
|
|
|
((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
|
2016-03-28 18:17:10 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_zeroed_set(extent_t *extent, bool zeroed) {
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
|
|
|
|
((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
|
Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on
the "arena.<i>.chunk.{alloc,dalloc,purge}" mallctls. The chunk hooks
allow control over chunk allocation/deallocation, decommit/commit,
purging, and splitting/merging, such that the application can rely on
jemalloc's internal chunk caching and retaining functionality, yet
implement a variety of chunk management mechanisms and policies.
Merge the chunks_[sz]ad_{mmap,dss} red-black trees into
chunks_[sz]ad_retained. This slightly reduces how hard jemalloc tries
to honor the dss precedence setting; prior to this change the precedence
setting was also consulted when recycling chunks.
Fix chunk purging. Don't purge chunks in arena_purge_stashed(); instead
deallocate them in arena_unstash_purged(), so that the dirty memory
linkage remains valid until after the last time it is used.
This resolves #176 and #201.
2015-07-28 23:28:19 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_committed_set(extent_t *extent, bool committed) {
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
|
|
|
|
((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2017-09-19 08:25:57 +08:00
|
|
|
static inline void
|
|
|
|
extent_dumpable_set(extent_t *extent, bool dumpable) {
|
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
|
|
|
|
((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline void
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_slab_set(extent_t *extent, bool slab) {
|
2017-03-25 06:22:26 +08:00
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
|
|
|
|
((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
|
2015-02-16 10:04:46 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Store the profiling tctx with release ordering, pairing with the
 * acquire load in extent_prof_tctx_get().
 */
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
	atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
|
2015-02-18 07:13:52 +08:00
|
|
|
|
2018-07-06 01:56:33 +08:00
|
|
|
/* Record the allocation timestamp used for profiling. */
static inline void
extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
	nstime_copy(&extent->e_alloc_time, &t);
}
|
|
|
|
|
2019-07-13 07:20:23 +08:00
|
|
|
static inline bool
|
|
|
|
extent_is_head_get(extent_t *extent) {
|
|
|
|
if (maps_coalesce) {
|
|
|
|
not_reached();
|
|
|
|
}
|
|
|
|
|
|
|
|
return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
|
|
|
|
EXTENT_BITS_IS_HEAD_SHIFT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
extent_is_head_set(extent_t *extent, bool is_head) {
|
|
|
|
if (maps_coalesce) {
|
|
|
|
not_reached();
|
|
|
|
}
|
|
|
|
|
|
|
|
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
|
|
|
|
((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Initialize every field of an extent.  The head flag is only recorded
 * when maps_coalesce is false, and the profiling tctx is only cleared
 * when profiling is compiled in.
 */
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, bool dumpable, extent_head_state_t is_head) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	extent_arena_set(extent, arena);
	extent_addr_set(extent, addr);
	extent_size_set(extent, size);
	extent_slab_set(extent, slab);
	extent_szind_set(extent, szind);
	extent_sn_set(extent, sn);
	extent_state_set(extent, state);
	extent_zeroed_set(extent, zeroed);
	extent_committed_set(extent, committed);
	extent_dumpable_set(extent, dumpable);
	ql_elm_new(extent, ql_link);
	if (!maps_coalesce) {
		extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
		    false);
	}
	if (config_prof) {
		extent_prof_tctx_set(extent, NULL);
	}
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Minimal initialization for a "base" extent: no owning arena, bsize
 * instead of the size/esn encoding, and an invalid (SC_NSIZES) szind.
 */
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
	extent_arena_set(extent, NULL);
	extent_addr_set(extent, addr);
	extent_bsize_set(extent, bsize);
	extent_slab_set(extent, false);
	extent_szind_set(extent, SC_NSIZES);
	extent_sn_set(extent, sn);
	extent_state_set(extent, extent_state_active);
	extent_zeroed_set(extent, true);
	extent_committed_set(extent, true);
	extent_dumpable_set(extent, true);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Initialize an empty extent list (ql wrapper). */
static inline void
extent_list_init(extent_list_t *list) {
	ql_new(list);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* First extent in the list, or NULL if empty (ql wrapper). */
static inline extent_t *
extent_list_first(const extent_list_t *list) {
	return ql_first(list);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Last extent in the list, or NULL if empty (ql wrapper). */
static inline extent_t *
extent_list_last(const extent_list_t *list) {
	return ql_last(list, ql_link);
}
|
2015-02-18 17:15:50 +08:00
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Append an extent at the tail of the list (ql wrapper). */
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
	ql_tail_insert(list, extent, ql_link);
}
|
|
|
|
|
2017-11-15 08:09:31 +08:00
|
|
|
/* Insert an extent at the head of the list (ql wrapper). */
static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
	ql_head_insert(list, extent, ql_link);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/*
 * Replace to_remove with to_insert at the same list position.  Insert
 * before remove so the position is never lost.
 */
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
    extent_t *to_insert) {
	ql_after_insert(to_remove, to_insert, ql_link);
	ql_remove(list, to_remove, ql_link);
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
/* Unlink an extent from the list (ql wrapper). */
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
	ql_remove(list, extent, ql_link);
}
|
2016-11-16 05:07:53 +08:00
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_sn_comp(const extent_t *a, const extent_t *b) {
|
2016-11-16 05:07:53 +08:00
|
|
|
size_t a_sn = extent_sn_get(a);
|
|
|
|
size_t b_sn = extent_sn_get(b);
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return (a_sn > b_sn) - (a_sn < b_sn);
|
2016-11-16 05:07:53 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-04-17 13:31:16 +08:00
|
|
|
extent_esn_comp(const extent_t *a, const extent_t *b) {
|
|
|
|
size_t a_esn = extent_esn_get(a);
|
|
|
|
size_t b_esn = extent_esn_get(b);
|
|
|
|
|
|
|
|
return (a_esn > b_esn) - (a_esn < b_esn);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_ad_comp(const extent_t *a, const extent_t *b) {
|
2016-11-16 05:07:53 +08:00
|
|
|
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
|
|
|
|
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
|
|
|
|
|
2017-01-20 10:15:45 +08:00
|
|
|
return (a_addr > b_addr) - (a_addr < b_addr);
|
2016-11-16 05:07:53 +08:00
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-04-17 13:31:16 +08:00
|
|
|
extent_ead_comp(const extent_t *a, const extent_t *b) {
|
|
|
|
uintptr_t a_eaddr = (uintptr_t)a;
|
|
|
|
uintptr_t b_eaddr = (uintptr_t)b;
|
|
|
|
|
|
|
|
return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
|
|
|
|
}
|
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-01-16 08:56:30 +08:00
|
|
|
extent_snad_comp(const extent_t *a, const extent_t *b) {
|
2016-11-16 05:07:53 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = extent_sn_comp(a, b);
|
2017-01-16 08:56:30 +08:00
|
|
|
if (ret != 0) {
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2017-01-16 08:56:30 +08:00
|
|
|
}
|
2016-11-16 05:07:53 +08:00
|
|
|
|
|
|
|
ret = extent_ad_comp(a, b);
|
2017-01-20 10:15:45 +08:00
|
|
|
return ret;
|
2016-11-16 05:07:53 +08:00
|
|
|
}
|
2017-04-17 13:31:16 +08:00
|
|
|
|
2017-04-22 00:37:34 +08:00
|
|
|
static inline int
|
2017-04-17 13:31:16 +08:00
|
|
|
extent_esnead_comp(const extent_t *a, const extent_t *b) {
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = extent_esn_comp(a, b);
|
|
|
|
if (ret != 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = extent_ead_comp(a, b);
|
|
|
|
return ret;
|
|
|
|
}
|
2015-02-16 10:04:46 +08:00
|
|
|
|
2017-01-11 10:06:31 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
|