#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;
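
/*
 * Each arena_bin_info entry describes one small size class: the region size,
 * the size of the slab backing that class, the number of regions per slab,
 * and the bitmap metadata used to track which regions are in use.
 */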
const arena_bin_info_t	arena_bin_info[NBINS] = {
#define	BIN_INFO_bin_yes(reg_size, slab_size, nregs)			\
	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define	BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta),		\
	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) +	\
	    (ndelta<<lg_delta)))
	SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena,
    extent_t *slab, arena_bin_t *bin);
static void	arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena,
    extent_t *slab, arena_bin_t *bin);

/******************************************************************************/

static size_t
arena_chunk_dirty_npages(const extent_t *extent)
{

	return (extent_size_get(extent) >> LG_PAGE);
}
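
/*
 * Chunk cache wrappers.  The *_locked variants require the caller to hold
 * arena->lock; the unlocked variants acquire and release it internally.
 */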

static extent_t *
arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool slab)
{

	malloc_mutex_assert_owner(tsdn, &arena->lock);

	return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, usize,
	    pad, alignment, zero, slab));
}

extent_t *
arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
    bool *zero)
{
	extent_t *extent;

	malloc_mutex_lock(tsdn, &arena->lock);
	extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
	    new_addr, size, 0, alignment, zero, false);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (extent);
}

static void
arena_chunk_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_t *extent)
{

	malloc_mutex_assert_owner(tsdn, &arena->lock);

	chunk_dalloc_cache(tsdn, arena, chunk_hooks, extent);
	arena_maybe_purge(tsdn, arena);
}

void
arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_t *extent)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_chunk_cache_dalloc_locked(tsdn, arena, chunk_hooks, extent);
	malloc_mutex_unlock(tsdn, &arena->lock);
}
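
/*
 * Keep arena->ndirty synchronized with the dirty extent ring as extents are
 * inserted into or removed from the chunk cache.
 */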

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{

	if (cache) {
		extent_ring_insert(&arena->extents_dirty, extent);
		arena->ndirty += arena_chunk_dirty_npages(extent);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
{

	if (dirty) {
		extent_ring_remove(extent);
		assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
		arena->ndirty -= arena_chunk_dirty_npages(extent);
	}
}
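
/*
 * Allocate a region from a slab: find the lowest free region via the slab
 * bitmap, mark it allocated, and return its address.
 */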

JEMALLOC_INLINE_C void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
    const arena_bin_info_t *bin_info)
{
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(slab_data->nfree > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(slab_data->bitmap,
	    &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	slab_data->nfree--;
	return (ret);
}
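
/*
 * Compute the index of the region containing ptr, i.e.
 * (ptr - slab base) / reg_size, using a precomputed multiplicative inverse to
 * avoid hardware division for non-power-of-two region sizes.
 */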

JEMALLOC_INLINE_C size_t
arena_slab_regind(extent_t *slab, const arena_bin_info_t *bin_info,
    const void *ptr)
{
	size_t diff, interval, shift, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_info->reg_size == 0);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_size;
	shift = ffs_zu(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^SIZE_INV_SHIFT / D) and then right shift by
		 * SIZE_INV_SHIFT positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_SLAB_MAXREGS)
#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
		static const size_t interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
		    + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
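
/*
 * Return a region to its slab: clear the region's bit in the slab bitmap and
 * increment the slab's free-region count.
 */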

JEMALLOC_INLINE_C void
arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
    arena_slab_data_t *slab_data, void *ptr)
{
	szind_t binind = slab_data->binind;
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_slab_regind(slab, bin_info, ptr);

	assert(slab_data->nfree < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	slab_data->nfree++;
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	assert(arena->nactive >= sub_pages);
	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
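
/*
 * Huge allocation statistics are kept per size class; hindex maps a size
 * class index into the hstats array, with huge size classes starting at
 * NBINS.
 */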

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize);
	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[hindex].nmalloc++;
	arena->stats.hstats[hindex].nrequests++;
	arena->stats.hstats[hindex].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize);
	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[hindex].nmalloc--;
	arena->stats.hstats[hindex].nrequests--;
	arena->stats.hstats[hindex].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize);
	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[hindex].ndalloc++;
	arena->stats.hstats[hindex].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize);
	szind_t hindex = (index >= NBINS) ? index - NBINS : 0;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[hindex].ndalloc--;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldusize);
	arena_huge_malloc_stats_update(arena, usize);
}

static extent_t *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero)
{
	extent_t *extent;
	bool commit = true;

	extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, usize,
	    large_pad, alignment, zero, &commit, false);
	if (extent == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, (usize + large_pad) >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (extent);
}

extent_t *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	extent_t *extent;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, (usize + large_pad) >> LG_PAGE);

	extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
	    usize, large_pad, alignment, zero, false);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (extent == NULL) {
		extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero);
	}

	return (extent);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool locked)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	if (!locked)
		malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, extent_usize_get(extent));
		arena->stats.mapped -= extent_size_get(extent);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);

	arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
	if (!locked)
		malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize)
{
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldusize, usize);
		arena->stats.mapped -= udiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize)
{
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldusize, usize);
		arena->stats.mapped += udiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}
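
/*
 * Decay-based purging: the decay interval is decay_time / SMOOTHSTEP_NSTEPS,
 * and decay_backlog[] records how many pages were dirtied during each of the
 * most recent SMOOTHSTEP_NSTEPS epochs.  The smoothstep table determines how
 * many of those pages may remain unpurged as they age.
 */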

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval), false));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y)						\
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}
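
/*
 * Advance the decay epoch by however many whole intervals have elapsed, shift
 * the backlog accordingly, and record the pages dirtied since the previous
 * epoch in the newest backlog slot.
 */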

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsdn, &arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsdn, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(tsdn, arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(tsdn, arena, ndirty_limit);
}

void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_assert_owner(tsdn, &arena->lock);

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsdn, arena);
	else
		arena_maybe_purge_decay(tsdn, arena);
}
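
/*
 * Count dirty pages by walking the dirty extent ring; used to validate
 * arena->ndirty in debug checks.
 */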

static size_t
arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
{
	extent_t *extent;
	size_t ndirty = 0;

	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
	    &arena->extents_dirty; extent = qr_next(extent, qr_link))
		ndirty += extent_size_get(extent) >> LG_PAGE;

	return (ndirty);
}
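
/*
 * Transfer dirty extents from the arena's dirty ring to the ring headed by
 * purge_extents_sentinel, reacquiring each one from the chunk cache, until
 * ndirty_limit is satisfied.  Returns the number of pages stashed.
 */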

static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, extent_t *purge_extents_sentinel)
{
	extent_t *extent, *next;
	size_t nstashed = 0;

	/* Stash extents according to ndirty_limit. */
	for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
	    &arena->extents_dirty; extent = next) {
		size_t npages;
		bool zero;
		UNUSED extent_t *textent;

		npages = extent_size_get(extent) >> LG_PAGE;
		if (opt_purge == purge_mode_decay && arena->ndirty - (nstashed +
		    npages) < ndirty_limit)
			break;

		next = qr_next(extent, qr_link);
		/* Allocate. */
		zero = false;
		textent = arena_chunk_cache_alloc_locked(tsdn, arena,
		    chunk_hooks, extent_base_get(extent),
		    extent_size_get(extent), 0, CACHELINE, &zero, false);
		assert(textent == extent);
		assert(zero == extent_zeroed_get(extent));
		extent_ring_remove(extent);
		extent_ring_insert(purge_extents_sentinel, extent);

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_t *purge_extents_sentinel)
{
	UNUSED size_t nmadvise;
	size_t npurged;
	extent_t *extent, *next;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	for (extent = qr_next(purge_extents_sentinel, qr_link); extent !=
	    purge_extents_sentinel; extent = next) {
		if (config_stats)
			nmadvise++;
		npurged += extent_size_get(extent) >> LG_PAGE;

		next = qr_next(extent, qr_link);
		extent_ring_remove(extent);
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, extent);
	}

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 *   - purge_mode_ratio: Purge as few dirty extents as possible to reach the
 *     desired state:
 *       (arena->ndirty <= ndirty_limit)
 *   - purge_mode_decay: Purge as many dirty extents as possible without
 *     violating the invariant:
 *       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	size_t npurge, npurged;
	extent_t purge_extents_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(tsdn, arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	extent_init(&purge_extents_sentinel, arena, NULL, 0, 0, false, false,
	    false, false);

	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
	    &purge_extents_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
	    &purge_extents_sentinel);
	assert(npurged == npurge);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	if (all)
		arena_purge_to_limit(tsdn, arena, 0);
	else
		arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
	arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena)
{
	unsigned i;
	extent_t *extent;

	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Huge allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
	for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
	    ql_last(&arena->huge, ql_link)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
		if (config_stats || (config_prof && opt_prof))
			usize = isalloc(tsd_tsdn(tsd), extent, ptr);
		/* Remove huge allocation from prof sample set. */
		if (config_prof && opt_prof)
			prof_free(tsd, extent, ptr, usize);
		huge_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
		/* Cancel out unwanted effects on stats. */
		if (config_stats)
			arena_huge_reset_stats_cancel(arena, usize);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);

	/* Bins. */
	for (i = 0; i < NBINS; i++) {
		extent_t *slab, *next;
		arena_bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			arena_slab_dalloc(tsd_tsdn(tsd), arena, bin->slabcur);
			bin->slabcur = NULL;
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL)
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		for (slab = qr_next(&bin->slabs_full, qr_link); slab !=
		    &bin->slabs_full; slab = next) {
			next = qr_next(slab, qr_link);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	assert(!arena->purging);
	arena->nactive = 0;

	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
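
/*
 * Each bin tracks slabs that still have free regions in the slabs_nonfull
 * heap and completely full slabs on the slabs_full ring; the helpers below
 * maintain those containers.
 */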

static void
arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab)
{

	assert(extent_slab_data_get(slab)->nfree > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab)
{

	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(arena_bin_t *bin)
{
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL)
		return (NULL);
	if (config_stats)
		bin->stats.reslabs++;
	return (slab);
}

static void
arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab)
{

	assert(extent_slab_data_get(slab)->nfree == 0);
	extent_ring_insert(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(extent_t *slab)
{

	extent_ring_remove(slab);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    const arena_bin_info_t *bin_info)
{
	extent_t *slab;
	bool zero, commit;

	zero = false;
	commit = true;
	malloc_mutex_unlock(tsdn, &arena->lock);
	slab = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, &zero, &commit, true);
	malloc_mutex_lock(tsdn, &arena->lock);

	return (slab);
}
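
/*
 * Allocate and initialize a slab for the given size class: try the chunk
 * cache first, fall back to the chunk hooks via arena_slab_alloc_hard(), then
 * set the slab's bin index, free-region count, and bitmap.
 */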

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const arena_bin_info_t *bin_info)
{
	extent_t *slab;
	arena_slab_data_t *slab_data;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool zero;

	zero = false;
	slab = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, &zero, true);
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &chunk_hooks,
		    bin_info);
		if (slab == NULL)
			return (NULL);
	}
	assert(extent_slab_get(slab));

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	/* Initialize slab internals. */
	slab_data = extent_slab_data_get(slab);
	slab_data->binind = binind;
	slab_data->nfree = bin_info->nregs;
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info);

	if (config_stats)
		arena->stats.mapped += extent_size_get(slab);

	return (slab);
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind)
{
	extent_t *slab;
	const arena_bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL)
		return (slab);
	/* No existing slabs have any space available. */

	bin_info = &arena_bin_info[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	malloc_mutex_unlock(tsdn, &arena->lock);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return (slab);
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL)
		return (slab);

	return (NULL);
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind)
{
	const arena_bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &arena_bin_info[binind];
	if (bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without the
		 * bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_slab_data_get(bin->slabcur)->nfree > 0) {
			void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the slab.
				 */
				if (extent_slab_data_get(slab)->nfree ==
				    bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return (ret);
		}

		arena_bin_slabs_full_insert(bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL)
		return (NULL);
	bin->slabcur = slab;

	assert(extent_slab_data_get(bin->slabcur)->nfree > 0);

	return (arena_slab_reg_alloc(tsdn, slab, bin_info));
}
|
|
|
|
|
|
|
|
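
/*
 * Fill tbin with up to (ncached_max >> lg_fill_div) regions of the given size
 * class, allocating from bin->slabcur when possible and falling back to
 * arena_bin_malloc_hard() otherwise.  On OOM, any regions already obtained
 * are shifted to sit just before tbin->avail so the partial fill remains
 * usable.
 */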
void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
		prof_idump(tsdn);
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL &&
		    extent_slab_data_get(slab)->nfree > 0) {
			ptr = arena_slab_reg_alloc(tsdn, slab,
			    &arena_bin_info[binind]);
		} else
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}
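
/*
 * Junk filling for small regions: allocation-time junk is skipped when the
 * region is being zeroed, and the deallocation-time filler is routed through
 * a function pointer under JEMALLOC_JET so that tests can interpose on it.
 */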
void
arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
{

	if (!zero)
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info)
{

	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(n_arena_dalloc_junk_small);
#endif
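
/*
 * Allocate a small region of the size class indexed by binind, taking the
 * fast path through bin->slabcur under bin->lock and applying junk/zero fill
 * according to the fill configuration.
 */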
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL &&
	    extent_slab_data_get(slab)->nfree > 0)
		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize))
		prof_idump(tsdn);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return (ret);
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero)
{

	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsdn, arena, ind, zero));
	return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
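
/*
 * Aligned allocation entry point: small requests whose alignment is already
 * satisfied by slab placement go through arena_malloc(); everything else is
 * handed to huge_malloc()/huge_palloc() depending on the alignment.
 */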
void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else {
		if (likely(alignment <= CACHELINE))
			ret = huge_malloc(tsdn, arena, usize, zero);
		else
			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
	}
	return (ret);
}
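
/*
 * Sampled small allocations are backed by LARGE_MINCLASS extents; promotion
 * records the true usize on the extent and cancels the excess bytes counted
 * toward prof_accumbytes, while demotion (below) restores the LARGE_MINCLASS
 * usize prior to deallocation.
 */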
void
arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    size_t usize)
{
	arena_t *arena = extent_arena_get(extent);

	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	extent_usize_set(extent, usize);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	malloc_mutex_lock(tsdn, &arena->lock);
	if (arena->prof_accumbytes >= LARGE_MINCLASS - usize)
		arena->prof_accumbytes -= LARGE_MINCLASS - usize;
	else
		arena->prof_accumbytes = 0;
	malloc_mutex_unlock(tsdn, &arena->lock);

	assert(isalloc(tsdn, extent, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr)
{

	cassert(config_prof);
	assert(ptr != NULL);

	extent_usize_set(extent, LARGE_MINCLASS);

	assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);

	return (LARGE_MINCLASS);
}

void
arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
    tcache_t *tcache, bool slow_path)
{
	size_t usize;

	cassert(config_prof);
	assert(opt_prof);

	usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr, usize,
		    slow_path);
	} else
		huge_dalloc(tsdn, extent);
}
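
/*
 * Remove slab from whichever bin data structure currently references it
 * (slabcur, the non-full slabs heap, or the full slabs list) so that it can
 * be deallocated or repositioned.
 */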
static void
arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin)
{

	/* Dissociate slab from bin. */
	if (slab == bin->slabcur)
		bin->slabcur = NULL;
	else {
		szind_t binind = extent_slab_data_get(slab)->binind;
		const arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if
		 * the slab only contains one region, then it never gets
		 * inserted into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1)
			arena_bin_slabs_full_remove(slab);
		else
			arena_bin_slabs_nonfull_remove(bin, slab);
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin)
{

	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	malloc_mutex_lock(tsdn, &arena->lock);
	arena_slab_dalloc(tsdn, arena, slab);
	malloc_mutex_unlock(tsdn, &arena->lock);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats)
		bin->stats.curslabs--;
}
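
/*
 * Reinsert a partially used slab, preferring the lowest-addressed non-full
 * slab as slabcur so that allocation stays dense at low addresses.
 */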
static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin)
{

	assert(extent_slab_data_get(slab)->nfree > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the lowest
	 * non-full slab.  It is okay to NULL slabcur out rather than
	 * proactively keeping it pointing at the lowest non-full slab.
	 */
	if (bin->slabcur != NULL && (uintptr_t)extent_addr_get(slab) <
	    (uintptr_t)extent_addr_get(bin->slabcur)) {
		/* Switch slabcur. */
		if (extent_slab_data_get(bin->slabcur)->nfree > 0)
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		else
			arena_bin_slabs_full_insert(bin, bin->slabcur);
		bin->slabcur = slab;
		if (config_stats)
			bin->stats.reslabs++;
	} else
		arena_bin_slabs_nonfull_insert(bin, slab);
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked)
{
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = slab_data->binind;
	arena_bin_t *bin = &arena->bins[binind];
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
	if (slab_data->nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (slab_data->nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr)
{

	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
{
	arena_bin_t *bin = &arena->bins[extent_slab_data_get(extent)->binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr)
{

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}
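
/*
 * Attempt to resize an allocation in place.  Returns false (success) only
 * when the old and requested sizes map to compatible size classes, either
 * both small and in the same bin, or both large enough to be handled by
 * huge_ralloc_no_move(); returns true when the caller must move the object.
 */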
bool
arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
    size_t size, size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
		assert(arena_bin_info[size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
		    size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize))
			return (true);

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return (false);
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero));
	}

	return (true);
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
    size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize, copysize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
		    zero))
			return (ptr);
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero,
		    tcache));
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsdn, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (false);
}
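
/*
 * The ratio/decay purging defaults below are read and written atomically
 * because they may be queried concurrently with updates; the setters also
 * reject values that do not match the active purge mode.
 */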
ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}
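
/*
 * Merge this arena's full statistics into the caller-provided totals, taking
 * arena->lock for the arena-wide and huge counters and each bin's lock for
 * the per-bin counters.
 */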
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->retained += arena->stats.retained;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;
	astats->nrequests_huge += arena->stats.nrequests_huge;

	for (i = 0; i < NSIZES - NBINS; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].nrequests += arena->stats.hstats[i].nrequests;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nslabs += bin->stats.nslabs;
		bstats[i].reslabs += bin->stats.reslabs;
		bstats[i].curslabs += bin->stats.curslabs;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}
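
/*
 * Allocate and initialize an arena: its mutexes, purging state, huge/extent
 * caches, chunk hooks, and one bin per small size class.  Returns NULL if the
 * base allocation or any mutex initialization fails.
 */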
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;
	unsigned i;

	arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads[0] = arena->nthreads[1] = 0;
	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
		return (NULL);

	if (config_stats && config_tcache)
		ql_new(&arena->tcache_ql);

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena
		 * reduces the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get(tsdn);

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
	    false, false);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
	    WITNESS_RANK_ARENA_HUGE))
		return (NULL);

	for (i = 0; i < NPSIZES; i++) {
		extent_heap_new(&arena->chunks_cached[i]);
		extent_heap_new(&arena->chunks_retained[i]);
	}

	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
	    WITNESS_RANK_ARENA_CHUNKS))
		return (NULL);
	ql_new(&arena->extent_cache);
	if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
	    WITNESS_RANK_ARENA_EXTENT_CACHE))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN))
			return (NULL);
		bin->slabcur = NULL;
		extent_heap_new(&bin->slabs_nonfull);
		extent_init(&bin->slabs_full, arena, NULL, 0, 0, false, false,
		    false, false);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

void
arena_boot(void)
{

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);
}
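
/*
 * Fork protection: the prefork functions acquire all arena mutexes before
 * fork() so that no lock is held by a thread that does not exist in the
 * child, and the postfork functions release them again in parent and child.
 */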
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}