Do not allocate metadata via non-auto arenas, nor tcaches.
This ensures that all internally allocated metadata comes from the first opt_narenas arenas, i.e. the automatically multiplexed arenas.
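
The restriction is enforced with assertions on the internal allocation paths (see the iallocztm/ipallocztm/idalloctm hunks below). As a rough illustration, the checked invariant amounts to the following minimal, self-contained C sketch; the type definitions, the placeholder narenas_auto value, and the helper name metadata_alloc_check are simplified stand-ins, not jemalloc's actual code:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the jemalloc-internal types involved. */
typedef struct { unsigned ind; } arena_t;
typedef struct tcache_s tcache_t;

static unsigned narenas_auto = 4;	/* placeholder for the real global */

/*
 * Invariant on every internal (metadata) allocation path: metadata is never
 * allocated through a tcache, and any explicitly requested arena must be one
 * of the automatically multiplexed arenas (ind < narenas_auto).
 */
static void
metadata_alloc_check(bool is_metadata, const arena_t *arena,
    const tcache_t *tcache)
{
	assert(!is_metadata || tcache == NULL);
	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
}

In the patch itself, this check shows up as the two new assert() lines added to iallocztm(), ipallocztm(), and idalloctm().
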
@@ -290,10 +290,18 @@ struct arena_s {
 	unsigned ind;

 	/*
-	 * Number of threads currently assigned to this arena. This field is
-	 * synchronized via atomic operations.
+	 * Number of threads currently assigned to this arena, synchronized via
+	 * atomic operations. Each thread has two distinct assignments, one for
+	 * application-serving allocation, and the other for internal metadata
+	 * allocation. Internal metadata must not be allocated from arenas
+	 * created via the arenas.extend mallctl, because the arena.<i>.reset
+	 * mallctl indiscriminately discards all allocations for the affected
+	 * arena.
+	 *
+	 *   0: Application allocation.
+	 *   1: Internal metadata allocation.
 	 */
-	unsigned nthreads;
+	unsigned nthreads[2];

 	/*
 	 * There are three classes of arena operations from a locking
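
To make the comment above concrete, here is a minimal, self-contained sketch of a two-slot thread count indexed by the same boolean the new nthreads APIs take; the arena_sketch_* names are illustrative only, not the actual jemalloc implementation:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for the relevant part of struct arena_s. */
typedef struct {
	unsigned ind;			/* arena index */
	atomic_uint nthreads[2];	/* [0]: application, [1]: internal metadata */
} arena_sketch_t;

/* internal == false selects slot 0, internal == true selects slot 1. */
static unsigned
arena_sketch_nthreads_get(arena_sketch_t *arena, bool internal)
{
	return (atomic_load(&arena->nthreads[internal]));
}

static void
arena_sketch_nthreads_inc(arena_sketch_t *arena, bool internal)
{
	atomic_fetch_add(&arena->nthreads[internal], 1);
}

static void
arena_sketch_nthreads_dec(arena_sketch_t *arena, bool internal)
{
	atomic_fetch_sub(&arena->nthreads[internal], 1);
}
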
@@ -541,7 +549,7 @@ void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 void arena_quarantine_junk_small(void *ptr, size_t usize);
 void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
 void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache);
+    bool zero);
 void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
 void arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size);
@@ -583,9 +591,9 @@ void arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
     malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
     malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena);
-void arena_nthreads_inc(arena_t *arena);
-void arena_nthreads_dec(arena_t *arena);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
 arena_t *arena_new(tsd_t *tsd, unsigned ind);
 bool arena_boot(void);
 void arena_prefork(tsd_t *tsd, arena_t *arena);
@@ -1320,7 +1328,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
 		assert(size > tcache_maxclass);
 	}

-	return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
+	return (arena_malloc_hard(tsd, arena, size, ind, zero));
 }

 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1426,7 +1434,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 			}
 		}
 	} else
-		huge_dalloc(tsd, ptr, tcache);
+		huge_dalloc(tsd, ptr);
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -1477,7 +1485,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 			}
 		}
 	} else
-		huge_dalloc(tsd, ptr, tcache);
+		huge_dalloc(tsd, ptr);
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif

@@ -9,10 +9,9 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS

-void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
-    tcache_t *tcache);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero);
 void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
-    bool zero, tcache_t *tcache);
+    bool zero);
 bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
     size_t usize_min, size_t usize_max, bool zero);
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
@@ -21,7 +20,7 @@ void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
 typedef void (huge_dalloc_junk_t)(tsd_t *, void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void huge_dalloc(tsd_t *tsd, void *ptr);
 arena_t *huge_aalloc(const void *ptr);
 size_t huge_salloc(tsd_t *tsd, const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(tsd_t *tsd, const void *ptr);

@@ -443,6 +443,9 @@ extern bool in_valgrind;
 /* Number of CPUs. */
 extern unsigned ncpus;

+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
 /*
  * Arenas that are used to service external requests. Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
@@ -469,10 +472,11 @@ void bootstrap_free(void *ptr);
 unsigned narenas_total_get(void);
 arena_t *arena_init(tsd_t *tsd, unsigned ind);
 arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
 void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
 void thread_allocated_cleanup(tsd_t *tsd);
 void thread_deallocated_cleanup(tsd_t *tsd);
+void iarena_cleanup(tsd_t *tsd);
 void arena_cleanup(tsd_t *tsd);
 void arenas_tdata_cleanup(tsd_t *tsd);
 void narenas_tdata_cleanup(tsd_t *tsd);
@@ -546,7 +550,7 @@ size_t s2u_compute(size_t size);
 size_t s2u_lookup(size_t size);
 size_t s2u(size_t size);
 size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_choose(tsd_t *tsd, arena_t *arena, bool internal);
 arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
     bool refresh_if_missing);
 arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing);
@@ -784,15 +788,16 @@ sa2u(size_t size, size_t alignment)

 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_choose(tsd_t *tsd, arena_t *arena, bool internal)
 {
 	arena_t *ret;

 	if (arena != NULL)
 		return (arena);

-	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
-		ret = arena_choose_hard(tsd);
+	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+	if (unlikely(ret == NULL))
+		ret = arena_choose_hard(tsd, internal);

 	return (ret);
 }
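
The new third argument simply selects which of the two thread-local arena bindings is consulted. A compilable stand-alone restatement of that selection logic, with tsd_sketch_t and the *_sketch names as illustrative stand-ins for the real TSD accessors, might look like this:

#include <stdbool.h>
#include <stddef.h>

typedef struct { unsigned ind; } arena_t;

/* Illustrative TSD layout: one arena binding per assignment class. */
typedef struct {
	arena_t *iarena;	/* binding used for internal metadata allocation */
	arena_t *arena;		/* binding used for application allocation */
} tsd_sketch_t;

/* Stub for the slow path that assigns an arena on first use. */
static arena_t *
arena_choose_hard_sketch(tsd_sketch_t *tsd, bool internal)
{
	(void)tsd;
	(void)internal;
	return (NULL);
}

/* Mirrors the updated arena_choose(): an explicitly passed arena wins;
 * otherwise the boolean picks the thread-local binding to consult before
 * falling back to the slow path. */
static arena_t *
arena_choose_sketch(tsd_sketch_t *tsd, arena_t *arena, bool internal)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	ret = internal ? tsd->iarena : tsd->arena;
	if (ret == NULL)
		ret = arena_choose_hard_sketch(tsd, internal);

	return (ret);
}

Application-facing call sites (for example the tcache fill paths further down) pass false; metadata paths would pass true so that they end up on the thread's iarena binding.
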
@@ -935,6 +940,8 @@ iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
 	void *ret;

 	assert(size != 0);
+	assert(!is_metadata || tcache == NULL);
+	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

 	ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
@@ -982,6 +989,8 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,

 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment));
+	assert(!is_metadata || tcache == NULL);
+	assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);

 	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -1052,6 +1061,8 @@ idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
 {

 	assert(ptr != NULL);
+	assert(!is_metadata || tcache == NULL);
+	assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
 	if (config_stats && is_metadata) {
 		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsd, ptr,
 		    config_prof));

@@ -286,6 +286,7 @@ huge_ralloc_no_move
 huge_salloc
 iaalloc
 iallocztm
+iarena_cleanup
 icalloc
 icalloct
 idalloc
@@ -342,6 +343,7 @@ malloc_write
 map_bias
 map_misc_offset
 mb_write
+narenas_auto
 narenas_tdata_cleanup
 narenas_total_get
 ncpus

@@ -293,7 +293,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 	assert(tcache_success == (ret != NULL));
 	if (unlikely(!tcache_success)) {
 		bool tcache_hard_success;
-		arena = arena_choose(tsd, arena);
+		arena = arena_choose(tsd, arena, false);
 		if (unlikely(arena == NULL))
 			return (NULL);

@@ -354,7 +354,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
-		arena = arena_choose(tsd, arena);
+		arena = arena_choose(tsd, arena, false);
 		if (unlikely(arena == NULL))
 			return (NULL);

@@ -459,8 +459,10 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
 tcaches_get(tsd_t *tsd, unsigned ind)
 {
 	tcaches_t *elm = &tcaches[ind];
-	if (unlikely(elm->tcache == NULL))
-		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+	if (unlikely(elm->tcache == NULL)) {
+		elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL,
+		    false));
+	}
 	return (elm->tcache);
 }
 #endif
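
All of the tcache call sites above serve application requests (the commit makes tcaches off-limits for metadata), so each arena_choose() call gains an explicit false. A trivial, self-contained illustration of that call-site intent, using stand-in types and a stub rather than the real jemalloc API:

#include <stdbool.h>
#include <stddef.h>

typedef struct { unsigned ind; } arena_t;

/* Tiny stub standing in for arena_choose(); see the earlier sketch. */
static arena_t *
arena_choose_sketch(void *tsd, arena_t *arena, bool internal)
{
	(void)tsd;
	(void)internal;
	return (arena);
}

/* A cache fill always serves application allocation, never metadata, so it
 * asks for the application arena (internal == false). */
static arena_t *
tcache_fill_arena_sketch(void *tsd, arena_t *arena)
{
	return (arena_choose_sketch(tsd, arena, false));
}
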
@@ -536,6 +536,7 @@ struct tsd_init_head_s {
     O(thread_allocated, uint64_t) \
     O(thread_deallocated, uint64_t) \
     O(prof_tdata, prof_tdata_t *) \
+    O(iarena, arena_t *) \
     O(arena, arena_t *) \
     O(arenas_tdata, arena_tdata_t *) \
     O(narenas_tdata, unsigned) \
@@ -552,6 +553,7 @@ struct tsd_init_head_s {
     NULL, \
     NULL, \
     NULL, \
+    NULL, \
     0, \
     false, \
     tcache_enabled_default, \