Pull out arena_bin_info_t and arena_bin_t into their own file.

In the process, kill arena_bin_index, which is unused.  Several follow-up
diffs will continue this separation.
Author: David T. Goldblatt, 2017-10-01 17:22:06 -07:00 (committed by David Goldblatt)
parent 740bdd68b1
commit 4bf4a1c4ea
16 changed files with 169 additions and 155 deletions
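At a glance, the change moves the small-bin types out of the arena headers into a new bin.h/bin.c pair and renames them. The summary below is derived from the diffs that follow.

/*
 * Renames introduced by this commit (all uses are updated in the diffs below):
 *
 *   arena_bin_info_t       ->  bin_info_t        (now declared in bin.h)
 *   arena_bin_t            ->  bin_t             (now declared in bin.h)
 *   arena_bin_info[NBINS]  ->  bin_infos[NBINS]  (now defined in bin.c)
 *
 * arena_t still embeds its per-size-class bins as "bin_t bins[NBINS]".
 */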

Makefile.in

@@ -93,6 +93,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/arena.c \
 	$(srcroot)src/background_thread.c \
 	$(srcroot)src/base.c \
+	$(srcroot)src/bin.c \
 	$(srcroot)src/bitmap.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \

include/jemalloc/internal/arena_externs.h

@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
 #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
 
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/size_classes.h"
@@ -9,8 +10,6 @@
 extern ssize_t opt_dirty_decay_ms;
 extern ssize_t opt_muzzy_decay_ms;
 
-extern const arena_bin_info_t arena_bin_info[NBINS];
-
 extern percpu_arena_mode_t opt_percpu_arena;
 extern const char *percpu_arena_mode_names[];
@@ -51,10 +50,10 @@ void arena_reset(tsd_t *tsd, arena_t *arena);
 void arena_destroy(tsd_t *tsd, arena_t *arena);
 void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
+void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
     bool zero);
-typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
+typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
 extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,

include/jemalloc/internal/arena_inlines_b.h

@@ -8,13 +8,6 @@
 #include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 
-static inline szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin) {
-    szind_t binind = (szind_t)(bin - arena->bins);
-    assert(binind < NBINS);
-    return binind;
-}
-
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
     cassert(config_prof);

include/jemalloc/internal/arena_structs_b.h

@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
 
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
@@ -13,42 +14,6 @@
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ticker.h"
 
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- *   /--------------------\
- *   | region 0           |
- *   |--------------------|
- *   | region 1           |
- *   |--------------------|
- *   | ...                |
- *   | ...                |
- *   | ...                |
- *   |--------------------|
- *   | region nregs-1     |
- *   \--------------------/
- */
-struct arena_bin_info_s {
-    /* Size of regions in a slab for this bin's size class. */
-    size_t reg_size;
-
-    /* Total size of a slab for this bin's size class. */
-    size_t slab_size;
-
-    /* Total number of regions in a slab for this bin's size class. */
-    uint32_t nregs;
-
-    /*
-     * Metadata used to manipulate bitmaps for slabs associated with this
-     * bin.
-     */
-    bitmap_info_t bitmap_info;
-};
-
 struct arena_decay_s {
     /* Synchronizes all non-atomic fields. */
     malloc_mutex_t mtx;
@@ -109,32 +74,6 @@ struct arena_decay_s {
     uint64_t ceil_npages;
 };
 
-struct arena_bin_s {
-    /* All operations on arena_bin_t fields require lock ownership. */
-    malloc_mutex_t lock;
-
-    /*
-     * Current slab being used to service allocations of this bin's size
-     * class.  slabcur is independent of slabs_{nonfull,full}; whenever
-     * slabcur is reassigned, the previous slab must be deallocated or
-     * inserted into slabs_{nonfull,full}.
-     */
-    extent_t *slabcur;
-
-    /*
-     * Heap of non-full slabs.  This heap is used to assure that new
-     * allocations come from the non-full slab that is oldest/lowest in
-     * memory.
-     */
-    extent_heap_t slabs_nonfull;
-
-    /* List used to track full slabs. */
-    extent_list_t slabs_full;
-
-    /* Bin statistics. */
-    malloc_bin_stats_t stats;
-};
-
 struct arena_s {
     /*
      * Number of threads currently assigned to this arena.  Each thread has
@@ -264,7 +203,7 @@ struct arena_s {
      *
      * Synchronization: internal.
      */
-    arena_bin_t bins[NBINS];
+    bin_t bins[NBINS];
 
     /*
      * Base allocator, from which arena metadata are allocated.

include/jemalloc/internal/arena_types.h

@@ -12,9 +12,7 @@
 #define DECAY_NTICKS_PER_UPDATE 1000
 
 typedef struct arena_slab_data_s arena_slab_data_t;
-typedef struct arena_bin_info_s arena_bin_info_t;
 typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
 typedef struct arena_tdata_s arena_tdata_t;
 typedef struct alloc_ctx_s alloc_ctx_t;

include/jemalloc/internal/bin.h (new file)

@@ -0,0 +1,81 @@
+#ifndef JEMALLOC_INTERNAL_BIN_H
+#define JEMALLOC_INTERNAL_BIN_H
+
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/stats.h"
+
+/*
+ * A bin contains a set of extents that are currently being used for slab
+ * allocations.
+ */
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each slab has the following layout:
+ *
+ *   /--------------------\
+ *   | region 0           |
+ *   |--------------------|
+ *   | region 1           |
+ *   |--------------------|
+ *   | ...                |
+ *   | ...                |
+ *   | ...                |
+ *   |--------------------|
+ *   | region nregs-1     |
+ *   \--------------------/
+ */
+typedef struct bin_info_s bin_info_t;
+struct bin_info_s {
+    /* Size of regions in a slab for this bin's size class. */
+    size_t reg_size;
+
+    /* Total size of a slab for this bin's size class. */
+    size_t slab_size;
+
+    /* Total number of regions in a slab for this bin's size class. */
+    uint32_t nregs;
+
+    /*
+     * Metadata used to manipulate bitmaps for slabs associated with this
+     * bin.
+     */
+    bitmap_info_t bitmap_info;
+};
+
+extern const bin_info_t bin_infos[NBINS];
+
+typedef struct bin_s bin_t;
+struct bin_s {
+    /* All operations on bin_t fields require lock ownership. */
+    malloc_mutex_t lock;
+
+    /*
+     * Current slab being used to service allocations of this bin's size
+     * class.  slabcur is independent of slabs_{nonfull,full}; whenever
+     * slabcur is reassigned, the previous slab must be deallocated or
+     * inserted into slabs_{nonfull,full}.
+     */
+    extent_t *slabcur;
+
+    /*
+     * Heap of non-full slabs.  This heap is used to assure that new
+     * allocations come from the non-full slab that is oldest/lowest in
+     * memory.
+     */
+    extent_heap_t slabs_nonfull;
+
+    /* List used to track full slabs. */
+    extent_list_t slabs_full;
+
+    /* Bin statistics. */
+    malloc_bin_stats_t stats;
+};
+
+#endif /* JEMALLOC_INTERNAL_BIN_H */
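The slab layout pictured in the new header means a region's address is simply the slab base plus regind * reg_size for that bin's size class. A minimal sketch of that arithmetic using only the declarations above; the helper name and the slab_base parameter are illustrative and not part of jemalloc:

/* Illustrative only: address of region `regind` in a slab of size class
 * `binind`, given the slab's base address. */
static inline void *
bin_region_addr(szind_t binind, void *slab_base, size_t regind) {
    assert(binind < NBINS);
    const bin_info_t *info = &bin_infos[binind];
    assert(regind < info->nregs);
    return (void *)((uintptr_t)slab_base + regind * info->reg_size);
}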

include/jemalloc/internal/extent_structs.h

@@ -143,7 +143,7 @@ struct extent_s {
     /*
      * List linkage, used by a variety of lists:
-     * - arena_bin_t's slabs_full
+     * - bin_t's slabs_full
      * - extents_t's LRU
      * - stashed dirty extents
      * - arena's large allocations

include/jemalloc/internal/tcache_inlines.h

@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
 #define JEMALLOC_INTERNAL_TCACHE_INLINES_H
 
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/sz.h"
@@ -76,16 +77,15 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     if (likely(!zero)) {
         if (slow_path && config_fill) {
             if (unlikely(opt_junk_alloc)) {
-                arena_alloc_junk_small(ret,
-                    &arena_bin_info[binind], false);
+                arena_alloc_junk_small(ret, &bin_infos[binind],
+                    false);
             } else if (unlikely(opt_zero)) {
                 memset(ret, 0, usize);
             }
         }
     } else {
         if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
-            arena_alloc_junk_small(ret, &arena_bin_info[binind],
-                true);
+            arena_alloc_junk_small(ret, &bin_infos[binind], true);
         }
         memset(ret, 0, usize);
     }
@@ -169,7 +169,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
     assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
 
     if (slow_path && config_fill && unlikely(opt_junk_free)) {
-        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+        arena_dalloc_junk_small(ptr, &bin_infos[binind]);
     }
 
     bin = tcache_small_bin_get(tcache, binind);

src/arena.c

@@ -32,21 +32,6 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
 static atomic_zd_t dirty_decay_ms_default;
 static atomic_zd_t muzzy_decay_ms_default;
 
-const arena_bin_info_t arena_bin_info[NBINS] = {
-#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
-    {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
-#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
-    lg_delta_lookup) \
-    BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
-        (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
-        (ndelta<<lg_delta)))
-    SIZE_CLASSES
-#undef BIN_INFO_bin_yes
-#undef BIN_INFO_bin_no
-#undef SC
-};
-
 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
 #define STEP(step, h, x, y) \
     h,
@@ -66,9 +51,9 @@ static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
     bool is_background_thread, bool all);
 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin);
+    bin_t *bin);
 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin);
+    bin_t *bin);
 
 /******************************************************************************/
@@ -352,7 +337,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     nstime_subtract(&astats->uptime, &arena->create_time);
 
     for (szind_t i = 0; i < NBINS; i++) {
-        arena_bin_t *bin = &arena->bins[i];
+        bin_t *bin = &arena->bins[i];
 
         malloc_mutex_lock(tsdn, &bin->lock);
         malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
@@ -385,8 +370,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
 }
 
 static void *
-arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
-    const arena_bin_info_t *bin_info) {
+arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, const bin_info_t *bin_info) {
     void *ret;
     arena_slab_data_t *slab_data = extent_slab_data_get(slab);
     size_t regind;
@@ -413,7 +397,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
     assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
     /* Freeing an interior pointer can cause assertion failure. */
     assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
-        (uintptr_t)arena_bin_info[binind].reg_size == 0);
+        (uintptr_t)bin_infos[binind].reg_size == 0);
 
     /* Avoid doing division with a variable divisor. */
     diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
@@ -434,7 +418,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
     default: not_reached();
     }
 
-    assert(regind < arena_bin_info[binind].nregs);
+    assert(regind < bin_infos[binind].nregs);
 
     return regind;
 }
@@ -443,7 +427,7 @@
 arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
     arena_slab_data_t *slab_data, void *ptr) {
     szind_t binind = extent_szind_get(slab);
-    const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+    const bin_info_t *bin_info = &bin_infos[binind];
     size_t regind = arena_slab_regind(slab, binind, ptr);
 
     assert(extent_nfree_get(slab) < bin_info->nregs);
@@ -1089,18 +1073,18 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
 }
 
 static void
-arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
     assert(extent_nfree_get(slab) > 0);
     extent_heap_insert(&bin->slabs_nonfull, slab);
 }
 
 static void
-arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
     extent_heap_remove(&bin->slabs_nonfull, slab);
 }
 
 static extent_t *
-arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
+arena_bin_slabs_nonfull_tryget(bin_t *bin) {
     extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
     if (slab == NULL) {
         return NULL;
@@ -1112,7 +1096,7 @@ arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
 }
 
 static void
-arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
     assert(extent_nfree_get(slab) == 0);
     /*
      * Tracking extents is required by arena_reset, which is not allowed
@@ -1126,7 +1110,7 @@ arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
 }
 
 static void
-arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
     if (arena_is_auto(arena)) {
         return;
     }
@@ -1180,7 +1164,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
     /* Bins. */
     for (unsigned i = 0; i < NBINS; i++) {
         extent_t *slab;
-        arena_bin_t *bin = &arena->bins[i];
+        bin_t *bin = &arena->bins[i];
         malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
         if (bin->slabcur != NULL) {
             slab = bin->slabcur;
@@ -1269,7 +1253,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 
 static extent_t *
 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
+    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
     szind_t szind) {
     extent_t *slab;
     bool zero, commit;
@@ -1292,7 +1276,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
 
 static extent_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    const arena_bin_info_t *bin_info) {
+    const bin_info_t *bin_info) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
@@ -1328,10 +1312,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 }
 
 static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
     szind_t binind) {
     extent_t *slab;
-    const arena_bin_info_t *bin_info;
+    const bin_info_t *bin_info;
 
     /* Look for a usable slab. */
     slab = arena_bin_slabs_nonfull_tryget(bin);
@@ -1340,7 +1324,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
     }
     /* No existing slabs have any space available. */
 
-    bin_info = &arena_bin_info[binind];
+    bin_info = &bin_infos[binind];
 
     /* Allocate a new slab. */
     malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1371,12 +1355,12 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
 
 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
 static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
     szind_t binind) {
-    const arena_bin_info_t *bin_info;
+    const bin_info_t *bin_info;
     extent_t *slab;
 
-    bin_info = &arena_bin_info[binind];
+    bin_info = &bin_infos[binind];
     if (!arena_is_auto(arena) && bin->slabcur != NULL) {
         arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
         bin->slabcur = NULL;
@@ -1429,7 +1413,7 @@
 void
 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
     unsigned i, nfill;
-    arena_bin_t *bin;
+    bin_t *bin;
 
     assert(tbin->ncached == 0);
@@ -1445,7 +1429,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
         if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
             0) {
             ptr = arena_slab_reg_alloc(tsdn, slab,
-                &arena_bin_info[binind]);
+                &bin_infos[binind]);
         } else {
             ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
         }
@@ -1462,8 +1446,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
             break;
         }
         if (config_fill && unlikely(opt_junk_alloc)) {
-            arena_alloc_junk_small(ptr, &arena_bin_info[binind],
-                true);
+            arena_alloc_junk_small(ptr, &bin_infos[binind], true);
         }
         /* Insert such that low regions get used first. */
         *(tbin->avail - nfill + i) = ptr;
@@ -1481,14 +1464,14 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 }
 
 void
-arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
     if (!zero) {
         memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
     }
 }
 
 static void
-arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
+arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
     memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
 }
 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
@@ -1497,7 +1480,7 @@ arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
 static void *
 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
     void *ret;
-    arena_bin_t *bin;
+    bin_t *bin;
     size_t usize;
     extent_t *slab;
@@ -1507,7 +1490,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
     malloc_mutex_lock(tsdn, &bin->lock);
     if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-        ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
+        ret = arena_slab_reg_alloc(tsdn, slab, &bin_infos[binind]);
     } else {
         ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
     }
@@ -1531,14 +1514,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
         if (config_fill) {
             if (unlikely(opt_junk_alloc)) {
                 arena_alloc_junk_small(ret,
-                    &arena_bin_info[binind], false);
+                    &bin_infos[binind], false);
             } else if (unlikely(opt_zero)) {
                 memset(ret, 0, usize);
             }
         }
     } else {
         if (config_fill && unlikely(opt_junk_alloc)) {
-            arena_alloc_junk_small(ret, &arena_bin_info[binind],
+            arena_alloc_junk_small(ret, &bin_infos[binind],
                 true);
         }
         memset(ret, 0, usize);
@@ -1643,13 +1626,13 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 }
 
 static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
     /* Dissociate slab from bin. */
     if (slab == bin->slabcur) {
         bin->slabcur = NULL;
     } else {
         szind_t binind = extent_szind_get(slab);
-        const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+        const bin_info_t *bin_info = &bin_infos[binind];
 
         /*
          * The following block's conditional is necessary because if the
@@ -1666,7 +1649,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
 
 static void
 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+    bin_t *bin) {
     assert(slab != bin->slabcur);
 
     malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1680,8 +1663,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 
 static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) {
     assert(extent_nfree_get(slab) > 0);
 
     /*
@@ -1711,8 +1693,8 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
     void *ptr, bool junked) {
     arena_slab_data_t *slab_data = extent_slab_data_get(slab);
     szind_t binind = extent_szind_get(slab);
-    arena_bin_t *bin = &arena->bins[binind];
-    const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+    bin_t *bin = &arena->bins[binind];
+    const bin_info_t *bin_info = &bin_infos[binind];
 
     if (!junked && config_fill && unlikely(opt_junk_free)) {
         arena_dalloc_junk_small(ptr, bin_info);
@@ -1743,7 +1725,7 @@ arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 static void
 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
     szind_t binind = extent_szind_get(extent);
-    arena_bin_t *bin = &arena->bins[binind];
+    bin_t *bin = &arena->bins[binind];
 
     malloc_mutex_lock(tsdn, &bin->lock);
     arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
@@ -1777,7 +1759,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
          * Avoid moving the allocation if the size class can be left the
          * same.
          */
-        assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
+        assert(bin_infos[sz_size2index(oldsize)].reg_size ==
             oldsize);
         if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
             sz_size2index(oldsize)) && (size > oldsize || usize_max <
@@ -2060,7 +2042,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     /* Initialize bins. */
     for (i = 0; i < NBINS; i++) {
-        arena_bin_t *bin = &arena->bins[i];
+        bin_t *bin = &arena->bins[i];
         if (malloc_mutex_init(&bin->lock, "arena_bin",
             WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
             goto label_error;

src/bin.c (new file, 21 lines added)

@@ -0,0 +1,21 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bin.h"
+
+const bin_info_t bin_infos[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
+    {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+    lg_delta_lookup) \
+    BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+        (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+        (ndelta<<lg_delta)))
+    SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};
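For reference, each SC() entry with bin == yes expands to one bin_infos initializer whose fields follow directly from the macro arguments. A worked example with made-up arguments, assuming LG_PAGE == 12 (the real values come from size_classes.h):

/* SC(..., lg_grp = 3, lg_delta = 3, ndelta = 1, ..., bin = yes, pgs = 1, ...)
 * would expand to:
 *
 *   reg_size  = (1U << 3) + (1 << 3) = 16
 *   slab_size = 1 << LG_PAGE         = 4096
 *   nregs     = 4096 / 16            = 256
 *
 *   {16, 4096, 256, BITMAP_INFO_INITIALIZER(256)},
 */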

src/ctl.c

@@ -2333,9 +2333,9 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
 CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
 CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
-CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
 static const ctl_named_node_t *
 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
     if (i > NBINS) {
@@ -2680,7 +2680,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
         MUTEX_PROF_RESET(arena->base->mtx);
 
         for (szind_t i = 0; i < NBINS; i++) {
-            arena_bin_t *bin = &arena->bins[i];
+            bin_t *bin = &arena->bins[i];
             MUTEX_PROF_RESET(bin->lock);
         }
     }

src/tcache.c

@@ -121,7 +121,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
         /* Lock the arena bin associated with the first object. */
         extent_t *extent = item_extent[0];
         arena_t *bin_arena = extent_arena_get(extent);
-        arena_bin_t *bin = &bin_arena->bins[binind];
+        bin_t *bin = &bin_arena->bins[binind];
 
         if (config_prof && bin_arena == arena) {
             if (arena_prof_accum(tsd_tsdn(tsd), arena,
@@ -169,7 +169,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
          * The flush loop didn't happen to flush to this thread's
          * arena, so the stats didn't get merged.  Manually do so now.
          */
-        arena_bin_t *bin = &arena->bins[binind];
+        bin_t *bin = &arena->bins[binind];
         malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
         bin->stats.nflushes++;
         bin->stats.nrequests += tbin->tstats.nrequests;
@@ -533,7 +533,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
     /* Merge and reset tcache stats. */
     for (i = 0; i < NBINS; i++) {
-        arena_bin_t *bin = &arena->bins[i];
+        bin_t *bin = &arena->bins[i];
         cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
         malloc_mutex_lock(tsdn, &bin->lock);
         bin->stats.nrequests += tbin->tstats.nrequests;
@@ -674,13 +674,13 @@ tcache_boot(tsdn_t *tsdn) {
     stack_nelms = 0;
     unsigned i;
     for (i = 0; i < NBINS; i++) {
-        if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+        if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
             tcache_bin_info[i].ncached_max =
                 TCACHE_NSLOTS_SMALL_MIN;
-        } else if ((arena_bin_info[i].nregs << 1) <=
+        } else if ((bin_infos[i].nregs << 1) <=
             TCACHE_NSLOTS_SMALL_MAX) {
             tcache_bin_info[i].ncached_max =
-                (arena_bin_info[i].nregs << 1);
+                (bin_infos[i].nregs << 1);
         } else {
             tcache_bin_info[i].ncached_max =
                 TCACHE_NSLOTS_SMALL_MAX;
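The tcache_boot() hunk above only renames the table; the sizing rule is unchanged: each small cache bin holds twice the region count of its slab, clamped to the TCACHE_NSLOTS_SMALL_MIN/MAX bounds. Restated as a clamp, with illustrative numbers:

/* Equivalent formulation (illustration only, not a proposed change):
 *   ncached_max = clamp(bin_infos[i].nregs << 1,
 *       TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX);
 * e.g. with nregs == 256 and hypothetical bounds of 20 and 200,
 * 256 << 1 == 512 exceeds the upper bound, so 200 is used.
 */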

test/unit/junk.c

@@ -15,7 +15,7 @@ watch_junking(void *p) {
 }
 
 static void
-arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) {
+arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
     size_t i;
 
     arena_dalloc_junk_small_orig(ptr, bin_info);

test/unit/mallctl.c

@@ -696,10 +696,10 @@ TEST_BEGIN(test_arenas_bin_constants) {
     assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)
 
-    TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
-    TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
+    TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
+    TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
     TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
-        arena_bin_info[0].slab_size);
+        bin_infos[0].slab_size);
 
 #undef TEST_ARENAS_BIN_CONSTANT
 }

test/unit/slab.c

@@ -6,7 +6,7 @@ TEST_BEGIN(test_arena_slab_regind) {
     for (binind = 0; binind < NBINS; binind++) {
         size_t regind;
         extent_t slab;
-        const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+        const bin_info_t *bin_info = &bin_infos[binind];
         extent_init(&slab, NULL, mallocx(bin_info->slab_size,
             MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
             binind, 0, extent_state_active, false, true, true);

test/unit/stats.c

@@ -245,7 +245,7 @@ TEST_BEGIN(test_stats_arenas_bins) {
         (void *)&arena_ind, sizeof(arena_ind)), 0,
         "Unexpected mallctl() failure");
 
-    p = malloc(arena_bin_info[0].reg_size);
+    p = malloc(bin_infos[0].reg_size);
     assert_ptr_not_null(p, "Unexpected malloc() failure");
 
     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),