From 4bf4a1c4ea418ba490d35d23aee0f535e96ddd23 Mon Sep 17 00:00:00 2001
From: "David T. Goldblatt"
Date: Sun, 1 Oct 2017 17:22:06 -0700
Subject: [PATCH] Pull out arena_bin_info_t and arena_bin_t into their own file.

In the process, kill arena_bin_index, which is unused. To follow are several
diffs continuing this separation.
---
 Makefile.in                                 |  1 +
 include/jemalloc/internal/arena_externs.h   |  7 +-
 include/jemalloc/internal/arena_inlines_b.h |  7 --
 include/jemalloc/internal/arena_structs_b.h | 65 +-------------
 include/jemalloc/internal/arena_types.h     |  2 -
 include/jemalloc/internal/bin.h             | 81 +++++++++++++++++
 include/jemalloc/internal/extent_structs.h  |  2 +-
 include/jemalloc/internal/tcache_inlines.h  | 10 +--
 src/arena.c                                 | 96 +++++++++------------
 src/bin.c                                   | 21 +++++
 src/ctl.c                                   |  8 +-
 src/tcache.c                                | 12 +--
 test/unit/junk.c                            |  2 +-
 test/unit/mallctl.c                         |  6 +-
 test/unit/slab.c                            |  2 +-
 test/unit/stats.c                           |  2 +-
 16 files changed, 169 insertions(+), 155 deletions(-)
 create mode 100644 include/jemalloc/internal/bin.h
 create mode 100644 src/bin.c

diff --git a/Makefile.in b/Makefile.in
index 0698633b..2f0b3b24 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -93,6 +93,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/arena.c \
 	$(srcroot)src/background_thread.c \
 	$(srcroot)src/base.c \
+	$(srcroot)src/bin.c \
 	$(srcroot)src/bitmap.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \
diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 5a0e3add..77a2b541 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
 #define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
 
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/size_classes.h"
@@ -9,8 +10,6 @@
 extern ssize_t opt_dirty_decay_ms;
 extern ssize_t opt_muzzy_decay_ms;
 
-extern const arena_bin_info_t arena_bin_info[NBINS];
-
 extern percpu_arena_mode_t opt_percpu_arena;
 extern const char *percpu_arena_mode_names[];
 
@@ -51,10 +50,10 @@ void arena_reset(tsd_t *tsd, arena_t *arena);
 void arena_destroy(tsd_t *tsd, arena_t *arena);
 void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
+void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
     bool zero);
 
-typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
+typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
 extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
 
 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index 003abe11..7b10d9ef 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -8,13 +8,6 @@
 #include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 
-static inline szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin) {
-	szind_t binind = (szind_t)(bin - arena->bins);
-	assert(binind < NBINS);
-	return binind;
-}
-
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
 	cassert(config_prof);
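The typedef and `JET_MUTABLE` extern kept in arena_externs.h above exist so that tests can swap out the junk-fill routine at run time; test/unit/junk.c, further down in this patch, does exactly that. A stand-alone sketch of the pattern (hypothetical names, not jemalloc's actual `JET_MUTABLE` machinery) could look like this:

```c
#include <stdio.h>
#include <string.h>

/* Type of the junk-fill hook, analogous to arena_dalloc_junk_small_t. */
typedef void (dalloc_junk_t)(void *ptr, size_t usize);

static void
dalloc_junk_default(void *ptr, size_t usize) {
	/* Default behavior: scribble over the region being freed. */
	memset(ptr, 0x5a, usize);
}

/* In a test build this pointer is writable, so tests can intercept it. */
dalloc_junk_t *dalloc_junk = dalloc_junk_default;

static int junk_calls = 0;

static void
dalloc_junk_intercept(void *ptr, size_t usize) {
	/* Count the call, then delegate to the original implementation. */
	junk_calls++;
	dalloc_junk_default(ptr, usize);
}

int
main(void) {
	char buf[16];

	dalloc_junk = dalloc_junk_intercept;	/* as test/unit/junk.c does */
	dalloc_junk(buf, sizeof(buf));
	printf("junk hook called %d time(s), first byte 0x%02x\n",
	    junk_calls, (unsigned char)buf[0]);
	return 0;
}
```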
diff --git a/include/jemalloc/internal/arena_structs_b.h b/include/jemalloc/internal/arena_structs_b.h
index f74ea97d..d843b09b 100644
--- a/include/jemalloc/internal/arena_structs_b.h
+++ b/include/jemalloc/internal/arena_structs_b.h
@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
 
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
@@ -13,42 +14,6 @@
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ticker.h"
 
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- *   /--------------------\
- *   | region 0           |
- *   |--------------------|
- *   | region 1           |
- *   |--------------------|
- *   | ...                |
- *   | ...                |
- *   | ...                |
- *   |--------------------|
- *   | region nregs-1     |
- *   \--------------------/
- */
-struct arena_bin_info_s {
-	/* Size of regions in a slab for this bin's size class. */
-	size_t reg_size;
-
-	/* Total size of a slab for this bin's size class. */
-	size_t slab_size;
-
-	/* Total number of regions in a slab for this bin's size class. */
-	uint32_t nregs;
-
-	/*
-	 * Metadata used to manipulate bitmaps for slabs associated with this
-	 * bin.
-	 */
-	bitmap_info_t bitmap_info;
-};
-
 struct arena_decay_s {
 	/* Synchronizes all non-atomic fields. */
 	malloc_mutex_t mtx;
@@ -109,32 +74,6 @@ struct arena_decay_s {
 	uint64_t ceil_npages;
 };
 
-struct arena_bin_s {
-	/* All operations on arena_bin_t fields require lock ownership. */
-	malloc_mutex_t lock;
-
-	/*
-	 * Current slab being used to service allocations of this bin's size
-	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
-	 * slabcur is reassigned, the previous slab must be deallocated or
-	 * inserted into slabs_{nonfull,full}.
-	 */
-	extent_t *slabcur;
-
-	/*
-	 * Heap of non-full slabs.  This heap is used to assure that new
-	 * allocations come from the non-full slab that is oldest/lowest in
-	 * memory.
-	 */
-	extent_heap_t slabs_nonfull;
-
-	/* List used to track full slabs. */
-	extent_list_t slabs_full;
-
-	/* Bin statistics. */
-	malloc_bin_stats_t stats;
-};
-
 struct arena_s {
 	/*
	 * Number of threads currently assigned to this arena.  Each thread has
@@ -264,7 +203,7 @@ struct arena_s {
 	 *
 	 * Synchronization: internal.
 	 */
-	arena_bin_t bins[NBINS];
+	bin_t bins[NBINS];
 
 	/*
	 * Base allocator, from which arena metadata are allocated.
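The hunks above remove the bin structures from arena_structs_b.h and retype arena_t's bins array to the new bin_t. The underlying split — immutable per-size-class parameters shared process-wide, mutable state duplicated per arena behind its own lock — can be sketched stand-alone as follows (toy types and values, not the real jemalloc definitions):

```c
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define NBINS_DEMO 3

/* Immutable per-size-class parameters: one copy for the whole process. */
typedef struct {
	size_t reg_size;
	size_t slab_size;
	uint32_t nregs;
} demo_bin_info_t;

static const demo_bin_info_t demo_bin_infos[NBINS_DEMO] = {
	{8, 4096, 512}, {16, 4096, 256}, {32, 4096, 128},
};

/* Mutable per-arena state: one instance per arena, each with its own lock. */
typedef struct {
	pthread_mutex_t lock;
	void *slabcur;		/* current slab, if any */
	uint64_t nmalloc;	/* toy statistic */
} demo_bin_t;

typedef struct {
	demo_bin_t bins[NBINS_DEMO];
} demo_arena_t;

/*
 * Allocation paths read demo_bin_infos[i] without taking any lock, and only
 * lock the per-arena demo_bin_t they actually touch.
 */
```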
diff --git a/include/jemalloc/internal/arena_types.h b/include/jemalloc/internal/arena_types.h
index a691bd81..70001b5f 100644
--- a/include/jemalloc/internal/arena_types.h
+++ b/include/jemalloc/internal/arena_types.h
@@ -12,9 +12,7 @@
 #define DECAY_NTICKS_PER_UPDATE 1000
 
 typedef struct arena_slab_data_s arena_slab_data_t;
-typedef struct arena_bin_info_s arena_bin_info_t;
 typedef struct arena_decay_s arena_decay_t;
-typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
 typedef struct arena_tdata_s arena_tdata_t;
 typedef struct alloc_ctx_s alloc_ctx_t;
diff --git a/include/jemalloc/internal/bin.h b/include/jemalloc/internal/bin.h
new file mode 100644
index 00000000..09717b14
--- /dev/null
+++ b/include/jemalloc/internal/bin.h
@@ -0,0 +1,81 @@
+#ifndef JEMALLOC_INTERNAL_BIN_H
+#define JEMALLOC_INTERNAL_BIN_H
+
+#include "jemalloc/internal/extent_types.h"
+#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/stats.h"
+
+/*
+ * A bin contains a set of extents that are currently being used for slab
+ * allocations.
+ */
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each slab has the following layout:
+ *
+ *   /--------------------\
+ *   | region 0           |
+ *   |--------------------|
+ *   | region 1           |
+ *   |--------------------|
+ *   | ...                |
+ *   | ...                |
+ *   | ...                |
+ *   |--------------------|
+ *   | region nregs-1     |
+ *   \--------------------/
+ */
+typedef struct bin_info_s bin_info_t;
+struct bin_info_s {
+	/* Size of regions in a slab for this bin's size class. */
+	size_t reg_size;
+
+	/* Total size of a slab for this bin's size class. */
+	size_t slab_size;
+
+	/* Total number of regions in a slab for this bin's size class. */
+	uint32_t nregs;
+
+	/*
+	 * Metadata used to manipulate bitmaps for slabs associated with this
+	 * bin.
+	 */
+	bitmap_info_t bitmap_info;
+};
+
+extern const bin_info_t bin_infos[NBINS];
+
+
+typedef struct bin_s bin_t;
+struct bin_s {
+	/* All operations on bin_t fields require lock ownership. */
+	malloc_mutex_t lock;
+
+	/*
+	 * Current slab being used to service allocations of this bin's size
+	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
+	 * slabcur is reassigned, the previous slab must be deallocated or
+	 * inserted into slabs_{nonfull,full}.
+	 */
+	extent_t *slabcur;
+
+	/*
+	 * Heap of non-full slabs.  This heap is used to assure that new
+	 * allocations come from the non-full slab that is oldest/lowest in
+	 * memory.
+	 */
+	extent_heap_t slabs_nonfull;
+
+	/* List used to track full slabs. */
+	extent_list_t slabs_full;
+
+	/* Bin statistics. */
+	malloc_bin_stats_t stats;
+};
+
+#endif /* JEMALLOC_INTERNAL_BIN_H */
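The three sizing fields of bin_info_t above are related by nregs = slab_size / reg_size, and a pointer into a slab maps back to its region index by offset division. A small stand-alone illustration (the reg_size/slab_size values are made up for the example; real entries come from the size-class table):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for one bin_info_t entry (illustrative values only). */
#define REG_SIZE   48u	/* bytes per region */
#define SLAB_SIZE  12288u	/* bytes per slab (3 pages of 4 KiB) */

int
main(void) {
	uint32_t nregs = SLAB_SIZE / REG_SIZE;	/* 256 regions per slab */

	/*
	 * A slab is carved into nregs equal regions; a pointer into the slab
	 * maps back to its region index by offset division, which is what
	 * arena_slab_regind() does (with a constant divisor per bin).
	 */
	uintptr_t slab_base = 0x7f0000000000u;
	uintptr_t ptr = slab_base + 5u * REG_SIZE;
	size_t regind = (size_t)((ptr - slab_base) / REG_SIZE);

	assert((ptr - slab_base) % REG_SIZE == 0);	/* no interior pointers */
	printf("nregs=%u regind=%zu\n", nregs, regind);
	return 0;
}
```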
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index 722963b5..89b49c72 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -143,7 +143,7 @@ struct extent_s {
 
 	/*
	 * List linkage, used by a variety of lists:
-	 * - arena_bin_t's slabs_full
+	 * - bin_t's slabs_full
	 * - extents_t's LRU
	 * - stashed dirty extents
	 * - arena's large allocations
diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h
index d1632d8f..14ab037f 100644
--- a/include/jemalloc/internal/tcache_inlines.h
+++ b/include/jemalloc/internal/tcache_inlines.h
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
 #define JEMALLOC_INTERNAL_TCACHE_INLINES_H
 
+#include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/sz.h"
@@ -76,16 +77,15 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 	if (likely(!zero)) {
 		if (slow_path && config_fill) {
 			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret,
-				    &arena_bin_info[binind], false);
+				arena_alloc_junk_small(ret, &bin_infos[binind],
+				    false);
 			} else if (unlikely(opt_zero)) {
 				memset(ret, 0, usize);
 			}
 		}
 	} else {
 		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &arena_bin_info[binind],
-			    true);
+			arena_alloc_junk_small(ret, &bin_infos[binind], true);
 		}
 		memset(ret, 0, usize);
 	}
@@ -169,7 +169,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
-		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
+		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
 	}
 
 	bin = tcache_small_bin_get(tcache, binind);
diff --git a/src/arena.c b/src/arena.c
index a28dbfb0..2dcb447e 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -32,21 +32,6 @@
 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
 static atomic_zd_t dirty_decay_ms_default;
 static atomic_zd_t muzzy_decay_ms_default;
 
-const arena_bin_info_t arena_bin_info[NBINS] = {
-#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
-	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
-#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
-    lg_delta_lookup) \
-	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
-	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
-	    (ndelta<<lg_delta)))
-	SIZE_CLASSES
-#undef BIN_INFO_bin_yes
-#undef BIN_INFO_bin_no
-#undef SC
-};
-
@@ [...] @@
 	nstime_copy(&astats->uptime, &arena->create_time);
 
 	for (szind_t i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
+		bin_t *bin = &arena->bins[i];
 
 		malloc_mutex_lock(tsdn, &bin->lock);
 		malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
@@ -385,8 +370,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
 }
 
 static void *
-arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
-    const arena_bin_info_t *bin_info) {
+arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, const bin_info_t *bin_info) {
 	void *ret;
 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
 	size_t regind;
@@ -413,7 +397,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
 	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
 	/* Freeing an interior pointer can cause assertion failure. */
 	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
-	    (uintptr_t)arena_bin_info[binind].reg_size == 0);
+	    (uintptr_t)bin_infos[binind].reg_size == 0);
 
 	/* Avoid doing division with a variable divisor. */
 	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
@@ -434,7 +418,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
 	default: not_reached();
 	}
 
-	assert(regind < arena_bin_info[binind].nregs);
+	assert(regind < bin_infos[binind].nregs);
 
 	return regind;
 }
@@ -443,7 +427,7 @@
 static void
 arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
 	szind_t binind = extent_szind_get(slab);
-	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+	const bin_info_t *bin_info = &bin_infos[binind];
 	size_t regind = arena_slab_regind(slab, binind, ptr);
 
 	assert(extent_nfree_get(slab) < bin_info->nregs);
@@ -1089,18 +1073,18 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
 }
 
 static void
-arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
 	assert(extent_nfree_get(slab) > 0);
 	extent_heap_insert(&bin->slabs_nonfull, slab);
 }
 
 static void
-arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
 	extent_heap_remove(&bin->slabs_nonfull, slab);
 }
 
 static extent_t *
-arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
+arena_bin_slabs_nonfull_tryget(bin_t *bin) {
 	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
 	if (slab == NULL) {
 		return NULL;
@@ -1112,7 +1096,7 @@ arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
 }
 
 static void
-arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
 	assert(extent_nfree_get(slab) == 0);
 	/*
	 * Tracking extents is required by arena_reset, which is not allowed
@@ -1126,7 +1110,7 @@ arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
 }
 
 static void
-arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
 	if (arena_is_auto(arena)) {
 		return;
 	}
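arena_slab_regind(), modified above, "avoids doing division with a variable divisor" by switching on the bin index so every division is by a compile-time constant the compiler can strength-reduce. A minimal sketch of that idea, with invented region sizes:

```c
#include <stddef.h>
#include <stdio.h>

/* Toy region sizes for three bins (illustrative values only). */
#define REGSIZE_0 8
#define REGSIZE_1 16
#define REGSIZE_2 48

/*
 * Dividing by a compile-time constant lets the compiler turn the division
 * into a multiply/shift; a switch over the bin index keeps every divisor
 * constant, which is the idea behind arena_slab_regind()'s structure.
 */
static size_t
regind_compute(unsigned binind, size_t diff) {
	switch (binind) {
	case 0: return diff / REGSIZE_0;
	case 1: return diff / REGSIZE_1;
	case 2: return diff / REGSIZE_2;
	default: return (size_t)-1;	/* not reached in the real code */
	}
}

int
main(void) {
	printf("%zu\n", regind_compute(2, 5 * REGSIZE_2));	/* prints 5 */
	return 0;
}
```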
@@ -1180,7 +1164,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 	/* Bins. */
 	for (unsigned i = 0; i < NBINS; i++) {
 		extent_t *slab;
-		arena_bin_t *bin = &arena->bins[i];
+		bin_t *bin = &arena->bins[i];
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 		if (bin->slabcur != NULL) {
 			slab = bin->slabcur;
@@ -1269,7 +1253,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 
 static extent_t *
 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
+    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
     szind_t szind) {
 	extent_t *slab;
 	bool zero, commit;
@@ -1292,7 +1276,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
 
 static extent_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    const arena_bin_info_t *bin_info) {
+    const bin_info_t *bin_info) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
@@ -1328,10 +1312,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 }
 
 static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
     szind_t binind) {
 	extent_t *slab;
-	const arena_bin_info_t *bin_info;
+	const bin_info_t *bin_info;
 
 	/* Look for a usable slab. */
 	slab = arena_bin_slabs_nonfull_tryget(bin);
@@ -1340,7 +1324,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
 	}
 	/* No existing slabs have any space available. */
 
-	bin_info = &arena_bin_info[binind];
+	bin_info = &bin_infos[binind];
 
 	/* Allocate a new slab. */
 	malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1371,12 +1355,12 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
 
 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
 static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
     szind_t binind) {
-	const arena_bin_info_t *bin_info;
+	const bin_info_t *bin_info;
 	extent_t *slab;
 
-	bin_info = &arena_bin_info[binind];
+	bin_info = &bin_infos[binind];
 	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
 		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
 		bin->slabcur = NULL;
@@ -1429,7 +1413,7 @@
 void
 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
 	unsigned i, nfill;
-	arena_bin_t *bin;
+	bin_t *bin;
 
 	assert(tbin->ncached == 0);
 
@@ -1445,7 +1429,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
 		    0) {
 			ptr = arena_slab_reg_alloc(tsdn, slab,
-			    &arena_bin_info[binind]);
+			    &bin_infos[binind]);
 		} else {
 			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
 		}
@@ -1462,8 +1446,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 			break;
 		}
 		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
-			    true);
+			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
 		}
 		/* Insert such that low regions get used first. */
 		*(tbin->avail - nfill + i) = ptr;
 	}
 	if (config_stats) {
@@ -1481,14 +1464,14 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 }
 
 void
-arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
 	if (!zero) {
 		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
 	}
 }
 
 static void
-arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
+arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
 	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
 }
 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
@@ -1497,7 +1480,7 @@ arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
 
 static void *
 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 	void *ret;
-	arena_bin_t *bin;
+	bin_t *bin;
 	size_t usize;
 	extent_t *slab;
 
@@ -1507,7 +1490,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 
 	malloc_mutex_lock(tsdn, &bin->lock);
 	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
-		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
+		ret = arena_slab_reg_alloc(tsdn, slab, &bin_infos[binind]);
 	} else {
 		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
 	}
@@ -1531,14 +1514,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 		if (config_fill) {
 			if (unlikely(opt_junk_alloc)) {
 				arena_alloc_junk_small(ret,
-				    &arena_bin_info[binind], false);
+				    &bin_infos[binind], false);
 			} else if (unlikely(opt_zero)) {
 				memset(ret, 0, usize);
 			}
 		}
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, &arena_bin_info[binind],
+			arena_alloc_junk_small(ret, &bin_infos[binind],
 			    true);
 		}
 		memset(ret, 0, usize);
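arena_alloc_junk_small() and arena_dalloc_junk_small_impl(), retyped above, fill a region with recognizable byte patterns when junk filling is enabled, which makes reads of uninitialized memory and use-after-free visible. A self-contained sketch of the technique (the byte values here are illustrative; jemalloc uses its own JEMALLOC_ALLOC_JUNK/JEMALLOC_FREE_JUNK constants):

```c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative junk bytes, not necessarily jemalloc's. */
#define ALLOC_JUNK ((unsigned char)0xa5)
#define FREE_JUNK  ((unsigned char)0x5a)

static void
alloc_junk(void *ptr, size_t reg_size) {
	/* Makes reads of uninitialized memory show a recognizable pattern. */
	memset(ptr, ALLOC_JUNK, reg_size);
}

static void
dalloc_junk(void *ptr, size_t reg_size) {
	/* Makes use-after-free visible: the region no longer holds user data. */
	memset(ptr, FREE_JUNK, reg_size);
}

int
main(void) {
	size_t reg_size = 32;
	unsigned char *p = malloc(reg_size);

	assert(p != NULL);
	alloc_junk(p, reg_size);
	assert(p[0] == ALLOC_JUNK && p[reg_size - 1] == ALLOC_JUNK);
	dalloc_junk(p, reg_size);
	assert(p[0] == FREE_JUNK && p[reg_size - 1] == FREE_JUNK);
	free(p);
	return 0;
}
```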
@@ -1643,13 +1626,13 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 }
 
 static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
 	/* Dissociate slab from bin. */
 	if (slab == bin->slabcur) {
 		bin->slabcur = NULL;
 	} else {
 		szind_t binind = extent_szind_get(slab);
-		const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+		const bin_info_t *bin_info = &bin_infos[binind];
 
 		/*
		 * The following block's conditional is necessary because if the
@@ -1666,7 +1649,7 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
 
 static void
 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+    bin_t *bin) {
 	assert(slab != bin->slabcur);
 
 	malloc_mutex_unlock(tsdn, &bin->lock);
@@ -1680,8 +1663,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 
 static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    arena_bin_t *bin) {
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, bin_t *bin) {
 	assert(extent_nfree_get(slab) > 0);
 
 	/*
@@ -1711,8 +1693,8 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
     void *ptr, bool junked) {
 	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
 	szind_t binind = extent_szind_get(slab);
-	arena_bin_t *bin = &arena->bins[binind];
-	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+	bin_t *bin = &arena->bins[binind];
+	const bin_info_t *bin_info = &bin_infos[binind];
 
 	if (!junked && config_fill && unlikely(opt_junk_free)) {
 		arena_dalloc_junk_small(ptr, bin_info);
@@ -1743,7 +1725,7 @@ arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 
 static void
 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
 	szind_t binind = extent_szind_get(extent);
-	arena_bin_t *bin = &arena->bins[binind];
+	bin_t *bin = &arena->bins[binind];
 
 	malloc_mutex_lock(tsdn, &bin->lock);
 	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
@@ -1777,7 +1759,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
-		assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
+		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
 		    oldsize);
 		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
 		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
@@ -2060,7 +2042,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 
 	/* Initialize bins. */
 	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
+		bin_t *bin = &arena->bins[i];
 		if (malloc_mutex_init(&bin->lock, "arena_bin",
 		    WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
 			goto label_error;
diff --git a/src/bin.c b/src/bin.c
new file mode 100644
index 00000000..59cdd2c1
--- /dev/null
+++ b/src/bin.c
@@ -0,0 +1,21 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bin.h"
+
+const bin_info_t bin_infos[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
+	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+    lg_delta_lookup) \
+	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+	    (ndelta<<lg_delta)))
+	SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};
diff --git a/src/ctl.c b/src/ctl.c
[... the arenas.bin.<i> readers switch from arena_bin_info to bin_infos ...]
 	if (i > NBINS) {
@@ -2680,7 +2680,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 		MUTEX_PROF_RESET(arena->base->mtx);
 
 		for (szind_t i = 0; i < NBINS; i++) {
-			arena_bin_t *bin = &arena->bins[i];
+			bin_t *bin = &arena->bins[i];
 			MUTEX_PROF_RESET(bin->lock);
 		}
 	}
diff --git a/src/tcache.c b/src/tcache.c
index e22f8067..6d516731 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -121,7 +121,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 		/* Lock the arena bin associated with the first object. */
 		extent_t *extent = item_extent[0];
 		arena_t *bin_arena = extent_arena_get(extent);
-		arena_bin_t *bin = &bin_arena->bins[binind];
+		bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
 			if (arena_prof_accum(tsd_tsdn(tsd), arena,
@@ -169,7 +169,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
-		arena_bin_t *bin = &arena->bins[binind];
+		bin_t *bin = &arena->bins[binind];
 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 		bin->stats.nflushes++;
 		bin->stats.nrequests += tbin->tstats.nrequests;
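The bin_infos table added in src/bin.c above is built by expanding jemalloc's size-class list through the SC()/BIN_INFO_bin_* macros, emitting one initializer per small size class. A self-contained sketch of that X-macro technique, using a made-up size-class list rather than the real SIZE_CLASSES:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
	size_t reg_size;
	size_t slab_size;
	uint32_t nregs;
} demo_bin_info_t;

/*
 * A miniature stand-in for jemalloc's SIZE_CLASSES list: each entry names a
 * size class and says whether it is backed by a small bin.
 */
#define DEMO_SIZE_CLASSES \
    SC(8,    4096, yes) \
    SC(16,   4096, yes) \
    SC(32,   4096, yes) \
    SC(8192, 8192, no)

/*
 * Expand the list once to build the table, emitting entries only for the
 * classes that are bins -- the same trick BIN_INFO_bin_yes/_no play.
 */
#define BIN_ENTRY_yes(reg, slab)	{reg, slab, (uint32_t)((slab) / (reg))},
#define BIN_ENTRY_no(reg, slab)
#define SC(reg, slab, bin)		BIN_ENTRY_##bin(reg, slab)
static const demo_bin_info_t demo_bin_infos[] = {
	DEMO_SIZE_CLASSES
};
#undef SC
#undef BIN_ENTRY_yes
#undef BIN_ENTRY_no

int
main(void) {
	size_t n = sizeof(demo_bin_infos) / sizeof(demo_bin_infos[0]);
	for (size_t i = 0; i < n; i++) {
		printf("reg=%zu slab=%zu nregs=%u\n",
		    demo_bin_infos[i].reg_size, demo_bin_infos[i].slab_size,
		    demo_bin_infos[i].nregs);
	}
	return 0;
}
```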
@@ -533,7 +533,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
 	/* Merge and reset tcache stats. */
 	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
+		bin_t *bin = &arena->bins[i];
 		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
 		malloc_mutex_lock(tsdn, &bin->lock);
 		bin->stats.nrequests += tbin->tstats.nrequests;
@@ -674,13 +674,13 @@ tcache_boot(tsdn_t *tsdn) {
 	stack_nelms = 0;
 	unsigned i;
 	for (i = 0; i < NBINS; i++) {
-		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
 			tcache_bin_info[i].ncached_max =
 			    TCACHE_NSLOTS_SMALL_MIN;
-		} else if ((arena_bin_info[i].nregs << 1) <=
+		} else if ((bin_infos[i].nregs << 1) <=
 		    TCACHE_NSLOTS_SMALL_MAX) {
 			tcache_bin_info[i].ncached_max =
-			    (arena_bin_info[i].nregs << 1);
+			    (bin_infos[i].nregs << 1);
 		} else {
 			tcache_bin_info[i].ncached_max =
 			    TCACHE_NSLOTS_SMALL_MAX;
diff --git a/test/unit/junk.c b/test/unit/junk.c
index fd0e65b1..243ced41 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -15,7 +15,7 @@ watch_junking(void *p) {
 }
 
 static void
-arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) {
+arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
 	size_t i;
 
 	arena_dalloc_junk_small_orig(ptr, bin_info);
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 4cfd981a..e812b52f 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -696,10 +696,10 @@ TEST_BEGIN(test_arenas_bin_constants) {
 	assert_zu_eq(name, expected, "Incorrect "#name" size"); \
 } while (0)
 
-	TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
-	TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
+	TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
+	TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
 	TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
-	    arena_bin_info[0].slab_size);
+	    bin_infos[0].slab_size);
 
 #undef TEST_ARENAS_BIN_CONSTANT
 }
diff --git a/test/unit/slab.c b/test/unit/slab.c
index ea344f8f..7e662aed 100644
--- a/test/unit/slab.c
+++ b/test/unit/slab.c
@@ -6,7 +6,7 @@ TEST_BEGIN(test_arena_slab_regind) {
 	for (binind = 0; binind < NBINS; binind++) {
 		size_t regind;
 		extent_t slab;
-		const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+		const bin_info_t *bin_info = &bin_infos[binind];
 		extent_init(&slab, NULL, mallocx(bin_info->slab_size,
 		    MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
 		    binind, 0, extent_state_active, false, true, true);
diff --git a/test/unit/stats.c b/test/unit/stats.c
index d9849d80..231010e4 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -245,7 +245,7 @@ TEST_BEGIN(test_stats_arenas_bins) {
 	    (void *)&arena_ind, sizeof(arena_ind)), 0,
 	    "Unexpected mallctl() failure");
 
-	p = malloc(arena_bin_info[0].reg_size);
+	p = malloc(bin_infos[0].reg_size);
 	assert_ptr_not_null(p, "Unexpected malloc() failure");
 
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),