#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"

/*
 * A bin contains a set of extents that are currently being used for slab
 * allocations.
 */
typedef struct bin_s bin_t;
struct bin_s {
	/* All operations on bin_t fields require lock ownership. */
	malloc_mutex_t lock;

	/*
	 * Bin statistics. These get touched every time the lock is acquired,
	 * so put them close by in the hopes of getting some cache locality.
	 */
	bin_stats_t stats;

	/*
	 * Current slab being used to service allocations of this bin's size
	 * class. slabcur is independent of slabs_{nonfull,full}; whenever
	 * slabcur is reassigned, the previous slab must be deallocated or
	 * inserted into slabs_{nonfull,full}.
	 */
	edata_t *slabcur;

	/*
	 * Heap of non-full slabs. This heap is used to assure that new
	 * allocations come from the non-full slab that is oldest/lowest in
	 * memory.
	 */
	edata_heap_t slabs_nonfull;

	/* List used to track full slabs. */
	edata_list_active_t slabs_full;
};
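
/*
 * Illustrative sketch (not jemalloc's actual allocation path): the slabcur
 * invariant above implies logic along these lines, where slab_is_full() and
 * slab_alloc_region() are hypothetical stand-ins for the real slab helpers:
 *
 *	malloc_mutex_lock(tsdn, &bin->lock);
 *	if (bin->slabcur == NULL || slab_is_full(bin->slabcur)) {
 *		if (bin->slabcur != NULL) {
 *			// Old slabcur is full; park it on slabs_full.
 *			edata_list_active_append(&bin->slabs_full,
 *			    bin->slabcur);
 *		}
 *		// Prefer the oldest/lowest non-full slab for locality.
 *		bin->slabcur = edata_heap_remove_first(&bin->slabs_nonfull);
 *	}
 *	void *ret = slab_alloc_region(bin->slabcur);
 *	malloc_mutex_unlock(tsdn, &bin->lock);
 */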

/* A set of sharded bins of the same size class. */
typedef struct bins_s bins_t;
struct bins_s {
	/* Sharded bins. Dynamically sized. */
	bin_t *bin_shards;
};
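
/*
 * Illustrative sketch (hypothetical, not the actual shard-selection logic):
 * with n_shards shards per size class, a caller might spread lock contention
 * by deriving a shard index from a per-thread seed:
 *
 *	unsigned shard = thread_seed % n_shards;
 *	bin_t *bin = &bins->bin_shards[shard];
 */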

void bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
    size_t end_size, size_t nshards);
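
/*
 * Usage sketch (the values are illustrative): bin_update_shard_size() sets
 * the shard count for bins whose size class falls within [start_size,
 * end_size], returning true on error:
 *
 *	unsigned bin_shard_sizes[SC_NBINS];
 *	bin_shard_sizes_boot(bin_shard_sizes);
 *	// Ask for 4 shards for size classes from 1 to 128 bytes.
 *	if (bin_update_shard_size(bin_shard_sizes, 1, 128, 4)) {
 *		// Handle invalid range or shard count.
 *	}
 */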

/* Initializes a bin to empty. Returns true on error. */
bool bin_init(bin_t *bin);

/* Forking. */
void bin_prefork(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
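
/*
 * These hooks follow the usual pthread_atfork() discipline: acquire the bin
 * lock before fork(), then release (or reinitialize) it in the parent and
 * child. A minimal sketch, assuming a single global tsdn and bin (the real
 * registration happens at a higher level in the allocator):
 *
 *	static void prepare(void) { bin_prefork(tsdn, bin); }
 *	static void parent(void) { bin_postfork_parent(tsdn, bin); }
 *	static void child(void) { bin_postfork_child(tsdn, bin); }
 *	...
 *	pthread_atfork(prepare, parent, child);
 */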

/* Stats. */
static inline void
bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
	malloc_mutex_lock(tsdn, &bin->lock);
	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
	bin_stats_t *stats = &dst_bin_stats->stats_data;
	stats->nmalloc += bin->stats.nmalloc;
	stats->ndalloc += bin->stats.ndalloc;
	stats->nrequests += bin->stats.nrequests;
	stats->curregs += bin->stats.curregs;
	stats->nfills += bin->stats.nfills;
	stats->nflushes += bin->stats.nflushes;
	stats->nslabs += bin->stats.nslabs;
	stats->reslabs += bin->stats.reslabs;
	stats->curslabs += bin->stats.curslabs;
	stats->nonfull_slabs += bin->stats.nonfull_slabs;
	malloc_mutex_unlock(tsdn, &bin->lock);
}
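
/*
 * Usage sketch (hypothetical loop, assuming n_shards is known): merging one
 * size class's stats across all of its shards accumulates everything into a
 * single zero-initialized destination:
 *
 *	bin_stats_data_t merged;
 *	memset(&merged, 0, sizeof(merged));
 *	for (unsigned i = 0; i < n_shards; i++) {
 *		bin_stats_merge(tsdn, &merged, &bins->bin_shards[i]);
 *	}
 */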

#endif /* JEMALLOC_INTERNAL_BIN_H */