Rename extents_t -> eset_t.

Author:     David T. Goldblatt
Date:       2019-09-20 19:59:55 -07:00
Committer:  David Goldblatt
Parent:     723ccc6c27
Commit:     4e5e43f22e
8 changed files with 268 additions and 256 deletions

include/jemalloc/internal/arena_structs_b.h

@@ -5,6 +5,7 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/eset.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
@@ -161,9 +162,9 @@ struct arena_s {
 	 *
 	 * Synchronization: internal.
 	 */
-	extents_t	extents_dirty;
-	extents_t	extents_muzzy;
-	extents_t	extents_retained;
+	eset_t		extents_dirty;
+	eset_t		extents_muzzy;
+	eset_t		extents_retained;
 
 	/*
	 * Decay-based purging state, responsible for scheduling extent state
include/jemalloc/internal/eset.h (new file)

@@ -0,0 +1,60 @@
+#ifndef JEMALLOC_INTERNAL_ESET_H
+#define JEMALLOC_INTERNAL_ESET_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/mutex.h"
+
+/*
+ * An eset ("extent set") is a quantized collection of extents, with built-in
+ * LRU queue.
+ */
+typedef struct eset_s eset_t;
+struct eset_s {
+	malloc_mutex_t		mtx;
+
+	/*
+	 * Quantized per size class heaps of extents.
+	 *
+	 * Synchronization: mtx.
+	 */
+	extent_heap_t		heaps[SC_NPSIZES + 1];
+	atomic_zu_t		nextents[SC_NPSIZES + 1];
+	atomic_zu_t		nbytes[SC_NPSIZES + 1];
+
+	/*
+	 * Bitmap for which set bits correspond to non-empty heaps.
+	 *
+	 * Synchronization: mtx.
+	 */
+	bitmap_t		bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
+
+	/*
+	 * LRU of all extents in heaps.
+	 *
+	 * Synchronization: mtx.
+	 */
+	extent_list_t		lru;
+
+	/*
+	 * Page sum for all extents in heaps.
+	 *
+	 * The synchronization here is a little tricky.  Modifications to npages
+	 * must hold mtx, but reads need not (though, a reader who sees npages
+	 * without holding the mutex can't assume anything about the rest of the
+	 * state of the eset_t).
+	 */
+	atomic_zu_t		npages;
+
+	/* All stored extents must be in the same state. */
+	extent_state_t		state;
+
+	/*
+	 * If true, delay coalescing until eviction; otherwise coalesce during
+	 * deallocation.
+	 */
+	bool			delay_coalesce;
+};
+
+#endif /* JEMALLOC_INTERNAL_ESET_H */
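The header above is the whole of the new type: per-page-size-class heaps and counters, a bitmap of non-empty heaps, an LRU list, and a lockless-readable page count. The heaps-plus-bitmap layout exists so that a first-fit lookup (the smallest non-empty size class at least as large as a request) is a mask and a find-first-set rather than a scan. Below is a self-contained toy sketch of that idea; every name in it is hypothetical, it uses a single-word bitmap instead of jemalloc's multi-word bitmap_t, and it is an illustration, not jemalloc's implementation.

/*
 * Illustrative only: a toy eset with a one-word non-empty bitmap.
 * Real jemalloc uses extent heaps over SC_NPSIZES + 1 size classes.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NCLASSES 64 /* stand-in for SC_NPSIZES + 1 */

typedef struct {
	int      nextents[TOY_NCLASSES]; /* heap occupancy per size class */
	uint64_t nonempty;               /* bit i set <=> class i non-empty */
} toy_eset_t;

static void
toy_insert(toy_eset_t *es, int pind) {
	if (es->nextents[pind]++ == 0) {
		es->nonempty |= UINT64_C(1) << pind; /* heap became non-empty */
	}
}

/* Smallest non-empty class >= min_pind, or -1 if none is large enough. */
static int
toy_first_fit(const toy_eset_t *es, int min_pind) {
	uint64_t candidates = es->nonempty & ~((UINT64_C(1) << min_pind) - 1);
	return candidates == 0 ? -1 : __builtin_ctzll(candidates);
}

int
main(void) {
	toy_eset_t es = {{0}, 0};
	toy_insert(&es, 3);                  /* a small extent */
	toy_insert(&es, 17);                 /* a larger one */
	assert(toy_first_fit(&es, 4) == 17); /* class 3 is too small */
	printf("first fit for pind >= 4: class %d\n", toy_first_fit(&es, 4));
	return 0;
}

(__builtin_ctzll is GCC/Clang-specific.) The bitmap is what keeps the search proportional to words scanned rather than to all SC_NPSIZES + 1 heaps.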

include/jemalloc/internal/extent_externs.h

@@ -27,25 +27,25 @@ size_t extent_size_quantize_ceil(size_t size);
 ph_proto(, extent_avail_, extent_tree_t, extent_t)
 ph_proto(, extent_heap_, extent_heap_t, extent_t)
 
-bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+bool extents_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
     bool delay_coalesce);
-extent_state_t extents_state_get(const extents_t *extents);
-size_t extents_npages_get(extents_t *extents);
+extent_state_t extents_state_get(const eset_t *eset);
+size_t extents_npages_get(eset_t *eset);
 /* Get the number of extents in the given page size index. */
-size_t extents_nextents_get(extents_t *extents, pszind_t ind);
+size_t extents_nextents_get(eset_t *eset, pszind_t ind);
 /* Get the sum total bytes of the extents in the given page size index. */
-size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
+size_t extents_nbytes_get(eset_t *eset, pszind_t ind);
 extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+    extent_hooks_t **r_extent_hooks, eset_t *eset, void *new_addr,
     size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
     bool *zero, bool *commit);
 void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
+    extent_hooks_t **r_extent_hooks, eset_t *eset, extent_t *extent);
 extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
-void extents_prefork(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
+    extent_hooks_t **r_extent_hooks, eset_t *eset, size_t npages_min);
+void extents_prefork(tsdn_t *tsdn, eset_t *eset);
+void extents_postfork_parent(tsdn_t *tsdn, eset_t *eset);
+void extents_postfork_child(tsdn_t *tsdn, eset_t *eset);
 extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
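The prefork/postfork_parent/postfork_child trio in the prototypes above exists because the eset's mutex must be held across fork() so the child never inherits a lock frozen mid-operation. A standalone sketch of that general discipline follows; it is the standard pthread_atfork pattern with a hypothetical mutex, not jemalloc's malloc_mutex machinery.

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t toy_eset_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire before fork(): no thread can then hold the lock mid-update. */
static void toy_prefork(void)         { pthread_mutex_lock(&toy_eset_mtx); }
/* Parent: simply release. */
static void toy_postfork_parent(void) { pthread_mutex_unlock(&toy_eset_mtx); }
/* Child: release (real allocators often re-initialize the lock instead,
 * since only the forking thread survives in the child). */
static void toy_postfork_child(void)  { pthread_mutex_unlock(&toy_eset_mtx); }

int
main(void) {
	pthread_atfork(toy_prefork, toy_postfork_parent, toy_postfork_child);
	pid_t pid = fork();
	if (pid == 0) {
		pthread_mutex_lock(&toy_eset_mtx); /* usable: not left locked */
		puts("child: eset lock acquired");
		pthread_mutex_unlock(&toy_eset_mtx);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	puts("parent: done");
	return 0;
}

(Compile with -pthread.)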

include/jemalloc/internal/extent_structs.h

@@ -9,53 +9,6 @@
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/slab_data.h"
 
-/* Quantized collection of extents, with built-in LRU queue. */
-struct extents_s {
-	malloc_mutex_t		mtx;
-
-	/*
-	 * Quantized per size class heaps of extents.
-	 *
-	 * Synchronization: mtx.
-	 */
-	extent_heap_t		heaps[SC_NPSIZES + 1];
-	atomic_zu_t		nextents[SC_NPSIZES + 1];
-	atomic_zu_t		nbytes[SC_NPSIZES + 1];
-
-	/*
-	 * Bitmap for which set bits correspond to non-empty heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
-	bitmap_t		bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
-
-	/*
-	 * LRU of all extents in heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
-	extent_list_t		lru;
-
-	/*
-	 * Page sum for all extents in heaps.
-	 *
-	 * The synchronization here is a little tricky.  Modifications to npages
-	 * must hold mtx, but reads need not (though, a reader who sees npages
-	 * without holding the mutex can't assume anything about the rest of the
-	 * state of the extents_t).
-	 */
-	atomic_zu_t		npages;
-
-	/* All stored extents must be in the same state. */
-	extent_state_t		state;
-
-	/*
-	 * If true, delay coalescing until eviction; otherwise coalesce during
-	 * deallocation.
-	 */
-	bool			delay_coalesce;
-};
-
 /*
  * The following two structs are for experimental purposes. See
  * experimental_utilization_query_ctl and
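The npages comment removed here (and carried over verbatim into eset.h above) describes an asymmetric scheme: writers update the page count under mtx together with the heaps, while readers may load it with no lock, at the price of learning nothing about the rest of the structure. A minimal C11 sketch of that pattern, with hypothetical names rather than jemalloc's atomic_zu_t wrappers:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mtx;
	atomic_size_t   npages; /* stand-in for atomic_zu_t */
} toy_eset_t;

/* Writers mutate heaps/bitmap/lru and npages together, under mtx. */
static void
toy_add_pages(toy_eset_t *es, size_t n) {
	pthread_mutex_lock(&es->mtx);
	/* ... heap/bitmap/lru updates would happen here ... */
	atomic_fetch_add_explicit(&es->npages, n, memory_order_relaxed);
	pthread_mutex_unlock(&es->mtx);
}

/*
 * Readers may skip the lock: the loaded count is always some valid value,
 * but implies nothing about the state of the heaps it summarizes.
 */
static size_t
toy_npages_get(toy_eset_t *es) {
	return atomic_load_explicit(&es->npages, memory_order_relaxed);
}

int
main(void) {
	toy_eset_t es = {PTHREAD_MUTEX_INITIALIZER, 0};
	toy_add_pages(&es, 8);
	printf("npages = %zu\n", toy_npages_get(&es));
	return 0;
}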

include/jemalloc/internal/extent_types.h

@@ -1,8 +1,6 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
 #define JEMALLOC_INTERNAL_EXTENT_TYPES_H
 
-typedef struct extents_s extents_t;
-
 typedef struct extent_util_stats_s extent_util_stats_t;
 typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
 