Header refactoring: Pull size helpers out of jemalloc module.
parent 041e041e1f
commit 8261e581be
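The renames below are mechanical: every size helper keeps its semantics but gains an `sz_` prefix and moves to the new `sz` module (`include/jemalloc/internal/sz.h` plus `src/sz.c`), and `large_pad` becomes `sz_large_pad`. A minimal sketch of what a caller changes, using only names that appear in this diff (the wrapper function itself is hypothetical, for illustration only):

```c
#include "jemalloc/internal/sz.h"

/*
 * Hypothetical helper, not part of the diff: round a request up to its
 * usable size class the way call sites in this commit now do.
 */
static size_t
request_usable_size(size_t size) {
	szind_t ind = sz_size2index(size);	/* was size2index(size) */
	return sz_index2size(ind);		/* was index2size(ind) */
}
```

Call sites such as `arena_salloc()` and `tcache_alloc_small()` in the hunks below change in exactly this way.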
@@ -112,6 +112,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 $(srcroot)src/rtree.c \
 $(srcroot)src/stats.c \
 $(srcroot)src/spin.c \
+$(srcroot)src/sz.c \
 $(srcroot)src/tcache.c \
 $(srcroot)src/ticker.c \
 $(srcroot)src/tsd.c \
@@ -6,14 +6,6 @@
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/stats.h"
 
-static const size_t large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
-PAGE
-#else
-0
-#endif
-;
-
 extern ssize_t opt_dirty_decay_ms;
 extern ssize_t opt_muzzy_decay_ms;
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 
 static inline szind_t
@@ -127,7 +128,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
 (uintptr_t)ptr, true);
 assert(szind != NSIZES);
 
-return index2size(szind);
+return sz_index2size(szind);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -160,7 +161,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 
 assert(szind != NSIZES);
 
-return index2size(szind);
+return sz_index2size(szind);
 }
 
 static inline void
@@ -257,7 +258,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 * There is no risk of being confused by a promoted sampled
 * object, so base szind and slab on the given size.
 */
-szind = size2index(size);
+szind = sz_size2index(size);
 slab = (szind < NBINS);
 }
 
@@ -269,7 +270,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
 (uintptr_t)ptr, true, &szind, &slab);
 
-assert(szind == size2index(size));
+assert(szind == sz_size2index(size));
 assert((config_prof && opt_prof) || slab == (szind < NBINS));
 
 if (config_debug) {
@@ -313,7 +314,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
 (uintptr_t)ptr, true, &local_ctx.szind,
 &local_ctx.slab);
-assert(local_ctx.szind == size2index(size));
+assert(local_ctx.szind == sz_size2index(size));
 alloc_ctx = &local_ctx;
 }
 slab = alloc_ctx->slab;
@@ -323,7 +324,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 * There is no risk of being confused by a promoted sampled
 * object, so base szind and slab on the given size.
 */
-szind = size2index(size);
+szind = sz_size2index(size);
 slab = (szind < NBINS);
 }
 
@@ -1,8 +1,6 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
 #define JEMALLOC_INTERNAL_ARENA_TYPES_H
 
-#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
-
 /* Maximum number of regions in one slab. */
 #define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
 #define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
@@ -6,6 +6,7 @@
 #include "jemalloc/internal/pages.h"
 #include "jemalloc/internal/prng.h"
 #include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sz.h"
 
 static inline void
 extent_lock(tsdn_t *tsdn, extent_t *extent) {
@@ -65,7 +66,7 @@ extent_szind_get(const extent_t *extent) {
 
 static inline size_t
 extent_usize_get(const extent_t *extent) {
-return index2size(extent_szind_get(extent));
+return sz_index2size(extent_szind_get(extent));
 }
 
 static inline size_t
@@ -53,7 +53,7 @@ struct extent_s {
 * szind: The szind flag indicates usable size class index for
 * allocations residing in this extent, regardless of whether the
 * extent is a slab. Extent size and usable size often differ
-* even for non-slabs, either due to large_pad or promotion of
+* even for non-slabs, either due to sz_large_pad or promotion of
 * sampled small regions.
 *
 * nfree: Number of free regions in slab.
@@ -31,23 +31,6 @@ extern unsigned narenas_auto;
 */
 extern atomic_p_t arenas[];
 
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const pind2sz_tab[NPSIZES+1];
-/*
- * index2size_tab encodes the same information as could be computed (at
- * unacceptable cost in some code paths) by index2size_compute().
- */
-extern size_t const index2size_tab[NSIZES];
-/*
- * size2index_tab is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via size2index().
- */
-extern uint8_t const size2index_tab[];
-
 void *a0malloc(size_t size);
 void a0dalloc(void *ptr);
 void *bootstrap_malloc(size_t size);
@@ -7,273 +7,6 @@
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/ticker.h"
 
-JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz) {
-if (unlikely(psz > LARGE_MAXCLASS)) {
-return NPSIZES;
-}
-{
-pszind_t x = lg_floor((psz<<1)-1);
-pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
-(LG_SIZE_CLASS_GROUP + LG_PAGE);
-pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
-size_t delta_inverse_mask = ZD(-1) << lg_delta;
-pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
-((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-pszind_t ind = grp + mod;
-return ind;
-}
-}
-
-static inline size_t
-pind2sz_compute(pszind_t pind) {
-if (unlikely(pind == NPSIZES)) {
-return LARGE_MAXCLASS + PAGE;
-}
-{
-size_t grp = pind >> LG_SIZE_CLASS_GROUP;
-size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-size_t grp_size_mask = ~((!!grp)-1);
-size_t grp_size = ((ZU(1) << (LG_PAGE +
-(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-size_t shift = (grp == 0) ? 1 : grp;
-size_t lg_delta = shift + (LG_PAGE-1);
-size_t mod_size = (mod+1) << lg_delta;
-
-size_t sz = grp_size + mod_size;
-return sz;
-}
-}
-
-static inline size_t
-pind2sz_lookup(pszind_t pind) {
-size_t ret = (size_t)pind2sz_tab[pind];
-assert(ret == pind2sz_compute(pind));
-return ret;
-}
-
-static inline size_t
-pind2sz(pszind_t pind) {
-assert(pind < NPSIZES+1);
-return pind2sz_lookup(pind);
-}
-
-static inline size_t
-psz2u(size_t psz) {
-if (unlikely(psz > LARGE_MAXCLASS)) {
-return LARGE_MAXCLASS + PAGE;
-}
-{
-size_t x = lg_floor((psz<<1)-1);
-size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
-LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-size_t delta = ZU(1) << lg_delta;
-size_t delta_mask = delta - 1;
-size_t usize = (psz + delta_mask) & ~delta_mask;
-return usize;
-}
-}
-
-static inline szind_t
-size2index_compute(size_t size) {
-if (unlikely(size > LARGE_MAXCLASS)) {
-return NSIZES;
-}
-#if (NTBINS != 0)
-if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
-}
-#endif
-{
-szind_t x = lg_floor((size<<1)-1);
-szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
-x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
-szind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
-szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-
-size_t delta_inverse_mask = ZD(-1) << lg_delta;
-szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
-((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
-szind_t index = NTBINS + grp + mod;
-return index;
-}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size) {
-assert(size <= LOOKUP_MAXCLASS);
-{
-szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
-assert(ret == size2index_compute(size));
-return ret;
-}
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size) {
-assert(size > 0);
-if (likely(size <= LOOKUP_MAXCLASS)) {
-return size2index_lookup(size);
-}
-return size2index_compute(size);
-}
-
-static inline size_t
-index2size_compute(szind_t index) {
-#if (NTBINS > 0)
-if (index < NTBINS) {
-return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
-}
-#endif
-{
-size_t reduced_index = index - NTBINS;
-size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
-size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
-1);
-
-size_t grp_size_mask = ~((!!grp)-1);
-size_t grp_size = ((ZU(1) << (LG_QUANTUM +
-(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
-size_t shift = (grp == 0) ? 1 : grp;
-size_t lg_delta = shift + (LG_QUANTUM-1);
-size_t mod_size = (mod+1) << lg_delta;
-
-size_t usize = grp_size + mod_size;
-return usize;
-}
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index) {
-size_t ret = (size_t)index2size_tab[index];
-assert(ret == index2size_compute(index));
-return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index) {
-assert(index < NSIZES);
-return index2size_lookup(index);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size) {
-if (unlikely(size > LARGE_MAXCLASS)) {
-return 0;
-}
-#if (NTBINS > 0)
-if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
-size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
-size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
-return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
-(ZU(1) << lg_ceil));
-}
-#endif
-{
-size_t x = lg_floor((size<<1)-1);
-size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
-? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
-size_t delta = ZU(1) << lg_delta;
-size_t delta_mask = delta - 1;
-size_t usize = (size + delta_mask) & ~delta_mask;
-return usize;
-}
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size) {
-size_t ret = index2size_lookup(size2index_lookup(size));
-
-assert(ret == s2u_compute(size));
-return ret;
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size) {
-assert(size > 0);
-if (likely(size <= LOOKUP_MAXCLASS)) {
-return s2u_lookup(size);
-}
-return s2u_compute(size);
-}
-
-/*
- * Compute usable size that would result from allocating an object with the
- * specified size and alignment.
- */
-JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment) {
-size_t usize;
-
-assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
-
-/* Try for a small size class. */
-if (size <= SMALL_MAXCLASS && alignment < PAGE) {
-/*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each
- * small size class, every object is aligned at the smallest
- * power of two that is non-zero in the base two representation
- * of the size. For example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
-usize = s2u(ALIGNMENT_CEILING(size, alignment));
-if (usize < LARGE_MINCLASS) {
-return usize;
-}
-}
-
-/* Large size class. Beware of overflow. */
-
-if (unlikely(alignment > LARGE_MAXCLASS)) {
-return 0;
-}
-
-/* Make sure result is a large size class. */
-if (size <= LARGE_MINCLASS) {
-usize = LARGE_MINCLASS;
-} else {
-usize = s2u(size);
-if (usize < size) {
-/* size_t overflow. */
-return 0;
-}
-}
-
-/*
- * Calculate the multi-page mapping that large_palloc() would need in
- * order to guarantee the alignment.
- */
-if (usize + large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
-/* size_t overflow. */
-return 0;
-}
-return usize;
-}
-
 JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
 malloc_getcpu(void) {
 assert(have_percpu_arena);
@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_INLINES_C_H
 
 #include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/witness.h"
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -48,7 +49,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
 void *ret;
 
 assert(usize != 0);
-assert(usize == sa2u(usize, alignment));
+assert(usize == sz_sa2u(usize, alignment));
 assert(!is_internal || tcache == NULL);
 assert(!is_internal || arena == NULL || arena_is_auto(arena));
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -118,7 +119,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 void *p;
 size_t usize, copysize;
 
-usize = sa2u(size + extra, alignment);
+usize = sz_sa2u(size + extra, alignment);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 return NULL;
 }
@@ -128,7 +129,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 return NULL;
 }
 /* Try again, without extra this time. */
-usize = sa2u(size, alignment);
+usize = sz_sa2u(size, alignment);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 return NULL;
 }
@@ -1,6 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
 #define JEMALLOC_INTERNAL_PROF_INLINES_B_H
 
+#include "jemalloc/internal/sz.h"
+
 JEMALLOC_ALWAYS_INLINE bool
 prof_active_get_unlocked(void) {
 /*
@@ -113,7 +115,7 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
 prof_tdata_t *tdata;
 prof_bt_t bt;
 
-assert(usize == s2u(usize));
+assert(usize == sz_s2u(usize));
 
 if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
 &tdata))) {
@@ -334,6 +334,7 @@ for lg_z in ${lg_zarr} ; do
 echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
 echo "#define SMALL_MAXCLASS ${small_maxclass}"
 echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+echo "#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)"
 echo "#define LARGE_MAXCLASS ${large_maxclass}"
 echo "#endif"
 echo
include/jemalloc/internal/sz.h (new file, 317 lines)
@@ -0,0 +1,317 @@
+#ifndef JEMALLOC_INTERNAL_SIZE_H
+#define JEMALLOC_INTERNAL_SIZE_H
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * sz module: Size computations.
+ *
+ * Some abbreviations used here:
+ *   p: Page
+ *   ind: Index
+ *   s, sz: Size
+ *   u: Usable size
+ *   a: Aligned
+ *
+ * These are not always used completely consistently, but should be enough to
+ * interpret function names. E.g. sz_psz2ind converts page size to page size
+ * index; sz_sa2u converts a (size, alignment) allocation request to the usable
+ * size that would result from such an allocation.
+ */
+
+/*
+ * sz_pind2sz_tab encodes the same information as could be computed by
+ * sz_pind2sz_compute().
+ */
+extern size_t const sz_pind2sz_tab[NPSIZES+1];
+/*
+ * sz_index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by sz_index2size_compute().
+ */
+extern size_t const sz_index2size_tab[NSIZES];
+/*
+ * sz_size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via sz_size2index().
+ */
+extern uint8_t const sz_size2index_tab[];
+
+static const size_t sz_large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+PAGE
+#else
+0
+#endif
+;
+
+JEMALLOC_ALWAYS_INLINE pszind_t
+sz_psz2ind(size_t psz) {
+if (unlikely(psz > LARGE_MAXCLASS)) {
+return NPSIZES;
+}
+{
+pszind_t x = lg_floor((psz<<1)-1);
+pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
+(LG_SIZE_CLASS_GROUP + LG_PAGE);
+pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+
+size_t delta_inverse_mask = ZD(-1) << lg_delta;
+pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
+((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+pszind_t ind = grp + mod;
+return ind;
+}
+}
+
+static inline size_t
+sz_pind2sz_compute(pszind_t pind) {
+if (unlikely(pind == NPSIZES)) {
+return LARGE_MAXCLASS + PAGE;
+}
+{
+size_t grp = pind >> LG_SIZE_CLASS_GROUP;
+size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+size_t grp_size_mask = ~((!!grp)-1);
+size_t grp_size = ((ZU(1) << (LG_PAGE +
+(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+size_t shift = (grp == 0) ? 1 : grp;
+size_t lg_delta = shift + (LG_PAGE-1);
+size_t mod_size = (mod+1) << lg_delta;
+
+size_t sz = grp_size + mod_size;
+return sz;
+}
+}
+
+static inline size_t
+sz_pind2sz_lookup(pszind_t pind) {
+size_t ret = (size_t)sz_pind2sz_tab[pind];
+assert(ret == sz_pind2sz_compute(pind));
+return ret;
+}
+
+static inline size_t
+sz_pind2sz(pszind_t pind) {
+assert(pind < NPSIZES+1);
+return sz_pind2sz_lookup(pind);
+}
+
+static inline size_t
+sz_psz2u(size_t psz) {
+if (unlikely(psz > LARGE_MAXCLASS)) {
+return LARGE_MAXCLASS + PAGE;
+}
+{
+size_t x = lg_floor((psz<<1)-1);
+size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
+LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
+size_t delta = ZU(1) << lg_delta;
+size_t delta_mask = delta - 1;
+size_t usize = (psz + delta_mask) & ~delta_mask;
+return usize;
+}
+}
+
+static inline szind_t
+sz_size2index_compute(size_t size) {
+if (unlikely(size > LARGE_MAXCLASS)) {
+return NSIZES;
+}
+#if (NTBINS != 0)
+if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+}
+#endif
+{
+szind_t x = lg_floor((size<<1)-1);
+szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+size_t delta_inverse_mask = ZD(-1) << lg_delta;
+szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+szind_t index = NTBINS + grp + mod;
+return index;
+}
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+assert(size <= LOOKUP_MAXCLASS);
+{
+szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
+assert(ret == sz_size2index_compute(size));
+return ret;
+}
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index(size_t size) {
+assert(size > 0);
+if (likely(size <= LOOKUP_MAXCLASS)) {
+return sz_size2index_lookup(size);
+}
+return sz_size2index_compute(size);
+}
+
+static inline size_t
+sz_index2size_compute(szind_t index) {
+#if (NTBINS > 0)
+if (index < NTBINS) {
+return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+}
+#endif
+{
+size_t reduced_index = index - NTBINS;
+size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+1);
+
+size_t grp_size_mask = ~((!!grp)-1);
+size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+size_t shift = (grp == 0) ? 1 : grp;
+size_t lg_delta = shift + (LG_QUANTUM-1);
+size_t mod_size = (mod+1) << lg_delta;
+
+size_t usize = grp_size + mod_size;
+return usize;
+}
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup(szind_t index) {
+size_t ret = (size_t)sz_index2size_tab[index];
+assert(ret == sz_index2size_compute(index));
+return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size(szind_t index) {
+assert(index < NSIZES);
+return sz_index2size_lookup(index);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_compute(size_t size) {
+if (unlikely(size > LARGE_MAXCLASS)) {
+return 0;
+}
+#if (NTBINS > 0)
+if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+(ZU(1) << lg_ceil));
+}
+#endif
+{
+size_t x = lg_floor((size<<1)-1);
+size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+size_t delta = ZU(1) << lg_delta;
+size_t delta_mask = delta - 1;
+size_t usize = (size + delta_mask) & ~delta_mask;
+return usize;
+}
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u_lookup(size_t size) {
+size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
+
+assert(ret == sz_s2u_compute(size));
+return ret;
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_s2u(size_t size) {
+assert(size > 0);
+if (likely(size <= LOOKUP_MAXCLASS)) {
+return sz_s2u_lookup(size);
+}
+return sz_s2u_compute(size);
+}
+
+/*
+ * Compute usable size that would result from allocating an object with the
+ * specified size and alignment.
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+sz_sa2u(size_t size, size_t alignment) {
+size_t usize;
+
+assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
+/* Try for a small size class. */
+if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+/*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
+if (usize < LARGE_MINCLASS) {
+return usize;
+}
+}
+
+/* Large size class. Beware of overflow. */
+
+if (unlikely(alignment > LARGE_MAXCLASS)) {
+return 0;
+}
+
+/* Make sure result is a large size class. */
+if (size <= LARGE_MINCLASS) {
+usize = LARGE_MINCLASS;
+} else {
+usize = sz_s2u(size);
+if (usize < size) {
+/* size_t overflow. */
+return 0;
+}
+}
+
+/*
+ * Calculate the multi-page mapping that large_palloc() would need in
+ * order to guarantee the alignment.
+ */
+if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
+/* size_t overflow. */
+return 0;
+}
+return usize;
+}
+
+#endif /* JEMALLOC_INTERNAL_SIZE_H */
@@ -3,6 +3,7 @@
 
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 #include "jemalloc/internal/util.h"
 
@@ -95,7 +96,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 * statement are all static.
 */
 if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
-usize = index2size(binind);
+usize = sz_index2size(binind);
 assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
 }
 
@@ -147,7 +148,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 return NULL;
 }
 
-ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
+ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
 if (ret == NULL) {
 return NULL;
 }
@@ -157,7 +158,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 /* Only compute usize on demand */
 if (config_prof || (slow_path && config_fill) ||
 unlikely(zero)) {
-usize = index2size(binind);
+usize = sz_index2size(binind);
 assert(usize <= tcache_maxclass);
 }
 
@@ -221,7 +222,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
 
 if (slow_path && config_fill && unlikely(opt_junk_free)) {
-large_dalloc_junk(ptr, index2size(binind));
+large_dalloc_junk(ptr, sz_index2size(binind));
 }
 
 tbin = tcache_large_bin_get(tcache, binind);
src/arena.c (55 lines changed)
@@ -289,7 +289,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 size_t curlextents = (size_t)(nmalloc - ndalloc);
 lstats[i].curlextents += curlextents;
 arena_stats_accum_zu(&astats->allocated_large,
-curlextents * index2size(NBINS + i));
+curlextents * sz_index2size(NBINS + i));
 }
 
 arena_stats_unlock(tsdn, &arena->stats);
@@ -303,12 +303,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 for (; i < NBINS; i++) {
 tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
 arena_stats_accum_zu(&astats->tcache_bytes,
-tbin->ncached * index2size(i));
+tbin->ncached * sz_index2size(i));
 }
 for (; i < nhbins; i++) {
 tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
 arena_stats_accum_zu(&astats->tcache_bytes,
-tbin->ncached * index2size(i));
+tbin->ncached * sz_index2size(i));
 }
 }
 malloc_mutex_prof_read(tsdn,
@@ -467,7 +467,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 if (usize < LARGE_MINCLASS) {
 usize = LARGE_MINCLASS;
 }
-index = size2index(usize);
+index = sz_size2index(usize);
 hindex = (index >= NBINS) ? index - NBINS : 0;
 
 arena_stats_add_u64(tsdn, &arena->stats,
@@ -483,7 +483,7 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 if (usize < LARGE_MINCLASS) {
 usize = LARGE_MINCLASS;
 }
-index = size2index(usize);
+index = sz_size2index(usize);
 hindex = (index >= NBINS) ? index - NBINS : 0;
 
 arena_stats_add_u64(tsdn, &arena->stats,
@@ -505,21 +505,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 WITNESS_RANK_CORE, 0);
 
-szind_t szind = size2index(usize);
+szind_t szind = sz_size2index(usize);
 size_t mapped_add;
 bool commit = true;
 extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
-&arena->extents_dirty, NULL, usize, large_pad, alignment, false,
+&arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
 szind, zero, &commit);
 if (extent == NULL) {
 extent = extents_alloc(tsdn, arena, &extent_hooks,
-&arena->extents_muzzy, NULL, usize, large_pad, alignment,
+&arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
 false, szind, zero, &commit);
 }
-size_t size = usize + large_pad;
+size_t size = usize + sz_large_pad;
 if (extent == NULL) {
 extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
-usize, large_pad, alignment, false, szind, zero, &commit);
+usize, sz_large_pad, alignment, false, szind, zero,
+&commit);
 if (config_stats) {
 /*
 * extent may be NULL on OOM, but in that case
@@ -1146,7 +1147,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 assert(alloc_ctx.szind != NSIZES);
 
 if (config_stats || (config_prof && opt_prof)) {
-usize = index2size(alloc_ctx.szind);
+usize = sz_index2size(alloc_ctx.szind);
 assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 }
 /* Remove large allocation from prof sample set. */
@@ -1278,7 +1279,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 WITNESS_RANK_CORE, 0);
 
 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-szind_t szind = size2index(bin_info->reg_size);
+szind_t szind = sz_size2index(bin_info->reg_size);
 bool zero = false;
 bool commit = true;
 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
@@ -1484,7 +1485,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 
 assert(binind < NBINS);
 bin = &arena->bins[binind];
-usize = index2size(binind);
+usize = sz_index2size(binind);
 
 malloc_mutex_lock(tsdn, &bin->lock);
 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
@@ -1544,7 +1545,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
 if (likely(size <= SMALL_MAXCLASS)) {
 return arena_malloc_small(tsdn, arena, ind, zero);
 }
-return large_malloc(tsdn, arena, index2size(ind), zero);
+return large_malloc(tsdn, arena, sz_index2size(ind), zero);
 }
 
 void *
@@ -1555,8 +1556,8 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
 && (usize & PAGE_MASK) == 0))) {
 /* Small; alignment doesn't require special slab placement. */
-ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
-tcache, true);
+ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+zero, tcache, true);
 } else {
 if (likely(alignment <= CACHELINE)) {
 ret = large_malloc(tsdn, arena, usize, zero);
@@ -1581,7 +1582,7 @@ arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
 (uintptr_t)ptr, true);
 arena_t *arena = extent_arena_get(extent);
 
-szind_t szind = size2index(usize);
+szind_t szind = sz_size2index(usize);
 extent_szind_set(extent, szind);
 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
 szind, false);
@@ -1617,7 +1618,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 size_t usize = arena_prof_demote(tsdn, extent, ptr);
 if (usize <= tcache_maxclass) {
 tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-size2index(usize), slow_path);
+sz_size2index(usize), slow_path);
 } else {
 large_dalloc(tsdn, extent);
 }
@@ -1751,17 +1752,17 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 }
 
 extent_t *extent = iealloc(tsdn, ptr);
-size_t usize_min = s2u(size);
-size_t usize_max = s2u(size + extra);
+size_t usize_min = sz_s2u(size);
+size_t usize_max = sz_s2u(size + extra);
 if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
 /*
 * Avoid moving the allocation if the size class can be left the
 * same.
 */
-assert(arena_bin_info[size2index(oldsize)].reg_size ==
+assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
 oldsize);
-if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
-size2index(oldsize)) && (size > oldsize || usize_max <
+if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
+sz_size2index(oldsize)) && (size > oldsize || usize_max <
 oldsize)) {
 return true;
 }
@@ -1780,10 +1781,10 @@ static void *
 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 size_t alignment, bool zero, tcache_t *tcache) {
 if (alignment == 0) {
-return arena_malloc(tsdn, arena, usize, size2index(usize),
+return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
 zero, tcache, true);
 }
-usize = sa2u(usize, alignment);
+usize = sz_sa2u(usize, alignment);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 return NULL;
 }
@@ -1793,7 +1794,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 void *
 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
 size_t size, size_t alignment, bool zero, tcache_t *tcache) {
-size_t usize = s2u(size);
+size_t usize = sz_s2u(size);
 if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
 return NULL;
 }
@@ -1998,7 +1999,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 goto label_error;
 }
 
-arena->extent_grow_next = psz2ind(HUGEPAGE);
+arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
 if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
 WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
 goto label_error;
src/base.c (15 lines changed)
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sz.h"
 
 /******************************************************************************/
 /* Data. */
@@ -121,8 +122,8 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
 * Compute the index for the largest size class that does not
 * exceed extent's size.
 */
-szind_t index_floor = size2index(extent_bsize_get(extent) + 1) -
-1;
+szind_t index_floor =
+sz_size2index(extent_bsize_get(extent) + 1) - 1;
 extent_heap_insert(&base->avail[index_floor], extent);
 }
 
@@ -171,11 +172,11 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
 * HUGEPAGE), or a size large enough to satisfy the requested size and
 * alignment, whichever is larger.
 */
-size_t min_block_size = HUGEPAGE_CEILING(psz2u(header_size + gap_size +
-usize));
+size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
++ usize));
 pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
 *pind_last;
-size_t next_block_size = HUGEPAGE_CEILING(pind2sz(pind_next));
+size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
 size_t block_size = (min_block_size > next_block_size) ? min_block_size
 : next_block_size;
 base_block_t *block = (base_block_t *)base_map(extent_hooks, ind,
@@ -183,7 +184,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
 if (block == NULL) {
 return NULL;
 }
-*pind_last = psz2ind(block_size);
+*pind_last = sz_psz2ind(block_size);
 block->size = block_size;
 block->next = NULL;
 assert(block_size >= header_size);
@@ -304,7 +305,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
 
 extent_t *extent = NULL;
 malloc_mutex_lock(tsdn, &base->mtx);
-for (szind_t i = size2index(asize); i < NSIZES; i++) {
+for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
 extent = extent_heap_remove_first(&base->avail[i]);
 if (extent != NULL) {
 /* Use existing space. */
@@ -274,7 +274,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
 size_t usize;
 
 lg_curcells++;
-usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 ret = true;
 goto label_return;
@@ -319,7 +319,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
 */
 lg_prevbuckets = ckh->lg_curbuckets;
 lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 return;
 }
@@ -395,7 +395,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 ckh->hash = hash;
 ckh->keycomp = keycomp;
 
-usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 ret = true;
 goto label_return;
@@ -701,7 +701,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
 for (i = 0; i < NBINS; i++) {
 ctl_arena->astats->allocated_small +=
 ctl_arena->astats->bstats[i].curregs *
-index2size(i);
+sz_index2size(i);
 ctl_arena->astats->nmalloc_small +=
 ctl_arena->astats->bstats[i].nmalloc;
 ctl_arena->astats->ndalloc_small +=
@@ -2274,7 +2274,8 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
 }
 
 CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
-CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
+size_t)
 static const ctl_named_node_t *
 arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
 size_t i) {
src/extent.c (21 lines changed)
@@ -220,7 +220,7 @@ extent_size_quantize_floor(size_t size) {
 assert(size > 0);
 assert((size & PAGE_MASK) == 0);
 
-pind = psz2ind(size - large_pad + 1);
+pind = sz_psz2ind(size - sz_large_pad + 1);
 if (pind == 0) {
 /*
 * Avoid underflow. This short-circuit would also do the right
@@ -230,7 +230,7 @@ extent_size_quantize_floor(size_t size) {
 */
 return size;
 }
-ret = pind2sz(pind - 1) + large_pad;
+ret = sz_pind2sz(pind - 1) + sz_large_pad;
 assert(ret <= size);
 return ret;
 }
@@ -243,7 +243,7 @@ extent_size_quantize_ceil(size_t size) {
 size_t ret;
 
 assert(size > 0);
-assert(size - large_pad <= LARGE_MAXCLASS);
+assert(size - sz_large_pad <= LARGE_MAXCLASS);
 assert((size & PAGE_MASK) == 0);
 
 ret = extent_size_quantize_floor(size);
@@ -256,7 +256,8 @@ extent_size_quantize_ceil(size_t size) {
 * search would potentially find sufficiently aligned available
 * memory somewhere lower.
 */
-ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+sz_large_pad;
 }
 return ret;
 }
@ -300,7 +301,7 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,

size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = psz2ind(psz);
+ pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_unset(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
@ -329,7 +330,7 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,

size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = psz2ind(psz);
+ pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info,
@ -354,7 +355,7 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
- pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
@ -376,7 +377,7 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
extent_t *ret = NULL;

- pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
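Aside: the fit paths above keep one heap of free extents per page-size class plus a bitmap with one bit per non-empty class, so a lookup can jump to the first usable class with a find-first scan instead of probing every heap. A rough standalone sketch of that search pattern, using hypothetical toy bins, a plain uint32_t mask, and the GCC/Clang __builtin_ctz builtin rather than jemalloc's bitmap_ffu and extent heaps:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_NCLASSES 32

    /* Free-block count per class; a set bit means the class is non-empty. */
    static unsigned toy_bins[TOY_NCLASSES];
    static uint32_t toy_nonempty_mask;

    static void toy_insert(unsigned cls) {
        toy_bins[cls]++;
        toy_nonempty_mask |= (uint32_t)1 << cls;
    }

    /* First non-empty class with index >= start, or TOY_NCLASSES if none. */
    static unsigned toy_first_fit_class(unsigned start) {
        if (start >= TOY_NCLASSES) {
            return TOY_NCLASSES;
        }
        uint32_t masked = toy_nonempty_mask & ~(((uint32_t)1 << start) - 1);
        if (masked == 0) {
            return TOY_NCLASSES;
        }
        return (unsigned)__builtin_ctz(masked); /* find-first-set */
    }

    int main(void) {
        toy_insert(3);
        toy_insert(7);
        printf("request class 2 -> class %u\n", toy_first_fit_class(2));
        printf("request class 5 -> class %u\n", toy_first_fit_class(5));
        printf("request class 9 -> class %u\n", toy_first_fit_class(9));
        return 0;
    }

The toy_* names are invented for illustration; jemalloc's real structure also distinguishes best-fit from first-fit and operates under the extents mutex.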
@ -1040,7 +1041,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* satisfy this request.
*/
pszind_t egn_skip = 0;
- size_t alloc_size = pind2sz(arena->extent_grow_next + egn_skip);
+ size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) {
egn_skip++;
if (arena->extent_grow_next + egn_skip == NPSIZES) {
@ -1048,7 +1049,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
goto label_err;
}
assert(arena->extent_grow_next + egn_skip < NPSIZES);
- alloc_size = pind2sz(arena->extent_grow_next + egn_skip);
+ alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
}

extent_t *extent = extent_alloc(tsdn, arena);
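Aside: extent_grow_retained() keeps a cursor (extent_grow_next) into the page-size-class table and, when the next scheduled growth step is too small for the current request, skips forward class by class until sz_pind2sz() yields a size that fits, failing if it runs off the end of the table. A hedged sketch of just that skip loop, reusing the kind of toy class table shown earlier rather than the real NPSIZES classes:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for sz_pind2sz_tab / NPSIZES. */
    static const size_t toy_pind2sz_tab[] = {4096, 8192, 16384, 32768, 65536};
    #define TOY_NPSIZES (sizeof(toy_pind2sz_tab) / sizeof(toy_pind2sz_tab[0]))

    /*
     * Starting from the grow cursor, find the first class big enough for
     * alloc_size_min; returns false if no class is large enough.
     */
    static bool toy_pick_grow_size(size_t grow_next, size_t alloc_size_min,
        size_t *alloc_size) {
        size_t egn_skip = 0;
        size_t sz = toy_pind2sz_tab[grow_next + egn_skip];
        while (sz < alloc_size_min) {
            egn_skip++;
            if (grow_next + egn_skip == TOY_NPSIZES) {
                return false; /* Outside the supported range. */
            }
            sz = toy_pind2sz_tab[grow_next + egn_skip];
        }
        *alloc_size = sz;
        return true;
    }

    int main(void) {
        size_t alloc_size;
        if (toy_pick_grow_size(1, 20000, &alloc_size)) {
            printf("grow by %zu bytes\n", alloc_size); /* prints 32768 */
        }
        if (!toy_pick_grow_size(1, 1 << 20, &alloc_size)) {
            printf("request too large for the toy table\n");
        }
        return 0;
    }

In the real code the cursor then advances so successive growths use geometrically larger mappings; the toy_* names are assumptions made for the example.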
149  src/jemalloc.c
@ -13,6 +13,7 @@
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
+ #include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

@ -107,110 +108,6 @@ enum {
};
static uint8_t malloc_slow_flags;

- JEMALLOC_ALIGNED(CACHELINE)
- const size_t pind2sz_tab[NPSIZES+1] = {
- #define PSZ_yes(lg_grp, ndelta, lg_delta) \
- (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
- #define PSZ_no(lg_grp, ndelta, lg_delta)
- #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
- PSZ_##psz(lg_grp, ndelta, lg_delta)
- SIZE_CLASSES
- #undef PSZ_yes
- #undef PSZ_no
- #undef SC
- (LARGE_MAXCLASS + PAGE)
- };
-
- JEMALLOC_ALIGNED(CACHELINE)
- const size_t index2size_tab[NSIZES] = {
- #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
- ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
- SIZE_CLASSES
- #undef SC
- };
-
- JEMALLOC_ALIGNED(CACHELINE)
- const uint8_t size2index_tab[] = {
- #if LG_TINY_MIN == 0
- #warning "Dangerous LG_TINY_MIN"
- #define S2B_0(i) i,
- #elif LG_TINY_MIN == 1
- #warning "Dangerous LG_TINY_MIN"
- #define S2B_1(i) i,
- #elif LG_TINY_MIN == 2
- #warning "Dangerous LG_TINY_MIN"
- #define S2B_2(i) i,
- #elif LG_TINY_MIN == 3
- #define S2B_3(i) i,
- #elif LG_TINY_MIN == 4
- #define S2B_4(i) i,
- #elif LG_TINY_MIN == 5
- #define S2B_5(i) i,
- #elif LG_TINY_MIN == 6
- #define S2B_6(i) i,
- #elif LG_TINY_MIN == 7
- #define S2B_7(i) i,
- #elif LG_TINY_MIN == 8
- #define S2B_8(i) i,
- #elif LG_TINY_MIN == 9
- #define S2B_9(i) i,
- #elif LG_TINY_MIN == 10
- #define S2B_10(i) i,
- #elif LG_TINY_MIN == 11
- #define S2B_11(i) i,
- #else
- #error "Unsupported LG_TINY_MIN"
- #endif
- #if LG_TINY_MIN < 1
- #define S2B_1(i) S2B_0(i) S2B_0(i)
- #endif
- #if LG_TINY_MIN < 2
- #define S2B_2(i) S2B_1(i) S2B_1(i)
- #endif
- #if LG_TINY_MIN < 3
- #define S2B_3(i) S2B_2(i) S2B_2(i)
- #endif
- #if LG_TINY_MIN < 4
- #define S2B_4(i) S2B_3(i) S2B_3(i)
- #endif
- #if LG_TINY_MIN < 5
- #define S2B_5(i) S2B_4(i) S2B_4(i)
- #endif
- #if LG_TINY_MIN < 6
- #define S2B_6(i) S2B_5(i) S2B_5(i)
- #endif
- #if LG_TINY_MIN < 7
- #define S2B_7(i) S2B_6(i) S2B_6(i)
- #endif
- #if LG_TINY_MIN < 8
- #define S2B_8(i) S2B_7(i) S2B_7(i)
- #endif
- #if LG_TINY_MIN < 9
- #define S2B_9(i) S2B_8(i) S2B_8(i)
- #endif
- #if LG_TINY_MIN < 10
- #define S2B_10(i) S2B_9(i) S2B_9(i)
- #endif
- #if LG_TINY_MIN < 11
- #define S2B_11(i) S2B_10(i) S2B_10(i)
- #endif
- #define S2B_no(i)
- #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
- S2B_##lg_delta_lookup(index)
- SIZE_CLASSES
- #undef S2B_3
- #undef S2B_4
- #undef S2B_5
- #undef S2B_6
- #undef S2B_7
- #undef S2B_8
- #undef S2B_9
- #undef S2B_10
- #undef S2B_11
- #undef S2B_no
- #undef SC
- };
-
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
@ -333,7 +230,7 @@ a0ialloc(size_t size, bool zero, bool is_internal) {
return NULL;
}

- return iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
+ return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
is_internal, arena_get(TSDN_NULL, 0, true), true);
}

@ -1687,10 +1584,11 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
size_t bumped_usize = usize;

if (usize <= SMALL_MAXCLASS) {
- assert(((dopts->alignment == 0) ? s2u(LARGE_MINCLASS) :
- sa2u(LARGE_MINCLASS, dopts->alignment)) == LARGE_MINCLASS);
- ind_large = size2index(LARGE_MINCLASS);
- bumped_usize = s2u(LARGE_MINCLASS);
+ assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
+ sz_sa2u(LARGE_MINCLASS, dopts->alignment))
+ == LARGE_MINCLASS);
+ ind_large = sz_size2index(LARGE_MINCLASS);
+ bumped_usize = sz_s2u(LARGE_MINCLASS);
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
bumped_usize, ind_large);
if (unlikely(ret == NULL)) {
@ -1792,16 +1690,16 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
/* This is the beginning of the "core" algorithm. */

if (dopts->alignment == 0) {
- ind = size2index(size);
+ ind = sz_size2index(size);
if (unlikely(ind >= NSIZES)) {
goto label_oom;
}
if (config_stats || (config_prof && opt_prof)) {
- usize = index2size(ind);
+ usize = sz_index2size(ind);
assert(usize > 0 && usize <= LARGE_MAXCLASS);
}
} else {
- usize = sa2u(size, dopts->alignment);
+ usize = sz_sa2u(size, dopts->alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
}
@ -2155,10 +2053,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {

size_t usize;
if (config_prof && opt_prof) {
- usize = index2size(alloc_ctx.szind);
+ usize = sz_index2size(alloc_ctx.szind);
prof_free(tsd, ptr, usize, &alloc_ctx);
} else if (config_stats) {
- usize = index2size(alloc_ctx.szind);
+ usize = sz_index2size(alloc_ctx.szind);
}
if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
@ -2192,7 +2090,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind == size2index(usize));
+ assert(alloc_ctx.szind == sz_size2index(usize));
ctx = &alloc_ctx;
prof_free(tsd, ptr, usize, ctx);
} else {
@ -2247,16 +2145,16 @@ je_realloc(void *ptr, size_t size) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
- old_usize = index2size(alloc_ctx.szind);
+ old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- usize = s2u(size);
+ usize = sz_s2u(size);
ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
NULL : irealloc_prof(tsd, ptr, old_usize, usize,
&alloc_ctx);
} else {
if (config_stats) {
- usize = s2u(size);
+ usize = sz_s2u(size);
}
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
@ -2601,10 +2499,11 @@ je_rallocx(void *ptr, size_t size, int flags) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
- old_usize = index2size(alloc_ctx.szind);
+ old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ usize = (alignment == 0) ?
+ sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
}
@ -2685,10 +2584,10 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
* prof_realloc() will use the actual usize to decide whether to sample.
*/
if (alignment == 0) {
- usize_max = s2u(size+extra);
+ usize_max = sz_s2u(size+extra);
assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
} else {
- usize_max = sa2u(size+extra, alignment);
+ usize_max = sz_sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
/*
* usize_max is out of range, and chances are that
@ -2737,7 +2636,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
- old_usize = index2size(alloc_ctx.szind);
+ old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
/*
* The API explicitly absolves itself of protecting against (size +
@ -2847,9 +2746,9 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) {

size_t usize;
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
- usize = s2u(size);
+ usize = sz_s2u(size);
} else {
- usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
}
witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
return usize;
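Aside: inallocx() is the internal counterpart of the public nallocx() query. With no MALLOCX_LG_ALIGN bits set it rounds the request up to its size class (sz_s2u); otherwise it also accounts for the requested alignment (sz_sa2u). A small usage sketch of the public-facing side, assuming jemalloc is installed with its usual unprefixed public API:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t req = 100;

        /* Usable size jemalloc would actually hand back for a plain request. */
        size_t usable = nallocx(req, 0);

        /* Same request, but constrained to 64-byte alignment. */
        size_t usable_aligned = nallocx(req, MALLOCX_ALIGN(64));

        printf("request=%zu usable=%zu usable@64=%zu\n",
            req, usable, usable_aligned);
        return 0;
    }

The exact numbers depend on the configured size classes; the point is only that both code paths in inallocx() feed the same query.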
12  src/large.c
@ -12,7 +12,7 @@

void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
- assert(usize == s2u(usize));
+ assert(usize == sz_s2u(usize));

return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
@ -27,7 +27,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,

assert(!tsdn_null(tsdn) || arena != NULL);

- ausize = sa2u(usize, alignment);
+ ausize = sz_sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
return NULL;
}
@ -97,7 +97,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t diff = extent_size_get(extent) - (usize + large_pad);
+ size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

assert(oldusize > usize);

@ -108,8 +108,8 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
- &extent_hooks, extent, usize + large_pad, size2index(usize),
- false, diff, NSIZES, false);
+ &extent_hooks, extent, usize + sz_large_pad,
+ sz_size2index(usize), false, diff, NSIZES, false);
if (trail == NULL) {
return true;
}
@ -178,7 +178,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- szind_t szind = size2index(usize);
+ szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, false);
@ -556,7 +556,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
if (gctx == NULL) {
return NULL;
@ -819,7 +819,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {

/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
arena_ichoose(tsd, NULL), true);
if (ret.p == NULL) {
if (new_gctx) {
@ -1899,7 +1899,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,

/* Initialize an empty cache for this thread. */
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (tdata == NULL) {
return NULL;
@ -2135,7 +2135,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
return "";
}

- ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+ ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (ret == NULL) {
return NULL;
106  src/sz.c (new file)
@ -0,0 +1,106 @@
+ #include "jemalloc/internal/jemalloc_preamble.h"
+ #include "jemalloc/internal/sz.h"
+
+ JEMALLOC_ALIGNED(CACHELINE)
+ const size_t sz_pind2sz_tab[NPSIZES+1] = {
+ #define PSZ_yes(lg_grp, ndelta, lg_delta) \
+ (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
+ #define PSZ_no(lg_grp, ndelta, lg_delta)
+ #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ PSZ_##psz(lg_grp, ndelta, lg_delta)
+ SIZE_CLASSES
+ #undef PSZ_yes
+ #undef PSZ_no
+ #undef SC
+ (LARGE_MAXCLASS + PAGE)
+ };
+
+ JEMALLOC_ALIGNED(CACHELINE)
+ const size_t sz_index2size_tab[NSIZES] = {
+ #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+ #undef SC
+ };
+
+ JEMALLOC_ALIGNED(CACHELINE)
+ const uint8_t sz_size2index_tab[] = {
+ #if LG_TINY_MIN == 0
+ #warning "Dangerous LG_TINY_MIN"
+ #define S2B_0(i) i,
+ #elif LG_TINY_MIN == 1
+ #warning "Dangerous LG_TINY_MIN"
+ #define S2B_1(i) i,
+ #elif LG_TINY_MIN == 2
+ #warning "Dangerous LG_TINY_MIN"
+ #define S2B_2(i) i,
+ #elif LG_TINY_MIN == 3
+ #define S2B_3(i) i,
+ #elif LG_TINY_MIN == 4
+ #define S2B_4(i) i,
+ #elif LG_TINY_MIN == 5
+ #define S2B_5(i) i,
+ #elif LG_TINY_MIN == 6
+ #define S2B_6(i) i,
+ #elif LG_TINY_MIN == 7
+ #define S2B_7(i) i,
+ #elif LG_TINY_MIN == 8
+ #define S2B_8(i) i,
+ #elif LG_TINY_MIN == 9
+ #define S2B_9(i) i,
+ #elif LG_TINY_MIN == 10
+ #define S2B_10(i) i,
+ #elif LG_TINY_MIN == 11
+ #define S2B_11(i) i,
+ #else
+ #error "Unsupported LG_TINY_MIN"
+ #endif
+ #if LG_TINY_MIN < 1
+ #define S2B_1(i) S2B_0(i) S2B_0(i)
+ #endif
+ #if LG_TINY_MIN < 2
+ #define S2B_2(i) S2B_1(i) S2B_1(i)
+ #endif
+ #if LG_TINY_MIN < 3
+ #define S2B_3(i) S2B_2(i) S2B_2(i)
+ #endif
+ #if LG_TINY_MIN < 4
+ #define S2B_4(i) S2B_3(i) S2B_3(i)
+ #endif
+ #if LG_TINY_MIN < 5
+ #define S2B_5(i) S2B_4(i) S2B_4(i)
+ #endif
+ #if LG_TINY_MIN < 6
+ #define S2B_6(i) S2B_5(i) S2B_5(i)
+ #endif
+ #if LG_TINY_MIN < 7
+ #define S2B_7(i) S2B_6(i) S2B_6(i)
+ #endif
+ #if LG_TINY_MIN < 8
+ #define S2B_8(i) S2B_7(i) S2B_7(i)
+ #endif
+ #if LG_TINY_MIN < 9
+ #define S2B_9(i) S2B_8(i) S2B_8(i)
+ #endif
+ #if LG_TINY_MIN < 10
+ #define S2B_10(i) S2B_9(i) S2B_9(i)
+ #endif
+ #if LG_TINY_MIN < 11
+ #define S2B_11(i) S2B_10(i) S2B_10(i)
+ #endif
+ #define S2B_no(i)
+ #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+ #undef S2B_3
+ #undef S2B_4
+ #undef S2B_5
+ #undef S2B_6
+ #undef S2B_7
+ #undef S2B_8
+ #undef S2B_9
+ #undef S2B_10
+ #undef S2B_11
+ #undef S2B_no
+ #undef SC
+ };
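Aside: the new src/sz.c builds its three lookup tables by expanding the SIZE_CLASSES X-macro with different per-entry SC() definitions. A stripped-down sketch of that pattern, using a made-up four-entry class list (TOY_SIZE_CLASSES and the toy_* tables are assumptions, not jemalloc's generated size_classes.h), shows how one list can emit both a full index-to-size table and a filtered page-size table:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical class list: SC(index, size, psz) where psz is yes/no. */
    #define TOY_SIZE_CLASSES \
        SC(0,    8, no)      \
        SC(1,   16, no)      \
        SC(2, 4096, yes)     \
        SC(3, 8192, yes)

    /* Every class contributes to the index->size table. */
    static const size_t toy_index2size_tab[] = {
    #define SC(index, size, psz) size,
        TOY_SIZE_CLASSES
    #undef SC
    };

    /* Only page-size classes (psz == yes) contribute to the psz table. */
    static const size_t toy_pind2sz_tab[] = {
    #define PSZ_yes(size) size,
    #define PSZ_no(size)
    #define SC(index, size, psz) PSZ_##psz(size)
        TOY_SIZE_CLASSES
    #undef SC
    #undef PSZ_no
    #undef PSZ_yes
    };

    int main(void) {
        printf("index2size has %zu entries, pind2sz has %zu entries\n",
            sizeof(toy_index2size_tab) / sizeof(toy_index2size_tab[0]),
            sizeof(toy_pind2sz_tab) / sizeof(toy_pind2sz_tab[0]));
        printf("index 2 -> %zu bytes; pind 0 -> %zu bytes\n",
            toy_index2size_tab[2], toy_pind2sz_tab[0]);
        return 0;
    }

The same trick, applied to the generated SIZE_CLASSES list, is what keeps the three sz_* tables above in lockstep without any hand-written duplication.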
@ -383,7 +383,7 @@ tsd_tcache_data_init(tsd_t *tsd) {
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
size_t size = stack_nelms * sizeof(void *);
/* Avoid false cacheline sharing. */
- size = sa2u(size, CACHELINE);
+ size = sz_sa2u(size, CACHELINE);

void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
NULL, true, arena_get(TSDN_NULL, 0, true));
@ -430,7 +430,7 @@ tcache_create_explicit(tsd_t *tsd) {
stack_offset = size;
size += stack_nelms * sizeof(void *);
/* Avoid false cacheline sharing. */
- size = sa2u(size, CACHELINE);
+ size = sz_sa2u(size, CACHELINE);

tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
@ -655,7 +655,7 @@ tcache_boot(tsdn_t *tsdn) {
return true;
}

- nhbins = size2index(tcache_maxclass) + 1;
+ nhbins = sz_size2index(tcache_maxclass) + 1;

/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
@ -244,7 +244,7 @@ zone_good_size(malloc_zone_t *zone, size_t size) {
if (size == 0) {
size = 1;
}
- return s2u(size);
+ return sz_s2u(size);
}

static kern_return_t
@ -81,7 +81,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
return 0;
}

- return index2size(szind);
+ return sz_index2size(szind);
}

static unsigned
@ -1,5 +1,7 @@
#include "test/jemalloc_test.h"

+ #include "jemalloc/internal/spin.h"
+
static unsigned arena_ind;
static size_t sz;
static size_t esz;
@ -100,7 +102,7 @@ TEST_BEGIN(test_retained) {

arena_ind = do_arena_create(NULL);
sz = nallocx(HUGEPAGE, 0);
- esz = sz + large_pad;
+ esz = sz + sz_large_pad;

atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

@ -136,9 +138,9 @@ TEST_BEGIN(test_retained) {
arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
size_t usable = 0;
size_t fragmented = 0;
- for (pszind_t pind = psz2ind(HUGEPAGE); pind <
+ for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
arena->extent_grow_next; pind++) {
- size_t psz = pind2sz(pind);
+ size_t psz = sz_pind2sz(pind);
size_t psz_fragmented = psz % esz;
size_t psz_usable = psz - psz_fragmented;
/*
@ -86,7 +86,8 @@ TEST_END
TEST_BEGIN(test_rtree_extrema) {
extent_t extent_a, extent_b;
extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
- size2index(LARGE_MINCLASS), 0, extent_state_active, false, false);
+ sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false,
+ false);
extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
extent_state_active, false, false);

@ -27,11 +27,11 @@ TEST_BEGIN(test_size_classes) {
szind_t index, max_index;

max_size_class = get_max_size_class();
- max_index = size2index(max_size_class);
+ max_index = sz_size2index(max_size_class);

- for (index = 0, size_class = index2size(index); index < max_index ||
+ for (index = 0, size_class = sz_index2size(index); index < max_index ||
size_class < max_size_class; index++, size_class =
- index2size(index)) {
+ sz_index2size(index)) {
assert_true(index < max_index,
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);
@ -39,42 +39,44 @@ TEST_BEGIN(test_size_classes) {
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);

- assert_u_eq(index, size2index(size_class),
- "size2index() does not reverse index2size(): index=%u -->"
- " size_class=%zu --> index=%u --> size_class=%zu", index,
- size_class, size2index(size_class),
- index2size(size2index(size_class)));
- assert_zu_eq(size_class, index2size(size2index(size_class)),
- "index2size() does not reverse size2index(): index=%u -->"
- " size_class=%zu --> index=%u --> size_class=%zu", index,
- size_class, size2index(size_class),
- index2size(size2index(size_class)));
+ assert_u_eq(index, sz_size2index(size_class),
+ "sz_size2index() does not reverse sz_index2size(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));
+ assert_zu_eq(size_class,
+ sz_index2size(sz_size2index(size_class)),
+ "sz_index2size() does not reverse sz_size2index(): index=%u"
+ " --> size_class=%zu --> index=%u --> size_class=%zu",
+ index, size_class, sz_size2index(size_class),
+ sz_index2size(sz_size2index(size_class)));

- assert_u_eq(index+1, size2index(size_class+1),
+ assert_u_eq(index+1, sz_size2index(size_class+1),
"Next size_class does not round up properly");

assert_zu_eq(size_class, (index > 0) ?
- s2u(index2size(index-1)+1) : s2u(1),
- "s2u() does not round up to size class");
- assert_zu_eq(size_class, s2u(size_class-1),
- "s2u() does not round up to size class");
- assert_zu_eq(size_class, s2u(size_class),
- "s2u() does not compute same size class");
- assert_zu_eq(s2u(size_class+1), index2size(index+1),
- "s2u() does not round up to next size class");
+ sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
+ assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
+ "sz_s2u() does not round up to next size class");
}

- assert_u_eq(index, size2index(index2size(index)),
- "size2index() does not reverse index2size()");
- assert_zu_eq(max_size_class, index2size(size2index(max_size_class)),
- "index2size() does not reverse size2index()");
+ assert_u_eq(index, sz_size2index(sz_index2size(index)),
+ "sz_size2index() does not reverse sz_index2size()");
+ assert_zu_eq(max_size_class, sz_index2size(
+ sz_size2index(max_size_class)),
+ "sz_index2size() does not reverse sz_size2index()");

- assert_zu_eq(size_class, s2u(index2size(index-1)+1),
- "s2u() does not round up to size class");
- assert_zu_eq(size_class, s2u(size_class-1),
- "s2u() does not round up to size class");
- assert_zu_eq(size_class, s2u(size_class),
- "s2u() does not compute same size class");
+ assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class-1),
+ "sz_s2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_s2u(size_class),
+ "sz_s2u() does not compute same size class");
}
TEST_END

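Aside: the invariants this test exercises (size2index reverses index2size, and s2u rounds any request up to exactly its class size) can be demonstrated in isolation. A hedged sketch using a made-up five-entry table (the toy_* names are assumptions, not the renamed sz_* helpers):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical size classes; jemalloc's real table is generated. */
    static const size_t toy_index2size_tab[] = {8, 16, 32, 48, 64};
    #define TOY_NSIZES (sizeof(toy_index2size_tab) / sizeof(toy_index2size_tab[0]))

    /* Smallest class index whose size is >= size. */
    static size_t toy_size2index(size_t size) {
        for (size_t i = 0; i < TOY_NSIZES; i++) {
            if (toy_index2size_tab[i] >= size) {
                return i;
            }
        }
        return TOY_NSIZES; /* overflow */
    }

    static size_t toy_s2u(size_t size) {
        size_t ind = toy_size2index(size);
        return ind < TOY_NSIZES ? toy_index2size_tab[ind] : 0;
    }

    int main(void) {
        /* The same round-trip invariants the test asserts. */
        for (size_t i = 0; i < TOY_NSIZES; i++) {
            size_t sc = toy_index2size_tab[i];
            assert(toy_size2index(sc) == i); /* size2index reverses index2size */
            assert(toy_s2u(sc) == sc);       /* s2u keeps an exact class size */
            assert(toy_s2u(sc - 1) == sc);   /* s2u rounds up to the class */
        }
        printf("round-trip invariants hold for %zu toy classes\n",
            (size_t)TOY_NSIZES);
        return 0;
    }
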
@ -83,10 +85,11 @@ TEST_BEGIN(test_psize_classes) {
pszind_t pind, max_pind;

max_psz = get_max_size_class() + PAGE;
- max_pind = psz2ind(max_psz);
+ max_pind = sz_psz2ind(max_psz);

- for (pind = 0, size_class = pind2sz(pind); pind < max_pind || size_class
- < max_psz; pind++, size_class = pind2sz(pind)) {
+ for (pind = 0, size_class = sz_pind2sz(pind);
+ pind < max_pind || size_class < max_psz;
+ pind++, size_class = sz_pind2sz(pind)) {
assert_true(pind < max_pind,
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);
@ -94,42 +97,42 @@ TEST_BEGIN(test_psize_classes) {
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);

- assert_u_eq(pind, psz2ind(size_class),
- "psz2ind() does not reverse pind2sz(): pind=%u -->"
+ assert_u_eq(pind, sz_psz2ind(size_class),
+ "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
- size_class, psz2ind(size_class),
- pind2sz(psz2ind(size_class)));
- assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
- "pind2sz() does not reverse psz2ind(): pind=%u -->"
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));
+ assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
+ "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
- size_class, psz2ind(size_class),
- pind2sz(psz2ind(size_class)));
+ size_class, sz_psz2ind(size_class),
+ sz_pind2sz(sz_psz2ind(size_class)));

- assert_u_eq(pind+1, psz2ind(size_class+1),
+ assert_u_eq(pind+1, sz_psz2ind(size_class+1),
"Next size_class does not round up properly");

assert_zu_eq(size_class, (pind > 0) ?
- psz2u(pind2sz(pind-1)+1) : psz2u(1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class-1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class),
- "psz2u() does not compute same size class");
- assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
- "psz2u() does not round up to next size class");
+ sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
+ assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
+ "sz_psz2u() does not round up to next size class");
}

- assert_u_eq(pind, psz2ind(pind2sz(pind)),
- "psz2ind() does not reverse pind2sz()");
- assert_zu_eq(max_psz, pind2sz(psz2ind(max_psz)),
- "pind2sz() does not reverse psz2ind()");
+ assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
+ "sz_psz2ind() does not reverse sz_pind2sz()");
+ assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
+ "sz_pind2sz() does not reverse sz_psz2ind()");

- assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class-1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class),
- "psz2u() does not compute same size class");
+ assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ "sz_psz2u() does not round up to size class");
+ assert_zu_eq(size_class, sz_psz2u(size_class),
+ "sz_psz2u() does not compute same size class");
}
TEST_END

@ -139,35 +142,35 @@ TEST_BEGIN(test_overflow) {
max_size_class = get_max_size_class();
max_psz = max_size_class + PAGE;

- assert_u_eq(size2index(max_size_class+1), NSIZES,
- "size2index() should return NSIZES on overflow");
- assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
- "size2index() should return NSIZES on overflow");
- assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
- "size2index() should return NSIZES on overflow");
+ assert_u_eq(sz_size2index(max_size_class+1), NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
+ "sz_size2index() should return NSIZES on overflow");
+ assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES,
+ "sz_size2index() should return NSIZES on overflow");

- assert_zu_eq(s2u(max_size_class+1), 0,
- "s2u() should return 0 for unsupported size");
- assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
- "s2u() should return 0 for unsupported size");
- assert_zu_eq(s2u(SIZE_T_MAX), 0,
- "s2u() should return 0 on overflow");
+ assert_zu_eq(sz_s2u(max_size_class+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
+ "sz_s2u() should return 0 for unsupported size");
+ assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
+ "sz_s2u() should return 0 on overflow");

- assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
- assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
- assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");
+ assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES,
+ "sz_psz2ind() should return NPSIZES on overflow");

- assert_zu_eq(psz2u(max_size_class+1), max_psz,
- "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
+ assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
" size");
- assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
- "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
+ assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
"size");
- assert_zu_eq(psz2u(SIZE_T_MAX), max_psz,
- "psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
+ assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
+ "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
}
TEST_END
