server-skynet-source-3rd-je.../include/jemalloc/internal/stats.h


/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
};
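
/*
 * Illustrative sketch (assumed variable names, not code from this file): a
 * tcache-served small allocation bumps the per-bin request counter along
 * these lines, given a tcache bin whose embedded stats are named tstats:
 *
 *	tbin->tstats.nrequests++;
 *
 * The counter is later folded into the owning arena's malloc_bin_stats_t
 * (see nrequests below).
 */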
struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of slabs created for this bin's size class. */
	uint64_t	nslabs;

	/*
	 * Total number of slabs reused by extracting them from the slabs heap
	 * for this bin's size class.
	 */
	uint64_t	reslabs;

	/* Current number of slabs in this bin. */
	size_t		curslabs;
};
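
/*
 * Illustrative sketch (assumed names; the real merge lives in the tcache
 * code): when tcache statistics are periodically merged, the cached request
 * count is folded into the arena's bin stats and then reset, roughly:
 *
 *	bin->stats.nrequests += tbin->tstats.nrequests;
 *	tbin->tstats.nrequests = 0;
 */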
struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/* Current number of large allocations (extents) of this size class. */
	size_t		curlextents;
};
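
/*
 * Illustrative sketch (assumed names): on a large allocation with size index
 * szind, the arena's per-class entry would be updated roughly as follows; the
 * szind - NBINS index math mirrors the lstats array declared in arena_stats_s
 * below:
 *
 *	malloc_large_stats_t *lstats = &astats->lstats[szind - NBINS];
 *	lstats->nmalloc++;
 *	lstats->nrequests++;
 *	lstats->curlextents++;
 */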
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/*
	 * Number of bytes currently retained as a side effect of munmap()
	 * being disabled/bypassed.  Retained bytes are technically mapped
	 * (though always decommitted or purged), but they are excluded from
	 * the mapped statistic (above).
	 */
	size_t		retained;

	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;

	/* Number of bytes currently allocated for internal metadata. */
	size_t		metadata; /* Protected via atomic_*_z(). */

	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;

	/* One element for each large size class. */
	malloc_large_stats_t	lstats[NSIZES - NBINS];
};
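
/*
 * Illustrative sanity check (sketch, assumed names): once per-class counters
 * have been merged, summing lstats should reproduce the arena-wide large
 * totals, e.g.:
 *
 *	uint64_t nmalloc = 0;
 *	for (unsigned i = 0; i < NSIZES - NBINS; i++)
 *		nmalloc += astats->lstats[i].nmalloc;
 *	assert(nmalloc == astats->nmalloc_large);
 */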
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool	opt_stats_print;
extern size_t	stats_cactive;

void	stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);
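
/*
 * Example usage (sketch): stats_print() drives the supplied write callback
 * with formatted text, so a caller dumping statistics to a stdio stream
 * might look like:
 *
 *	static void
 *	write_cb(void *cbopaque, const char *s)
 *	{
 *		fputs(s, (FILE *)cbopaque);
 *	}
 *	...
 *	stats_print(write_cb, stderr, NULL);
 *
 * A NULL opts string requests the default (full) output.
 */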
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t	stats_cactive_get(void);
void	stats_cactive_add(size_t size);
void	stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
/* Atomically read the current number of active bytes. */
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

	return (atomic_read_z(&stats_cactive));
}

/* Atomically credit size bytes (a chunk multiple) to the active total. */
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
	UNUSED size_t cactive;

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	cactive = atomic_add_z(&stats_cactive, size);
	/* The pre-add value must be below the post-add value (no wraparound). */
	assert(cactive - size < cactive);
}

/* Atomically debit size bytes (a chunk multiple) from the active total. */
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
	UNUSED size_t cactive;

	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	cactive = atomic_sub_z(&stats_cactive, size);
	/* The pre-sub value must exceed the post-sub value (no underflow). */
	assert(cactive + size > cactive);
}
#endif
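
/*
 * Illustrative usage (sketch): callers account active memory in
 * chunk-multiple quanta around mapping and unmapping, e.g.:
 *
 *	stats_cactive_add(chunksize);
 *	...
 *	stats_cactive_sub(chunksize);
 *
 * The chunksize_mask assertions above enforce that sizes are chunk
 * multiples.
 */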
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/