e2bcf037d4
Rather than protecting dss operations with a mutex, use atomic operations. This has negligible impact on synchronization overhead during typical dss allocation, but is a substantial improvement for chunk_in_dss() and the newly added chunk_dss_mergeable(), which can be called multiple times during chunk deallocations. This change also has the advantage of avoiding tsd in deallocation paths associated with purging, which resolves potential deadlocks during thread exit due to attempted tsd resurrection. This resolves #425.
38 lines · 1.2 KiB · C
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Precedence of dss (sbrk(2)-based) chunk allocation relative to mmap(2).
 * NOTE(review): semantics of "primary"/"secondary" inferred from the names
 * and DSS_DEFAULT below — confirm against chunk_dss.c / the mallctl docs.
 */
typedef enum {
	dss_prec_disabled  = 0,	/* Never allocate from the dss. */
	dss_prec_primary   = 1,	/* Prefer dss over mmap. */
	dss_prec_secondary = 2,	/* Prefer mmap over dss. */

	dss_prec_limit     = 3	/* Number of modes; not a valid setting. */
} dss_prec_t;

/* Default precedence, and its string form (as reported via mallctl). */
#define	DSS_PREC_DEFAULT	dss_prec_secondary
#define	DSS_DEFAULT		"secondary"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Human-readable names for dss_prec_t values, indexed by enum value
 * (presumably dss_prec_limit entries — confirm against the definition
 * in chunk_dss.c).
 */
extern const char *dss_prec_names[];

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/*
 * Per the change description, dss state is synchronized with atomic
 * operations rather than a mutex, so the query functions below are safe to
 * call from deallocation paths without tsd.
 */

/* Read the current dss precedence. */
dss_prec_t	chunk_dss_prec_get(void);
/* Set the dss precedence; returns true on failure. */
bool	chunk_dss_prec_set(dss_prec_t dss_prec);
/*
 * Allocate a size-byte, alignment-aligned chunk by extending the dss;
 * returns NULL on failure.  *zero/*commit are in/out flags — confirm exact
 * contract against chunk_dss.c.
 */
void	*chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
/* Return whether chunk lies within the dss. */
bool	chunk_in_dss(void *chunk);
/*
 * Return whether chunk_a and chunk_b may be merged, i.e. whether they are
 * on the same side of the dss boundary (callable multiple times per
 * deallocation, hence the atomic-based implementation).
 */
bool	chunk_dss_mergeable(void *chunk_a, void *chunk_b);
/* One-time initialization of dss bookkeeping state. */
void	chunk_dss_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

/* No inline functions for this module. */

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/