#ifndef JEMALLOC_INTERNAL_PAI_H
#define JEMALLOC_INTERNAL_PAI_H

/* An interface for page allocation. */

typedef struct pai_s pai_t;
struct pai_s {
	/* Returns NULL on failure. */
	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
	    bool *deferred_work_generated);
	/*
	 * Returns the number of extents added to the list (which may be fewer
	 * than requested, in case of OOM). The list should already be
	 * initialized. The only alignment guarantee is page-alignment, and
	 * the results are not necessarily zeroed.
	 */
	size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
	    size_t nallocs, edata_list_active_t *results,
	    bool *deferred_work_generated);
	bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    size_t old_size, size_t new_size, bool zero,
	    bool *deferred_work_generated);
	bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    size_t old_size, size_t new_size, bool *deferred_work_generated);
	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
	    bool *deferred_work_generated);
	/* This function empties out list as a side-effect of being called. */
	void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
	    edata_list_active_t *list, bool *deferred_work_generated);
	uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
};
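
/*
 * Illustrative sketch (hypothetical, not part of jemalloc): a backing
 * allocator typically embeds a pai_t and fills in the function pointers at
 * init time, so callers can hold a plain pai_t * while each callback casts
 * self back to the concrete type.  The pai_stub_* names below are invented
 * for illustration only.
 */
typedef struct pai_stub_s {
	pai_t pai;		/* First member, so (pai_t *)stub == &stub->pai. */
	size_t nallocs_failed;	/* Example of implementation-private state. */
} pai_stub_t;

static inline edata_t *
pai_stub_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	pai_stub_t *stub = (pai_stub_t *)self;
	/* A real implementation would obtain pages here; the stub just fails. */
	stub->nallocs_failed++;
	*deferred_work_generated = false;
	return NULL;
}

static inline void
pai_stub_init(pai_stub_t *stub) {
	stub->nallocs_failed = 0;
	stub->pai.alloc = &pai_stub_alloc;
	/* The remaining callbacks would be wired up the same way. */
}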

/*
 * These are just simple convenience functions to avoid having to reference the
 * same pai_t twice on every invocation.
 */

static inline edata_t *
pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	return self->alloc(tsdn, self, size, alignment, zero, guarded,
	    frequent_reuse, deferred_work_generated);
}

static inline size_t
pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
    edata_list_active_t *results, bool *deferred_work_generated) {
	return self->alloc_batch(tsdn, self, size, nallocs, results,
	    deferred_work_generated);
}

static inline bool
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool zero, bool *deferred_work_generated) {
	return self->expand(tsdn, self, edata, old_size, new_size, zero,
	    deferred_work_generated);
}

static inline bool
pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool *deferred_work_generated) {
	return self->shrink(tsdn, self, edata, old_size, new_size,
	    deferred_work_generated);
}

static inline void
pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    bool *deferred_work_generated) {
	self->dalloc(tsdn, self, edata, deferred_work_generated);
}

static inline void
pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
    bool *deferred_work_generated) {
	self->dalloc_batch(tsdn, self, list, deferred_work_generated);
}

static inline uint64_t
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
	return self->time_until_deferred_work(tsdn, self);
}
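
/*
 * Usage sketch (hypothetical, not part of jemalloc): how a caller might drive
 * a pai_t through the convenience wrappers above.  It also shows the batch
 * contract documented on alloc_batch/dalloc_batch: the results list must be
 * initialized beforehand, the allocator may deliver fewer extents than
 * requested, and dalloc_batch empties the list it is given.  It assumes the
 * usual edata_list_active_* helpers and jemalloc's PAGE alignment constant.
 */
static inline void
pai_usage_sketch(tsdn_t *tsdn, pai_t *pai, size_t size, size_t nallocs) {
	bool deferred_work_generated = false;

	/* Single extent: pai_alloc(...) instead of pai->alloc(pai, ...). */
	edata_t *edata = pai_alloc(tsdn, pai, size, PAGE, /* zero */ false,
	    /* guarded */ false, /* frequent_reuse */ false,
	    &deferred_work_generated);
	if (edata != NULL) {
		pai_dalloc(tsdn, pai, edata, &deferred_work_generated);
	}

	/* Batch path: the results list must be initialized by the caller. */
	edata_list_active_t results;
	edata_list_active_init(&results);
	size_t nsuccess = pai_alloc_batch(tsdn, pai, size, nallocs, &results,
	    &deferred_work_generated);
	if (nsuccess < nallocs) {
		/* Partial success (OOM); the caller decides how to proceed. */
	}
	/* Returns every extent to the allocator and leaves results empty. */
	pai_dalloc_batch(tsdn, pai, &results, &deferred_work_generated);
}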

/*
 * An implementation of batch allocation that simply calls alloc once for
 * each item in the list.
 */
size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
/* Ditto, for dalloc. */
void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
    edata_list_active_t *list, bool *deferred_work_generated);
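
/*
 * Sketch of what the default described above amounts to (the real definitions
 * live in a .c file, not in this header): allocate one page-aligned extent at
 * a time with pai_alloc, appending to results, and stop early on OOM.  The
 * name pai_alloc_batch_loop_sketch is invented for illustration; PAGE is
 * jemalloc's page-alignment constant.
 */
static inline size_t
pai_alloc_batch_loop_sketch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results,
    bool *deferred_work_generated) {
	for (size_t i = 0; i < nallocs; i++) {
		bool deferred = false;
		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
		    /* zero */ false, /* guarded */ false,
		    /* frequent_reuse */ false, &deferred);
		*deferred_work_generated |= deferred;
		if (edata == NULL) {
			/* OOM: report how many extents actually made it in. */
			return i;
		}
		edata_list_active_append(results, edata);
	}
	return nallocs;
}
/* The dalloc counterpart would similarly call pai_dalloc per list element. */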

#endif /* JEMALLOC_INTERNAL_PAI_H */