2019-12-17 03:05:07 +08:00
|
|
|
#ifndef JEMALLOC_INTERNAL_EXTENT_H
|
|
|
|
#define JEMALLOC_INTERNAL_EXTENT_H
|
2017-01-11 10:06:31 +08:00
|
|
|
|
2019-12-13 08:25:24 +08:00
|
|
|
#include "jemalloc/internal/ecache.h"
|
2019-12-03 06:19:22 +08:00
|
|
|
#include "jemalloc/internal/ehooks.h"
|
2017-04-11 07:54:25 +08:00
|
|
|
#include "jemalloc/internal/ph.h"
|
2017-05-24 05:26:31 +08:00
|
|
|
#include "jemalloc/internal/rtree.h"
|
2017-04-11 07:54:25 +08:00
|
|
|
|
2019-12-04 10:31:47 +08:00
|
|
|
/*
|
|
|
|
* This module contains the page-level allocator. It chooses the addresses that
|
|
|
|
* allocations requested by other modules will inhabit, and updates the global
|
|
|
|
* metadata to reflect allocation/deallocation/purging decisions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
|
|
|
|
* is the max ratio between the size of the active extent and the new extent.
|
|
|
|
*/
|
|
|
|
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
|
2017-11-10 05:51:39 +08:00
|
|
|
extern size_t opt_lg_extent_max_active_fit;
|
|
|
|
|
2020-06-02 09:49:42 +08:00
|
|
|
edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2021-03-11 16:21:47 +08:00
|
|
|
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
|
2021-04-27 05:22:25 +08:00
|
|
|
bool zero, bool guarded);
|
2020-06-02 09:49:42 +08:00
|
|
|
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2021-03-11 16:21:47 +08:00
|
|
|
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
|
2021-04-27 05:22:25 +08:00
|
|
|
bool zero, bool guarded);
|
2020-06-02 09:49:42 +08:00
|
|
|
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2019-12-13 08:25:24 +08:00
|
|
|
ecache_t *ecache, edata_t *edata);
|
2020-06-02 09:49:42 +08:00
|
|
|
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2019-12-13 08:25:24 +08:00
|
|
|
ecache_t *ecache, size_t npages_min);
|
2019-12-13 09:30:28 +08:00
|
|
|
|
2020-06-02 09:49:42 +08:00
|
|
|
void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2020-03-10 03:20:06 +08:00
|
|
|
edata_t *edata);
|
2020-06-02 09:49:42 +08:00
|
|
|
void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2019-12-10 06:36:45 +08:00
|
|
|
edata_t *edata);
|
2020-06-02 09:49:42 +08:00
|
|
|
void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2019-12-10 06:36:45 +08:00
|
|
|
edata_t *edata);
|
2019-12-14 05:46:25 +08:00
|
|
|
bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
|
|
|
size_t offset, size_t length);
|
|
|
|
bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
|
|
|
size_t offset, size_t length);
|
2020-03-11 01:37:46 +08:00
|
|
|
bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
|
|
|
size_t offset, size_t length);
|
|
|
|
bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
|
|
|
|
size_t offset, size_t length);
|
2020-06-02 09:49:42 +08:00
|
|
|
edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
|
2020-03-16 09:55:43 +08:00
|
|
|
ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b);
|
2020-06-02 09:49:42 +08:00
|
|
|
bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
|
2020-03-16 00:53:09 +08:00
|
|
|
edata_t *a, edata_t *b);
|
2020-06-02 09:01:19 +08:00
|
|
|
size_t extent_sn_next(pac_t *pac);
|
2017-03-14 08:36:57 +08:00
|
|
|
bool extent_boot(void);
|
2017-01-11 10:06:31 +08:00
|
|
|
|
2021-03-26 06:32:44 +08:00
|
|
|
JEMALLOC_ALWAYS_INLINE bool
extent_neighbor_head_state_mergeable(bool edata_is_head,
    bool neighbor_is_head, bool forward) {
	/*
	 * Merging is disallowed whenever the extent at the higher address is
	 * a head extent: when merging forward the neighbor is the higher one,
	 * otherwise we are.  This preserves first-fit ordering and, more
	 * importantly, guarantees no merge ever crosses an arena boundary.
	 */
	bool higher_addr_is_head = forward ? neighbor_is_head : edata_is_head;
	return !higher_addr_is_head;
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE bool
|
|
|
|
extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
|
|
|
|
extent_pai_t pai, extent_state_t expected_state, bool forward,
|
|
|
|
bool expanding) {
|
|
|
|
edata_t *neighbor = contents.edata;
|
|
|
|
if (neighbor == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* It's not safe to access *neighbor yet; must verify states first. */
|
|
|
|
bool neighbor_is_head = contents.metadata.is_head;
|
|
|
|
if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
|
|
|
|
neighbor_is_head, forward)) {
|
2021-06-19 20:38:44 +08:00
|
|
|
return false;
|
2021-03-26 06:32:44 +08:00
|
|
|
}
|
|
|
|
extent_state_t neighbor_state = contents.metadata.state;
|
|
|
|
if (pai == EXTENT_PAI_PAC) {
|
|
|
|
if (neighbor_state != expected_state) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* From this point, it's safe to access *neighbor. */
|
|
|
|
if (!expanding && (edata_committed_get(edata) !=
|
|
|
|
edata_committed_get(neighbor))) {
|
|
|
|
/*
|
|
|
|
* Some platforms (e.g. Windows) require an explicit
|
|
|
|
* commit step (and writing to uncomitted memory is not
|
|
|
|
* allowed).
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (neighbor_state == extent_state_active) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* From this point, it's safe to access *neighbor. */
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(edata_pai_get(edata) == pai);
|
|
|
|
if (edata_pai_get(neighbor) != pai) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (opt_retain) {
|
|
|
|
assert(edata_arena_ind_get(edata) ==
|
|
|
|
edata_arena_ind_get(neighbor));
|
|
|
|
} else {
|
|
|
|
if (edata_arena_ind_get(edata) !=
|
|
|
|
edata_arena_ind_get(neighbor)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-12-17 03:05:07 +08:00
|
|
|
#endif /* JEMALLOC_INTERNAL_EXTENT_H */
|