San: Bump alloc frequently reused guarded allocations
To utilize a separate retained area for guarded extents, use a bump allocator to allocate those extents.
This commit is contained in:
committed by
Alexander Lapenkov
parent
f56f5b9930
commit
800ce49c19
@@ -208,6 +208,7 @@ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
|
||||
assert(edata_committed_get(inner) == edata_committed_get(outer));
|
||||
assert(edata_state_get(inner) == extent_state_active);
|
||||
assert(edata_state_get(outer) == extent_state_merging);
|
||||
assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
|
||||
assert(edata_base_get(inner) == edata_past_get(outer) ||
|
||||
edata_base_get(outer) == edata_past_get(inner));
|
||||
}
|
||||
|
@@ -127,6 +127,7 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
|
||||
return false;
|
||||
}
|
||||
}
|
||||
assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@@ -99,6 +99,9 @@ struct pac_s {
|
||||
exp_grow_t exp_grow;
|
||||
malloc_mutex_t grow_mtx;
|
||||
|
||||
/* Special allocator for guarded frequently reused extents. */
|
||||
san_bump_alloc_t sba;
|
||||
|
||||
/* How large extents should be before getting auto-purged. */
|
||||
atomic_zu_t oversize_threshold;
|
||||
|
||||
|
@@ -5,7 +5,9 @@
|
||||
#include "jemalloc/internal/exp_grow.h"
|
||||
#include "jemalloc/internal/mutex.h"
|
||||
|
||||
extern const size_t SBA_RETAINED_ALLOC_SIZE;
|
||||
#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
|
||||
|
||||
extern bool opt_retain;
|
||||
|
||||
typedef struct ehooks_s ehooks_t;
|
||||
typedef struct pac_s pac_t;
|
||||
@@ -17,8 +19,31 @@ struct san_bump_alloc_s {
|
||||
edata_t *curr_reg;
|
||||
};
|
||||
|
||||
bool
|
||||
san_bump_alloc_init(san_bump_alloc_t* sba);
|
||||
static inline bool
|
||||
san_bump_enabled() {
|
||||
/*
|
||||
* We enable san_bump allocator only when it's possible to break up a
|
||||
* mapping and unmap a part of it (maps_coalesce). This is needed to
|
||||
* ensure the arena destruction process can destroy all retained guarded
|
||||
* extents one by one and to unmap a trailing part of a retained guarded
|
||||
* region when it's too small to fit a pending allocation.
|
||||
* opt_retain is required, because this allocator retains a large
|
||||
* virtual memory mapping and returns smaller parts of it.
|
||||
*/
|
||||
return maps_coalesce && opt_retain;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
san_bump_alloc_init(san_bump_alloc_t* sba) {
|
||||
bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
|
||||
WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
|
||||
if (err) {
|
||||
return true;
|
||||
}
|
||||
sba->curr_reg = NULL;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
edata_t *
|
||||
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
|
||||
|
Reference in New Issue
Block a user