Remove redundant chunk argument from chunk_{,de,re}register().
commit 93e79c5c3f
parent f442254bdf
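For context, the sketch below (not jemalloc source) illustrates why the separate chunk pointer is redundant: the extent already records the chunk's base address and size, so chunk_{,de,re}register() can recover the address via extent_addr_get() instead of taking it as an argument. The simplified extent_t layout, the printf "registry", and main() are illustrative stand-ins; only the function names and the extent_addr_get()/extent_size_get() accessors come from the diff below.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	void	*addr;	/* base address of the chunk backing this extent */
	size_t	size;	/* extent size in bytes */
} extent_t;

static const void *
extent_addr_get(const extent_t *extent)
{
	return (extent->addr);
}

static size_t
extent_size_get(const extent_t *extent)
{
	return (extent->size);
}

/*
 * New-style registration: the chunk address is derived from the extent
 * instead of being passed (and assert-checked) as a separate argument.
 * A real implementation would record [addr, addr+size) in a radix tree
 * (chunks_rtree); this stand-in just prints what would be recorded.
 */
static bool
chunk_register(const extent_t *extent)
{
	const void *addr = extent_addr_get(extent);
	size_t size = extent_size_get(extent);

	printf("register   %p (%zu bytes)\n", (void *)addr, size);
	return (false);	/* false == success, as in jemalloc */
}

static void
chunk_deregister(const extent_t *extent)
{
	printf("deregister %p (%zu bytes)\n",
	    (void *)extent_addr_get(extent), extent_size_get(extent));
}

static void
chunk_reregister(const extent_t *extent)
{
	bool err = chunk_register(extent);

	assert(!err);
}

int
main(void)
{
	static char chunk[4096];
	extent_t extent = { chunk, sizeof(chunk) };

	/* Old call sites passed both the raw pointer and the extent, e.g.
	 * chunk_register(tsdn, chunk, &extent); now the extent suffices. */
	if (chunk_register(&extent))
		return (1);
	chunk_deregister(&extent);
	chunk_reregister(&extent);
	return (0);
}

The diff below makes exactly this signature change and updates every caller accordingly.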
@@ -52,11 +52,9 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
 
-bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
-void chunk_deregister(tsdn_t *tsdn, const void *chunk,
-    const extent_t *extent);
-void chunk_reregister(tsdn_t *tsdn, const void *chunk,
-    const extent_t *extent);
+bool chunk_register(tsdn_t *tsdn, const extent_t *extent);
+void chunk_deregister(tsdn_t *tsdn, const extent_t *extent);
+void chunk_reregister(tsdn_t *tsdn, const extent_t *extent);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
@@ -527,7 +527,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
	 */
	extent_init(&chunk->extent, arena, chunk, chunksize, true, zero, true,
	    true);
-	return (chunk_register(tsdn, chunk, &chunk->extent));
+	return (chunk_register(tsdn, &chunk->extent));
 }
 
 static arena_chunk_t *
@@ -665,7 +665,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
-	chunk_deregister(tsdn, chunk, &chunk->extent);
+	chunk_deregister(tsdn, &chunk->extent);
 
	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
src/chunk.c (22 changed lines)
@@ -141,21 +141,21 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 }
 
 bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
+	const void *addr;
	size_t size;
	rtree_elm_t *elm_a;
 
-	assert(extent_addr_get(extent) == chunk);
-
+	addr = extent_addr_get(extent);
	size = extent_size_get(extent);
 
-	if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk,
+	if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr,
	    false, true)) == NULL)
		return (true);
	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, extent);
	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)chunk +
+		uintptr_t last = ((uintptr_t)addr +
		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
		rtree_elm_t *elm_b;
 
@@ -190,18 +190,20 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 }
 
 void
-chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
+	const void *addr;
	size_t size;
	rtree_elm_t *elm_a;
 
+	addr = extent_addr_get(extent);
	size = extent_size_get(extent);
 
-	elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk, true,
+	elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr, true,
	    false);
	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, NULL);
	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)chunk +
+		uintptr_t last = ((uintptr_t)addr +
		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
		rtree_elm_t *elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
		    last, true, false);
@@ -219,11 +221,11 @@ chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 }
 
 void
-chunk_reregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
 {
	bool err;
 
-	err = chunk_register(tsdn, chunk, extent);
+	err = chunk_register(tsdn, extent);
	assert(!err);
 }
 
src/huge.c (16 changed lines)
@@ -52,7 +52,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 
	extent_init(extent, arena, ret, usize, true, is_zeroed, true, false);
 
-	if (chunk_register(tsdn, ret, extent)) {
+	if (chunk_register(tsdn, extent)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
		    true);
@@ -135,11 +135,11 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
 
	/* Update the size of the huge allocation. */
	assert(extent_size_get(extent) != usize);
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);
	/* Update zeroed. */
	extent_zeroed_set(extent, post_zeroed);
 
@@ -196,13 +196,13 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr,
		post_zeroed = pre_zeroed;
 
	/* Update the size of the huge allocation. */
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	/* Update zeroed. */
	extent_zeroed_set(extent, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);
 
	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
@@ -232,12 +232,12 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
		return (true);
 
	/* Update the size of the huge allocation. */
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	extent_zeroed_set(extent, extent_zeroed_get(extent) && is_zeroed_chunk);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);
 
	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
@@ -355,7 +355,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
	arena_t *arena;
 
	arena = extent_arena_get(extent);
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, extent, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);