Remove redundant chunk argument from chunk_{,de,re}register().

This commit is contained in:
Jason Evans 2016-05-16 13:37:41 -07:00
parent f442254bdf
commit 93e79c5c3f
4 changed files with 25 additions and 25 deletions

View File

@@ -52,11 +52,9 @@ chunk_hooks_t	chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t	chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
-bool	chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent);
-void	chunk_deregister(tsdn_t *tsdn, const void *chunk,
-    const extent_t *extent);
-void	chunk_reregister(tsdn_t *tsdn, const void *chunk,
-    const extent_t *extent);
+bool	chunk_register(tsdn_t *tsdn, const extent_t *extent);
+void	chunk_deregister(tsdn_t *tsdn, const extent_t *extent);
+void	chunk_reregister(tsdn_t *tsdn, const extent_t *extent);
 void	*chunk_alloc_base(size_t size);
 void	*chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,

View File

@@ -527,7 +527,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
	 */
	extent_init(&chunk->extent, arena, chunk, chunksize, true, zero, true,
	    true);
-	return (chunk_register(tsdn, chunk, &chunk->extent));
+	return (chunk_register(tsdn, &chunk->extent));
 }

 static arena_chunk_t *
@@ -665,7 +665,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

-	chunk_deregister(tsdn, chunk, &chunk->extent);
+	chunk_deregister(tsdn, &chunk->extent);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {

View File

@@ -141,21 +141,21 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 }

 bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
+	const void *addr;
	size_t size;
	rtree_elm_t *elm_a;

-	assert(extent_addr_get(extent) == chunk);
+	addr = extent_addr_get(extent);
	size = extent_size_get(extent);

-	if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk,
+	if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr,
	    false, true)) == NULL)
		return (true);
	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, extent);
	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)chunk +
+		uintptr_t last = ((uintptr_t)addr +
		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
		rtree_elm_t *elm_b;
@@ -190,18 +190,20 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 }

 void
-chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
+	const void *addr;
	size_t size;
	rtree_elm_t *elm_a;

+	addr = extent_addr_get(extent);
	size = extent_size_get(extent);

-	elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)chunk, true,
+	elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr, true,
	    false);
	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, NULL);
	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)chunk +
+		uintptr_t last = ((uintptr_t)addr +
		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
		rtree_elm_t *elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
		    last, true, false);
@@ -219,11 +221,11 @@ chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 }

 void
-chunk_reregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
+chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
 {
	bool err;

-	err = chunk_register(tsdn, chunk, extent);
+	err = chunk_register(tsdn, extent);
	assert(!err);
 }

View File

@@ -52,7 +52,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,

	extent_init(extent, arena, ret, usize, true, is_zeroed, true, false);

-	if (chunk_register(tsdn, ret, extent)) {
+	if (chunk_register(tsdn, extent)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
		    true);
@@ -135,11 +135,11 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,

	/* Update the size of the huge allocation. */
	assert(extent_size_get(extent) != usize);
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);

	/* Update zeroed. */
	extent_zeroed_set(extent, post_zeroed);
@@ -196,13 +196,13 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr,
		post_zeroed = pre_zeroed;

	/* Update the size of the huge allocation. */
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	/* Update zeroed. */
	extent_zeroed_set(extent, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
@@ -232,12 +232,12 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
		return (true);

	/* Update the size of the huge allocation. */
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	extent_zeroed_set(extent, extent_zeroed_get(extent) && is_zeroed_chunk);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-	chunk_reregister(tsdn, ptr, extent);
+	chunk_reregister(tsdn, extent);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
@@ -355,7 +355,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
	arena_t *arena;

	arena = extent_arena_get(extent);
-	chunk_deregister(tsdn, ptr, extent);
+	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, extent, ql_link);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);