Emap: Move in iealloc.
This is logically scoped to the emap.
parent 1d449bd9a6
commit 9b5d105fc3
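The hunks below delete the iealloc() helper from jemalloc_internal_inlines_b.h and re-add it to emap.h as emap_lookup(), which takes the emap as an explicit parameter; every call site then passes &emap_global. A minimal sketch of the call-site change, assuming jemalloc's internal types (tsdn_t, edata_t) and emap_global are in scope; the wrapper name lookup_edata is hypothetical and for illustration only:

/* Illustration only: the pattern this commit applies at each call site. */
static edata_t *
lookup_edata(tsdn_t *tsdn, const void *ptr) {
	/* Before: iealloc() reached into the global rtree from inlines_b.h. */
	/* return iealloc(tsdn, ptr); */
	/* After: the emap module owns the pointer-to-edata mapping. */
	return emap_lookup(tsdn, &emap_global, ptr);
}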
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
 #define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
 
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
@@ -47,10 +48,10 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx,
 
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata = iealloc(tsd_tsdn(tsd), ptr);
+		edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 		is_slab = edata_slab_get(edata);
 	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
-		edata = iealloc(tsd_tsdn(tsd), ptr);
+		edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 	}
 
 	if (unlikely(!is_slab)) {
@@ -73,13 +74,15 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, alloc_ctx_t *alloc_ctx) {
 
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+		edata_t *edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 		if (unlikely(!edata_slab_get(edata))) {
 			large_prof_tctx_reset(edata);
 		}
 	} else {
 		if (unlikely(!alloc_ctx->slab)) {
-			large_prof_tctx_reset(iealloc(tsd_tsdn(tsd), ptr));
+			edata_t *edata = emap_lookup(tsd_tsdn(tsd),
+			    &emap_global, ptr);
+			large_prof_tctx_reset(edata);
 		}
 	}
 }
@@ -89,7 +92,7 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+	edata_t *edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 	assert(!edata_slab_get(edata));
 
 	large_prof_tctx_reset(edata);
@@ -177,8 +180,9 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 
 JEMALLOC_ALWAYS_INLINE arena_t *
 arena_aalloc(tsdn_t *tsdn, const void *ptr) {
-	return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(
-	    iealloc(tsdn, ptr))], ATOMIC_RELAXED);
+	edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+	unsigned arena_ind = edata_arena_ind_get(edata);
+	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -233,7 +237,7 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
 	if (config_prof && unlikely(szind < SC_NBINS)) {
 		arena_dalloc_promoted(tsdn, ptr, NULL, true);
 	} else {
-		edata_t *edata = iealloc(tsdn, ptr);
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -277,7 +281,7 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 		    slow_path);
 		}
 	} else {
-		edata_t *edata = iealloc(tsdn, ptr);
+		edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -112,4 +112,13 @@ emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
 	}
 }
 
+JEMALLOC_ALWAYS_INLINE edata_t *
+emap_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	return rtree_edata_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr,
+	    true);
+}
+
 #endif /* JEMALLOC_INTERNAL_EMAP_H */
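emap_lookup() resolves a pointer through the emap's rtree using the thread's cached rtree_ctx, falling back to the stack-allocated rtree_ctx_fallback when no thread-cached context is available, which is why even TSDN_NULL callers (as in the prof_recent test at the end of this diff) keep working. A hedged usage sketch, assuming a live allocation p and the jemalloc-internal headers in scope:

	/* Sketch: look up the edata_t backing an allocation via the global emap. */
	edata_t *e = emap_lookup(TSDN_NULL, &emap_global, p);
	assert(e != NULL);
	size_t usize = edata_usize_get(e);	/* usable size recorded in the edata */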
@@ -1,9 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_INLINES_B_H
 #define JEMALLOC_INTERNAL_INLINES_B_H
 
-#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/extent.h"
-#include "jemalloc/internal/rtree.h"
 
 /* Choose an arena based on a per-thread value. */
 static inline arena_t *
@@ -77,13 +75,4 @@ arena_is_auto(arena_t *arena) {
 	return (arena_ind_get(arena) < manual_arena_base);
 }
 
-JEMALLOC_ALWAYS_INLINE edata_t *
-iealloc(tsdn_t *tsdn, const void *ptr) {
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	return rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
-	    (uintptr_t)ptr, true);
-}
-
 #endif /* JEMALLOC_INTERNAL_INLINES_B_H */
@@ -1637,7 +1637,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 	cassert(config_prof);
 	assert(opt_prof);
 
-	edata_t *edata = iealloc(tsdn, ptr);
+	edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 	size_t usize = edata_usize_get(edata);
 	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
 	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
@@ -1769,7 +1769,7 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
 
 void
 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
-	edata_t *edata = iealloc(tsdn, ptr);
+	edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 	arena_t *arena = arena_get_from_edata(edata);
 
 	arena_dalloc_bin(tsdn, arena, edata, ptr);
@@ -1783,7 +1783,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 	/* Calls with non-zero extra had to clamp extra. */
 	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
 
-	edata_t *edata = iealloc(tsdn, ptr);
+	edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		ret = true;
 		goto done;
@@ -1817,7 +1817,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 		ret = true;
 	}
 done:
-	assert(edata == iealloc(tsdn, ptr));
+	assert(edata == emap_lookup(tsdn, &emap_global, ptr));
 	*newsize = edata_usize_get(edata);
 
 	return ret;
@@ -2667,7 +2667,7 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
 	ret = EINVAL;
 	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	WRITE(ptr, void *);
-	edata = iealloc(tsd_tsdn(tsd), ptr);
+	edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 	if (edata == NULL)
 		goto label_return;
 
@@ -189,8 +189,8 @@ ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
 
 static inline bool
 ehooks_same_sn(tsdn_t *tsdn, void *addr_a, void *addr_b) {
-	edata_t *a = iealloc(tsdn, addr_a);
-	edata_t *b = iealloc(tsdn, addr_b);
+	edata_t *a = emap_lookup(tsdn, &emap_global, addr_a);
+	edata_t *b = emap_lookup(tsdn, &emap_global, addr_b);
 	return edata_sn_comp(a, b) == 0;
 }
 
@@ -253,9 +253,9 @@ bool
 ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
 	tsdn_t *tsdn = tsdn_fetch();
-	edata_t *a = iealloc(tsdn, addr_a);
+	edata_t *a = emap_lookup(tsdn, &emap_global, addr_a);
 	bool head_a = edata_is_head_get(a);
-	edata_t *b = iealloc(tsdn, addr_b);
+	edata_t *b = emap_lookup(tsdn, &emap_global, addr_b);
 	bool head_b = edata_is_head_get(b);
 	return ehooks_default_merge_impl(tsdn, addr_a, head_a, addr_b, head_b);
 }
@@ -6,7 +6,7 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
     size_t *nregs, size_t *size) {
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
 
-	const edata_t *edata = iealloc(tsdn, ptr);
+	const edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = 0;
 		return;
@@ -31,7 +31,7 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
 	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
 
-	const edata_t *edata = iealloc(tsdn, ptr);
+	const edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
 		*slabcur_addr = NULL;
@@ -3423,7 +3423,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	 * object associated with the ptr (though the content of the edata_t
 	 * object can be changed).
 	 */
-	edata_t *old_edata = iealloc(tsd_tsdn(tsd), ptr);
+	edata_t *old_edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 
 	alloc_ctx_t alloc_ctx;
 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
@@ -3462,7 +3462,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	 * xallocx() should keep using the same edata_t object (though its
 	 * content can be changed).
 	 */
-	assert(iealloc(tsd_tsdn(tsd), ptr) == old_edata);
+	assert(emap_lookup(tsd_tsdn(tsd), &emap_global, ptr) == old_edata);
 
 	if (unlikely(usize == old_usize)) {
 		te_alloc_rollback(tsd, usize);
@@ -272,7 +272,7 @@ void *
 large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
-	edata_t *edata = iealloc(tsdn, ptr);
+	edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
 
 	size_t oldusize = edata_usize_get(edata);
 	/* The following should have been caught by callers. */
@@ -148,7 +148,7 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
 void
 prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
     size_t usize, prof_tctx_t *tctx) {
-	edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
+	edata_t *edata = emap_lookup(tsd_tsdn(tsd), &emap_global, ptr);
 	prof_info_set(tsd, edata, tctx);
 
 	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
@@ -160,7 +160,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 		    item_edata);
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
-			item_edata[i] = iealloc(tsdn, *(bottom_item - i));
+			item_edata[i] = emap_lookup(tsd_tsdn(tsd), &emap_global,
+			    *(bottom_item - i));
 		}
 	}
 
@@ -258,7 +259,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, szind_t
 #ifndef JEMALLOC_EXTRA_SIZE_CHECK
 	/* Look up edata once per item. */
 	for (unsigned i = 0 ; i < nflush; i++) {
-		item_edata[i] = iealloc(tsd_tsdn(tsd), *(bottom_item - i));
+		item_edata[i] = emap_lookup(tsd_tsdn(tsd), &emap_global,
+		    *(bottom_item - i));
 	}
 #else
 	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
@@ -62,12 +62,12 @@ thd_start(void *varg) {
 	ptr = mallocx(1, MALLOCX_TCACHE_NONE);
 	ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
 
-	edata = iealloc(tsdn, ptr);
+	edata = emap_lookup(tsdn, &emap_global, ptr);
 	shard1 = edata_binshard_get(edata);
 	dallocx(ptr, 0);
 	assert_u_lt(shard1, 16, "Unexpected bin shard used");
 
-	edata = iealloc(tsdn, ptr2);
+	edata = emap_lookup(tsdn, &emap_global, ptr2);
 	shard2 = edata_binshard_get(edata);
 	dallocx(ptr2, 0);
 	assert_u_lt(shard2, 4, "Unexpected bin shard used");
@@ -101,7 +101,7 @@ TEST_END
 
 static void confirm_malloc(tsd_t *tsd, void *p) {
 	assert_ptr_not_null(p, "malloc failed unexpectedly");
-	edata_t *e = iealloc(TSDN_NULL, p);
+	edata_t *e = emap_lookup(TSDN_NULL, &emap_global, p);
 	assert_ptr_not_null(e, "NULL edata for living pointer");
 	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
 	prof_recent_t *n = edata_prof_recent_alloc_get(tsd, e);