Don't fetch tsd in a0{d,}alloc().
Don't fetch tsd in a0{d,}alloc(), because doing so can cause infinite recursion on systems that require an allocated tsd wrapper.
parent fc0b3b7383
commit 9b75677e53
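The recursion in question: on systems without usable native TLS, the tsd lives in a wrapper that itself has to be allocated, so tsd_fetch() can call back into a0alloc(), which (before this change) called tsd_fetch() again. Below is a standalone sketch of the cycle, with hypothetical names and a depth guard so it exits cleanly instead of overflowing the stack; it is an illustration, not jemalloc's actual tsd code.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins; jemalloc's real tsd machinery differs. */
typedef struct { int initialized; } tsd_t;

static void *a0alloc(size_t size);
static int depth;	/* guard so the sketch exits instead of overflowing */

static tsd_t *
tsd_fetch(void)
{
	static tsd_t *wrapper;

	if (wrapper == NULL) {
		/*
		 * No native TLS: the tsd lives in an allocated wrapper, and
		 * allocating it re-enters the bootstrap allocator.
		 */
		wrapper = a0alloc(sizeof(tsd_t));
	}
	return (wrapper);
}

static void *
a0alloc(size_t size)
{
	if (++depth > 3) {
		puts("a0alloc -> tsd_fetch -> a0alloc -> ... (unbounded)");
		exit(1);
	}
	(void)tsd_fetch();	/* pre-fix behavior: fetch tsd on every call */
	return (malloc(size));
}

int
main(void)
{
	a0alloc(1);
	return (0);
}

Passing NULL instead of a fetched tsd appears safe at these call sites because they already pass false for the trailing try_tcache argument, which suggests the tsd was only needed for tcache lookups in these bootstrap paths.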
src/jemalloc.c
@@ -203,7 +203,6 @@ static void *
 a0alloc(size_t size, bool zero)
 {
 	void *ret;
-	tsd_t *tsd;
 
 	if (unlikely(malloc_init()))
 		return (NULL);
@@ -211,11 +210,10 @@ a0alloc(size_t size, bool zero)
 	if (size == 0)
 		size = 1;
 
-	tsd = tsd_fetch();
 	if (size <= arena_maxclass)
-		ret = arena_malloc(tsd, a0get(), size, zero, false);
+		ret = arena_malloc(NULL, a0get(), size, zero, false);
 	else
-		ret = huge_malloc(tsd, a0get(), size, zero, false);
+		ret = huge_malloc(NULL, a0get(), size, zero, false);
 
 	return (ret);
 }
@@ -237,18 +235,16 @@ a0calloc(size_t num, size_t size)
 void
 a0free(void *ptr)
 {
-	tsd_t *tsd;
 	arena_chunk_t *chunk;
 
 	if (ptr == NULL)
 		return;
 
-	tsd = tsd_fetch();
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr)
-		arena_dalloc(tsd, chunk, ptr, false);
+		arena_dalloc(NULL, chunk, ptr, false);
 	else
-		huge_dalloc(tsd, ptr, false);
+		huge_dalloc(NULL, ptr, false);
 }
 
 /* Create a new arena and insert it into the arenas array at index ind. */
@@ -2301,9 +2297,9 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
  * fork/malloc races via the following functions it registers during
  * initialization using pthread_atfork(), but of course that does no good if
  * the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
  */
 JEMALLOC_ATTR(constructor)
 static void
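For context, the comment being reworded above describes jemalloc's pthread_atfork()-plus-constructor mitigation. A simplified, self-contained sketch of that pattern follows (illustrative names, not the real initialization code): the constructor forces initialization at load time, initialization registers fork handlers, and the handlers hold the allocator's lock across fork() so a child never inherits it locked. The residual hole is the one the comment names: a constructor in another library that runs, and forks, before jemalloc's does.

#include <pthread.h>
#include <unistd.h>

/* Illustrative stand-in for the allocator's internal locks. */
static pthread_mutex_t bootstrap_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Hold the lock across fork() so the child never inherits it locked. */
	pthread_mutex_lock(&bootstrap_lock);
}

static void
postfork(void)
{
	pthread_mutex_unlock(&bootstrap_lock);
}

static void
sketch_malloc_init(void)
{
	/* Real initialization would bootstrap the allocator, then: */
	pthread_atfork(prefork, postfork, postfork);
}

__attribute__((constructor))
static void
sketch_constructor(void)
{
	/*
	 * Runs at load time, so the handlers are registered before almost
	 * any code can fork.  A constructor that runs earlier and forks
	 * can still race, which is why the comment calls this partial.
	 */
	sketch_malloc_init();
}

int
main(void)
{
	/* The prefork/postfork handlers bracket this fork(). */
	if (fork() == 0)
		_exit(0);
	return (0);
}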
test/unit/mq.c
@@ -85,6 +85,7 @@ TEST_END
 int
 main(void)
 {
 
 	return (test(
 	    test_mq_basic,
 	    test_mq_threaded));