Rework and fix the assertions on the malloc fastpath.

The first half of the malloc fastpath may execute before malloc_init.  Make the
assertions work in that case.
Author:    Qi Wang
Date:      2020-01-13 23:28:09 -08:00
Committer: Qi Wang
Commit:    dab81bd315
Parent:    ad3f3fc561

2 changed files with 48 additions and 13 deletions


@@ -152,10 +152,15 @@ sz_size2index_compute(size_t size) {
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup(size_t size) {
+sz_size2index_lookup_impl(size_t size) {
 	assert(size <= SC_LOOKUP_MAXCLASS);
-	szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
-	    >> SC_LG_TINY_MIN]);
+	return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
+	    >> SC_LG_TINY_MIN];
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+	szind_t ret = sz_size2index_lookup_impl(size);
 	assert(ret == sz_size2index_compute(size));
 	return ret;
 }
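This hunk splits the lookup in two: sz_size2index_lookup_impl does the raw table lookup (guarded only by the cheap bounds assertion), while sz_size2index_lookup layers the cross-check against the computed value on top, so code paths that must avoid the heavier assertion can call the _impl variant directly. Below is a minimal, self-contained sketch of the same pattern; the mini_* names and the four 8-byte-spaced size classes are hypothetical stand-ins, not jemalloc's actual tables.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Four hypothetical size classes: 8, 16, 24, 32 bytes.
 * tab[(size + 7) >> 3] is the class index; slot 0 (size 0) is unused. */
static const unsigned char mini_size2index_tab[] = {0xff, 0, 1, 2, 3};

/* Slow path: derive the class index arithmetically. */
static unsigned
mini_size2index_compute(size_t size) {
	assert(size >= 1 && size <= 32);
	return (unsigned)((size - 1) >> 3);
}

/* Raw lookup, analogous to sz_size2index_lookup_impl: only the cheap
 * bounds assertion, no cross-check against the computed value. */
static unsigned
mini_size2index_lookup_impl(size_t size) {
	assert(size >= 1 && size <= 32);
	return mini_size2index_tab[(size + 7) >> 3];
}

/* Asserting wrapper, analogous to sz_size2index_lookup: cross-checks
 * the table against the arithmetic slow path. */
static unsigned
mini_size2index_lookup(size_t size) {
	unsigned ret = mini_size2index_lookup_impl(size);
	assert(ret == mini_size2index_compute(size));
	return ret;
}

int
main(void) {
	for (size_t size = 1; size <= 32; size++) {
		printf("size %2zu -> class %u\n", size,
		    mini_size2index_lookup(size));
	}
	return 0;
}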
@@ -194,9 +199,14 @@ sz_index2size_compute(szind_t index) {
 	}
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup_impl(szind_t index) {
+	return sz_index2size_tab[index];
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 sz_index2size_lookup(szind_t index) {
-	size_t ret = (size_t)sz_index2size_tab[index];
+	size_t ret = sz_index2size_lookup_impl(index);
 	assert(ret == sz_index2size_compute(index));
 	return ret;
 }
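The reverse mapping gets the same treatment. Continuing the hypothetical miniature from the previous sketch (same caveats), the _impl helper is a bare table read and the wrapper cross-checks it against the arithmetic definition of the classes:

/* Class index back to usable size; extends the sketch above. */
static const size_t mini_index2size_tab[] = {8, 16, 24, 32};

/* Bare table read, analogous to sz_index2size_lookup_impl. */
static size_t
mini_index2size_lookup_impl(unsigned index) {
	return mini_index2size_tab[index];
}

/* Asserting wrapper, analogous to sz_index2size_lookup. */
static size_t
mini_index2size_lookup(unsigned index) {
	size_t ret = mini_index2size_lookup_impl(index);
	assert(ret == ((size_t)index + 1) << 3);
	return ret;
}

Since jemalloc's internal assert is compiled in only with --enable-debug, release builds of both wrappers reduce to plain table loads.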
@@ -207,6 +217,12 @@ sz_index2size(szind_t index) {
 	return sz_index2size_lookup(index);
 }
 
+JEMALLOC_ALWAYS_INLINE void
+sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
+	*ind = sz_size2index_lookup_impl(size);
+	*usize = sz_index2size_lookup_impl(*ind);
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 sz_s2u_compute(size_t size) {
 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
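The new sz_size2index_usize_fastpath produces both the size-class index and the usable size using only the _impl lookups, i.e. without the compute-based cross-check assertions, which is what lets the first half of the malloc fastpath run before malloc_init. The jemalloc.c side of this commit (the second changed file) is not shown above; the caller below is a purely hypothetical sketch of how such a helper might be used, not the actual fastpath code:

/* Hypothetical caller -- NOT the actual jemalloc.c change. */
static void *
example_malloc_fastpath(size_t size) {
	if (unlikely(size > SC_LOOKUP_MAXCLASS)) {
		return NULL;	/* defer to the general allocation path */
	}
	szind_t ind;
	size_t usize;
	/* Only raw table lookups here; the compute-based assertions in
	 * the non-_impl wrappers are skipped. */
	sz_size2index_usize_fastpath(size, &ind, &usize);
	/* ... fetch from the thread-cache bin for ind, accounting with
	 * usize; fall through to the slow path on a miss ... */
	return NULL;	/* placeholder for the cache hit/miss logic */
}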