diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 7a469462..4f744cac 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -85,7 +85,7 @@ size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 arena_t *arena_choose_huge(tsd_t *tsd);
-void arena_boot(void);
+void arena_boot(sc_data_t *sc_data);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
diff --git a/include/jemalloc/internal/extent_structs.h b/include/jemalloc/internal/extent_structs.h
index c6c1e234..1983097e 100644
--- a/include/jemalloc/internal/extent_structs.h
+++ b/include/jemalloc/internal/extent_structs.h
@@ -181,14 +181,14 @@ struct extents_s {
 	 *
 	 * Synchronization: mtx.
 	 */
-	extent_heap_t heaps[SC_NPSIZES_MAX + 1];
+	extent_heap_t heaps[SC_NPSIZES + 1];
 
 	/*
 	 * Bitmap for which set bits correspond to non-empty heaps.
 	 *
 	 * Synchronization: mtx.
 	 */
-	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES_MAX + 1)];
+	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
 
 	/*
 	 * LRU of all extents in heaps.
diff --git a/include/jemalloc/internal/sc.h b/include/jemalloc/internal/sc.h
index 5c94378c..5b79bb47 100644
--- a/include/jemalloc/internal/sc.h
+++ b/include/jemalloc/internal/sc.h
@@ -182,6 +182,7 @@
 #define SC_NGROUP (1ULL << SC_LG_NGROUP)
 #define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
 #define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
+#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
 #define SC_NPSEUDO SC_NGROUP
 #define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
 /*
@@ -200,7 +201,7 @@
  * because delta may be smaller than a page, this is not the same as the number
  * of size classes that are *multiples* of the page size.
  */
-#define SC_NPSIZES_MAX (						\
+#define SC_NPSIZES (							\
     /* Start with all the size classes. */				\
     SC_NSIZES								\
     /* Subtract out those groups with too small a base. */		\
@@ -209,11 +210,8 @@
     - SC_NPSEUDO							\
     /* And the tiny group. */						\
     - SC_NTINY								\
-    /*									\
-     * In the lg_base == lg_page - 1 group, only the last sc is big	\
-     * enough to make it to lg_page.					\
-     */									\
-    - (SC_NGROUP - 1))
+    /* Groups where ndelta*delta is not a multiple of the page size. */ \
+    - (2 * (SC_NGROUP)))
 
 /*
  * We declare a size class is binnable if size < page size * group. Or, in other
@@ -314,7 +312,6 @@ struct sc_data_s {
 	sc_t sc[SC_NSIZES];
 };
 
-extern sc_data_t sc_data_global;
 void sc_data_init(sc_data_t *data);
 /*
  * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
@@ -322,6 +319,6 @@ void sc_data_init(sc_data_t *data);
  */
 void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
     int pgs);
-void sc_boot();
+void sc_boot(sc_data_t *data);
 
 #endif /* JEMALLOC_INTERNAL_SC_H */
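Note on the new constant: SC_LG_TINY_MAXCLASS reproduces, at compile time, the lg_tiny_maxclass value that sc_data_init() computes at boot, which is what lets the sz.h fast paths below drop their loads of sc_data_global. A minimal sketch of the arithmetic, assuming a common configuration (LG_QUANTUM = 4 and SC_LG_TINY_MIN = 3 are assumptions here, not part of the patch):

    /* Tiny classes are the powers of two strictly below the quantum. */
    #include <assert.h>

    #define LG_QUANTUM     4 /* assumed: 16-byte quantum */
    #define SC_LG_TINY_MIN 3 /* assumed: smallest class is 8 bytes */

    #define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
    #define SC_LG_TINY_MAXCLASS \
        (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)

    int
    main(void) {
        assert(SC_NTINY == 1);            /* one tiny class: 8 bytes */
        assert(SC_LG_TINY_MAXCLASS == 3); /* largest tiny class is 2^3 */
        assert((1U << SC_LG_TINY_MAXCLASS) == 8);
        return 0;
    }

When LG_QUANTUM equals SC_LG_TINY_MIN there are no tiny classes at all, which is why the ternary falls back to -1 rather than always yielding LG_QUANTUM - 1.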
diff --git a/include/jemalloc/internal/sz.h b/include/jemalloc/internal/sz.h
index e743d878..69625ee2 100644
--- a/include/jemalloc/internal/sz.h
+++ b/include/jemalloc/internal/sz.h
@@ -26,7 +26,7 @@
  * sz_pind2sz_tab encodes the same information as could be computed by
  * sz_pind2sz_compute().
  */
-extern size_t sz_pind2sz_tab[SC_NPSIZES_MAX + 1];
+extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
 /*
  * sz_index2size_tab encodes the same information as could be computed (at
  * unacceptable cost in some code paths) by sz_index2size_compute().
@@ -52,7 +52,7 @@ extern void sz_boot(const sc_data_t *sc_data);
 JEMALLOC_ALWAYS_INLINE pszind_t
 sz_psz2ind(size_t psz) {
 	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
-		return sc_data_global.npsizes;
+		return SC_NPSIZES;
 	}
 	pszind_t x = lg_floor((psz<<1)-1);
 	pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
@@ -72,7 +72,7 @@ sz_psz2ind(size_t psz) {
 
 static inline size_t
 sz_pind2sz_compute(pszind_t pind) {
-	if (unlikely(pind == sc_data_global.npsizes)) {
+	if (unlikely(pind == SC_NPSIZES)) {
 		return SC_LARGE_MAXCLASS + PAGE;
 	}
 	size_t grp = pind >> SC_LG_NGROUP;
@@ -99,7 +99,7 @@ sz_pind2sz_lookup(pszind_t pind) {
 
 static inline size_t
 sz_pind2sz(pszind_t pind) {
-	assert(pind < sc_data_global.npsizes + 1);
+	assert(pind < SC_NPSIZES + 1);
 	return sz_pind2sz_lookup(pind);
 }
 
@@ -123,9 +123,8 @@ sz_size2index_compute(size_t size) {
 		return SC_NSIZES;
 	}
 #if (SC_NTINY != 0)
-	if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
-		szind_t lg_tmin = sc_data_global.lg_tiny_maxclass -
-		    sc_data_global.ntiny + 1;
+	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+		szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
 		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
 		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
 	}
@@ -143,7 +142,7 @@ sz_size2index_compute(size_t size) {
 		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
 		    ((ZU(1) << SC_LG_NGROUP) - 1);
 
-		szind_t index = sc_data_global.ntiny + grp + mod;
+		szind_t index = SC_NTINY + grp + mod;
 		return index;
 	}
 }
@@ -168,13 +167,12 @@ sz_size2index(size_t size) {
 static inline size_t
 sz_index2size_compute(szind_t index) {
 #if (SC_NTINY > 0)
-	if (index < sc_data_global.ntiny) {
-		return (ZU(1) << (sc_data_global.lg_tiny_maxclass -
-		    sc_data_global.ntiny + 1 + index));
+	if (index < SC_NTINY) {
+		return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
 	}
 #endif
 	{
-		size_t reduced_index = index - sc_data_global.ntiny;
+		size_t reduced_index = index - SC_NTINY;
 		size_t grp = reduced_index >> SC_LG_NGROUP;
 		size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - 1);
 
@@ -211,9 +209,8 @@ sz_s2u_compute(size_t size) {
 		return 0;
 	}
 #if (SC_NTINY > 0)
-	if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
-		size_t lg_tmin = sc_data_global.lg_tiny_maxclass -
-		    sc_data_global.ntiny + 1;
+	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+		size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
 		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
 		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
 		    (ZU(1) << lg_ceil));
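The sz.h changes are mechanical: each load of sc_data_global is replaced by the equivalent compile-time constant, and the index arithmetic itself is untouched. For readers new to that arithmetic, here is a standalone model of sz_psz2ind()'s group/offset decomposition. The constants are assumptions (4 KiB pages, four classes per size doubling), the SC_LARGE_MAXCLASS overflow guard is elided, and lg_floor() is approximated with a GCC/Clang builtin:

    /* Model of sz_psz2ind(): map a size to its page-size-class index. */
    #include <assert.h>
    #include <stddef.h>

    #define LG_PAGE      12 /* assumed: 4 KiB pages */
    #define SC_LG_NGROUP 2  /* four classes per doubling */

    static unsigned
    lg_floor(size_t x) {
        return 63 - (unsigned)__builtin_clzll((unsigned long long)x);
    }

    static unsigned
    psz2ind_model(size_t psz) {
        unsigned x = lg_floor((psz << 1) - 1); /* ceil(lg(psz)) */
        unsigned shift = (x < SC_LG_NGROUP + LG_PAGE) ?
            0 : x - (SC_LG_NGROUP + LG_PAGE);
        unsigned grp = shift << SC_LG_NGROUP;     /* first index of group */
        unsigned lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
            LG_PAGE : x - SC_LG_NGROUP - 1;       /* spacing within group */
        size_t delta_inverse_mask = (size_t)-1 << lg_delta;
        unsigned mod = (unsigned)(((psz - 1) & delta_inverse_mask) >>
            lg_delta) & ((1U << SC_LG_NGROUP) - 1);
        return grp + mod;
    }

    int
    main(void) {
        assert(psz2ind_model(4096) == 0);  /* one page: first class */
        assert(psz2ind_model(4097) == 1);  /* rounds up to two pages */
        assert(psz2ind_model(8192) == 1);
        assert(psz2ind_model(16384) == 3); /* last class of group zero */
        return 0;
    }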
diff --git a/src/arena.c b/src/arena.c
index 91043cff..da7fd78b 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1754,8 +1754,7 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
 	if (new_limit != NULL) {
 		size_t limit = *new_limit;
 		/* Grow no more than the new limit. */
-		if ((new_ind = sz_psz2ind(limit + 1) - 1)
-		    >= sc_data_global.npsizes) {
+		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
 			return true;
 		}
 	}
@@ -1899,7 +1898,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	}
 
 	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
-	arena->retain_grow_limit = sc_data_global.npsizes - 1;
+	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
 	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
 	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
 		goto label_error;
@@ -2001,11 +2000,11 @@ arena_init_huge(void) {
 }
 
 void
-arena_boot(void) {
+arena_boot(sc_data_t *sc_data) {
 	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
 	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
 	for (unsigned i = 0; i < SC_NBINS; i++) {
-		sc_t *sc = &sc_data_global.sc[i];
+		sc_t *sc = &sc_data->sc[i];
 		div_init(&arena_binind_div_info[i], (1U << sc->lg_base)
 		    + (sc->ndelta << sc->lg_delta));
 	}
diff --git a/src/base.c b/src/base.c
index cabf66c4..f3c61661 100644
--- a/src/base.c
+++ b/src/base.c
@@ -262,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
 	 */
 	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
 	    + usize));
-	pszind_t pind_next = (*pind_last + 1 < sc_data_global.npsizes) ?
+	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
 	    *pind_last + 1 : *pind_last;
 	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
 	size_t block_size = (min_block_size > next_block_size) ? min_block_size
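In extent.c below, each extents_t keeps one heap per page-size class plus one extra slot, with a bitmap marking the non-empty heaps; making SC_NPSIZES a compile-time constant again is what lets both arrays and extents_bitmap_info return to static sizing. The fit loops walk set bits upward from the requested class. A minimal sketch of that scan pattern, assuming all classes fit in a single 64-bit word (the real code runs bitmap_ffu() across BITMAP_GROUPS(SC_NPSIZES + 1) words):

    /* First-fit scan: lowest non-empty heap at or above the wanted class. */
    #include <assert.h>
    #include <stdint.h>

    static int
    ffu_model(uint64_t nonempty, unsigned min_ind) {
        uint64_t candidates = nonempty & (~UINT64_C(0) << min_ind);
        /* -1 plays the role of "past the last heap" in the real loop. */
        return candidates == 0 ? -1 : __builtin_ctzll(candidates);
    }

    int
    main(void) {
        uint64_t nonempty = 0;
        nonempty |= UINT64_C(1) << 3;  /* heaps[3] holds an extent */
        nonempty |= UINT64_C(1) << 17; /* heaps[17] holds an extent */

        assert(ffu_model(nonempty, 0) == 3);   /* class 3 satisfies */
        assert(ffu_model(nonempty, 4) == 17);  /* skip empty heaps */
        assert(ffu_model(nonempty, 18) == -1); /* nothing big enough */
        return 0;
    }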
diff --git a/src/extent.c b/src/extent.c
index 74076b66..592974a8 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -20,7 +20,7 @@ mutex_pool_t extent_mutex_pool;
 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
 
 static const bitmap_info_t extents_bitmap_info =
-    BITMAP_INFO_INITIALIZER(SC_NPSIZES_MAX+1);
+    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
 
 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
     size_t size, size_t alignment, bool *zero, bool *commit,
@@ -288,7 +288,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
 	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	for (unsigned i = 0; i < sc_data_global.npsizes + 1; i++) {
+	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
 		extent_heap_new(&extents->heaps[i]);
 	}
 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
@@ -375,7 +375,7 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
 	    &extents_bitmap_info, (size_t)pind); i < pind_max;
 	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
 	    (size_t)i+1)) {
-		assert(i < sc_data_global.npsizes);
+		assert(i < SC_NPSIZES);
 		assert(!extent_heap_empty(&extents->heaps[i]));
 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
 		uintptr_t base = (uintptr_t)extent_base_get(extent);
@@ -405,7 +405,7 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
 	    &extents_bitmap_info, (size_t)pind);
-	if (i < sc_data_global.npsizes + 1) {
+	if (i < SC_NPSIZES + 1) {
 		/*
 		 * In order to reduce fragmentation, avoid reusing and splitting
 		 * large extents for much smaller sizes.
@@ -434,7 +434,7 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
 	    &extents_bitmap_info, (size_t)pind);
-	    i < sc_data_global.npsizes + 1;
+	    i < SC_NPSIZES + 1;
 	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
 	    (size_t)i+1)) {
 		assert(!extent_heap_empty(&extents->heaps[i]));
@@ -443,10 +443,10 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
 			ret = extent;
 		}
-		if (i == sc_data_global.npsizes) {
+		if (i == SC_NPSIZES) {
 			break;
 		}
-		assert(i < sc_data_global.npsizes);
+		assert(i < SC_NPSIZES);
 	}
 
 	return ret;
@@ -1249,13 +1249,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
 	while (alloc_size < alloc_size_min) {
 		egn_skip++;
-		if (arena->extent_grow_next + egn_skip ==
-		    sc_data_global.npsizes) {
+		if (arena->extent_grow_next + egn_skip >=
+		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
 			/* Outside legal range. */
 			goto label_err;
 		}
-		assert(arena->extent_grow_next + egn_skip
-		    < sc_data_global.npsizes);
 		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
 	}
 
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 8e0a581b..d473664f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -920,7 +920,7 @@ malloc_slow_flag_init(void) {
 }
 
 static void
-malloc_conf_init(void) {
+malloc_conf_init(sc_data_t *sc_data) {
 	unsigned i;
 	char buf[PATH_MAX + 1];
 	const char *opts, *k, *v;
@@ -1254,7 +1254,7 @@ malloc_conf_init(void) {
 				    &pgs);
 				if (!err) {
 					sc_data_update_slab_size(
-					    &sc_data_global, slab_start,
+					    sc_data, slab_start,
 					    slab_end, (int)pgs);
 				} else {
 					malloc_conf_error(
@@ -1368,6 +1368,11 @@ static bool
 malloc_init_hard_a0_locked() {
 	malloc_initializer = INITIALIZER;
 
+	JEMALLOC_DIAGNOSTIC_PUSH
+	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+	sc_data_t sc_data = {0};
+	JEMALLOC_DIAGNOSTIC_POP
+
 	/*
 	 * Ordering here is somewhat tricky; we need sc_boot() first, since that
 	 * determines what the size classes will be, and then
@@ -1375,10 +1380,10 @@ malloc_init_hard_a0_locked() {
 	 * before sz_boot and bin_boot, which assume that the values they read
 	 * out of sc_data_global are final.
 	 */
-	sc_boot();
-	malloc_conf_init();
-	sz_boot(&sc_data_global);
-	bin_boot(&sc_data_global);
+	sc_boot(&sc_data);
+	malloc_conf_init(&sc_data);
+	sz_boot(&sc_data);
+	bin_boot(&sc_data);
 
 	if (config_prof) {
 		prof_boot0();
@@ -1407,7 +1412,7 @@ malloc_init_hard_a0_locked() {
 	if (config_prof) {
 		prof_boot1();
 	}
-	arena_boot();
+	arena_boot(&sc_data);
 	if (tcache_boot(TSDN_NULL)) {
 		return true;
 	}
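The jemalloc.c hunk is the heart of the patch: sc_data_global is gone, and the boot path instead builds an sc_data_t on the stack and threads it through sc_boot(), malloc_conf_init(), sz_boot(), bin_boot(), and arena_boot(), after which only the derived tables survive. A minimal sketch of that shape, with hypothetical names (cfg_t and friends are illustrative stand-ins, not jemalloc APIs):

    /* Build config on the stack; options mutate it; consumers snapshot it. */
    #include <stdio.h>
    #include <string.h>

    typedef struct { int slab_pages[4]; } cfg_t; /* stands in for sc_data_t */

    static int frozen_slab_pages[4]; /* stands in for the sz/bin boot tables */

    static void cfg_defaults(cfg_t *cfg) {        /* like sc_boot(&sc_data) */
        for (int i = 0; i < 4; i++) {
            cfg->slab_pages[i] = 1;
        }
    }
    static void cfg_apply_options(cfg_t *cfg) {   /* like malloc_conf_init() */
        cfg->slab_pages[2] = 3; /* e.g. a slab_sizes option resizing a bin */
    }
    static void tables_freeze(const cfg_t *cfg) { /* like sz_boot()/bin_boot() */
        memcpy(frozen_slab_pages, cfg->slab_pages, sizeof(frozen_slab_pages));
    }

    int
    main(void) {
        cfg_t cfg; /* analogous to the stack-local sc_data */
        cfg_defaults(&cfg);
        cfg_apply_options(&cfg); /* must run before the freeze */
        tables_freeze(&cfg);
        printf("bin 2 slab pages: %d\n", frozen_slab_pages[2]);
        return 0;
    }

The ordering constraint is exactly the one the in-tree comment describes: options may still mutate the config before sz_boot()/bin_boot() snapshot it, so the snapshot has to come last.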
diff --git a/src/sc.c b/src/sc.c
index 74c91018..8784bdd0 100644
--- a/src/sc.c
+++ b/src/sc.c
@@ -238,6 +238,8 @@ size_classes(
 	 * touch the extra global cacheline. We assert, however, that the two
 	 * computations are equivalent.
 	 */
+	assert(sc_data->npsizes == SC_NPSIZES);
+	assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
 	assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
 	assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
 	assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
@@ -297,6 +299,6 @@ sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
 }
 
 void
-sc_boot() {
-	sc_data_init(&sc_data_global);
+sc_boot(sc_data_t *data) {
+	sc_data_init(data);
 }
diff --git a/src/sz.c b/src/sz.c
index e038728e..77f89c62 100644
--- a/src/sz.c
+++ b/src/sz.c
@@ -2,7 +2,7 @@
 #include "jemalloc/internal/sz.h"
 
 JEMALLOC_ALIGNED(CACHELINE)
-size_t sz_pind2sz_tab[SC_NPSIZES_MAX+1];
+size_t sz_pind2sz_tab[SC_NPSIZES+1];
 
 static void
 sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
@@ -15,7 +15,9 @@ sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
 			pind++;
 		}
 	}
-	sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
+	for (int i = pind; i <= (int)SC_NPSIZES; i++) {
+		sz_pind2sz_tab[i] = sc_data->large_maxclass + PAGE;
+	}
 }
 
 JEMALLOC_ALIGNED(CACHELINE)
diff --git a/test/unit/junk.c b/test/unit/junk.c
index be8933a7..57e3ad43 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -129,8 +129,7 @@ TEST_END
 
 TEST_BEGIN(test_junk_large) {
 	test_skip_if(!config_fill);
-	test_junk(SC_SMALL_MAXCLASS + 1,
-	    (1U << (sc_data_global.lg_large_minclass + 1)));
+	test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
 }
 TEST_END
 
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index f6362008..452d884d 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -581,7 +581,7 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
 
 	assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
 	    "Unexpected mallctl() failure");
-	assert_zu_eq(default_limit, sz_pind2sz(sc_data_global.npsizes - 1),
+	assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
 	    "Unexpected default for retain_grow_limit");
 
 	new_limit = PAGE - 1;
diff --git a/test/unit/prof_gdump.c b/test/unit/prof_gdump.c
index 0b8d7c34..f7e0aac7 100644
--- a/test/unit/prof_gdump.c
+++ b/test/unit/prof_gdump.c
@@ -29,12 +29,12 @@ TEST_BEGIN(test_gdump) {
 	prof_dump_open = prof_dump_open_intercept;
 
 	did_prof_dump_open = false;
-	p = mallocx((1U << sc_data_global.lg_large_minclass), 0);
+	p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
 	did_prof_dump_open = false;
-	q = mallocx((1U << sc_data_global.lg_large_minclass), 0);
+	q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
@@ -45,7 +45,7 @@ TEST_BEGIN(test_gdump) {
 	    "Unexpected mallctl failure while disabling prof.gdump");
 	assert(gdump_old);
 	did_prof_dump_open = false;
-	r = mallocx((1U << sc_data_global.lg_large_minclass), 0);
+	r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_false(did_prof_dump_open, "Unexpected profile dump");
 
@@ -56,7 +56,7 @@ TEST_BEGIN(test_gdump) {
 	    "Unexpected mallctl failure while enabling prof.gdump");
 	assert(!gdump_old);
 	did_prof_dump_open = false;
-	s = mallocx((1U << sc_data_global.lg_large_minclass), 0);
+	s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
 	assert_ptr_not_null(q, "Unexpected mallocx() failure");
 	assert_true(did_prof_dump_open, "Expected a profile dump");
 
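One subtlety the updated tests below depend on: SC_NPSIZES is not just the class count, it doubles as the overflow sentinel. sz_psz2ind() returns it for anything past SC_LARGE_MAXCLASS, and the fill loop added to sz.c above pads every table slot from the computed npsizes up through SC_NPSIZES with large_maxclass + PAGE, so indexing with the sentinel stays well defined. A toy model of that contract, with made-up constants:

    /* Indices 0..NPSIZES-1 name real classes; index NPSIZES is padded. */
    #include <assert.h>
    #include <stddef.h>

    enum { PAGE_MODEL = 4096, NPSIZES_MODEL = 4 };

    static const size_t tab[NPSIZES_MODEL + 1] = {
        4096, 8192, 12288, 16384,  /* real page-size classes */
        16384 + PAGE_MODEL         /* sentinel: large_maxclass + PAGE */
    };

    static unsigned
    psz2ind_model(size_t psz) {
        for (unsigned i = 0; i < NPSIZES_MODEL; i++) {
            if (psz <= tab[i]) {
                return i;
            }
        }
        return NPSIZES_MODEL; /* overflow maps to the sentinel index */
    }

    int
    main(void) {
        assert(psz2ind_model(16384) == NPSIZES_MODEL - 1); /* last class */
        assert(psz2ind_model(16385) == NPSIZES_MODEL);     /* overflow */
        assert(tab[psz2ind_model(16385)] == 16384 + PAGE_MODEL);
        return 0;
    }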
diff --git a/test/unit/size_classes.c b/test/unit/size_classes.c
index 7c28e166..69473363 100644
--- a/test/unit/size_classes.c
+++ b/test/unit/size_classes.c
@@ -108,8 +108,13 @@ TEST_BEGIN(test_psize_classes) {
 		    size_class, sz_psz2ind(size_class),
 		    sz_pind2sz(sz_psz2ind(size_class)));
 
-		assert_u_eq(pind+1, sz_psz2ind(size_class+1),
-		    "Next size_class does not round up properly");
+		if (size_class == SC_LARGE_MAXCLASS) {
+			assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
+			    "Next size_class does not round up properly");
+		} else {
+			assert_u_eq(pind + 1, sz_psz2ind(size_class + 1),
+			    "Next size_class does not round up properly");
+		}
 		assert_zu_eq(size_class, (pind > 0) ?
 		    sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
@@ -156,16 +161,13 @@ TEST_BEGIN(test_overflow) {
 	assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
 	    "sz_s2u() should return 0 on overflow");
 
-	assert_u_eq(sz_psz2ind(max_size_class+1), sc_data_global.npsizes,
+	assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
 	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), sc_data_global.npsizes,
+	assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
 	    "sz_psz2ind() should return NPSIZES on overflow");
-	assert_u_eq(sz_psz2ind(SIZE_T_MAX), sc_data_global.npsizes,
+	assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
 	    "sz_psz2ind() should return NPSIZES on overflow");
 
-	assert_u_le(sc_data_global.npsizes, SC_NPSIZES_MAX,
-	    "Dynamic value of npsizes is higher than static bound.");
-
 	assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
 	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
 	    " size");
diff --git a/test/unit/stats.c b/test/unit/stats.c
index b8f549be..4323bfa3 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -76,7 +76,7 @@ TEST_BEGIN(test_stats_arenas_summary) {
 
 	little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
-	large = mallocx((1U << sc_data_global.lg_large_minclass),
+	large = mallocx((1U << SC_LG_LARGE_MINCLASS),
 	    MALLOCX_ARENA(0));
 	assert_ptr_not_null(large, "Unexpected mallocx() failure");
 
@@ -192,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large) {
 	uint64_t epoch, nmalloc, ndalloc;
 	int expected = config_stats ? 0 : ENOENT;
 
-	p = mallocx((1U << sc_data_global.lg_large_minclass), MALLOCX_ARENA(0));
+	p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
 	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 8b8d2072..271fd5cb 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -47,8 +47,7 @@ TEST_END
 
 TEST_BEGIN(test_zero_large) {
 	test_skip_if(!config_fill);
-	test_zero(SC_SMALL_MAXCLASS + 1,
-	    1U << (sc_data_global.lg_large_minclass + 1));
+	test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
 }
 TEST_END