Use offsetof() when sizing dynamic structures.
Base the allocation size of a dynamically sized structure on offsetof() of its trailing array member (offsetof(type, member) + sizeof(element) * count), rather than on sizeof(type) minus the size of one embedded element. The two expressions can differ on systems with strict data structure alignment requirements, because sizeof(type) includes trailing padding that offsetof(type, member) does not.
This commit is contained in:
parent
3377ffa1f4
commit
c2fc8c8b3a
@ -101,8 +101,8 @@ arenas_extend(unsigned ind)
|
|||||||
arena_t *ret;
|
arena_t *ret;
|
||||||
|
|
||||||
/* Allocate enough space for trailing bins. */
|
/* Allocate enough space for trailing bins. */
|
||||||
ret = (arena_t *)base_alloc(sizeof(arena_t)
|
ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
|
||||||
+ (sizeof(arena_bin_t) * (nbins - 1)));
|
+ (sizeof(arena_bin_t) * nbins));
|
||||||
if (ret != NULL && arena_new(ret, ind) == false) {
|
if (ret != NULL && arena_new(ret, ind) == false) {
|
||||||
arenas[ind] = ret;
|
arenas[ind] = ret;
|
||||||
return (ret);
|
return (ret);
|
||||||
|
@ -13,11 +13,12 @@ rtree_new(unsigned bits)
|
|||||||
height++;
|
height++;
|
||||||
assert(height * bits_per_level >= bits);
|
assert(height * bits_per_level >= bits);
|
||||||
|
|
||||||
ret = (rtree_t*)base_alloc(sizeof(rtree_t) + (sizeof(unsigned) *
|
ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
|
||||||
(height - 1)));
|
(sizeof(unsigned) * height));
|
||||||
if (ret == NULL)
|
if (ret == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
memset(ret, 0, sizeof(rtree_t) + (sizeof(unsigned) * (height - 1)));
|
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
|
||||||
|
height));
|
||||||
|
|
||||||
malloc_mutex_init(&ret->mutex);
|
malloc_mutex_init(&ret->mutex);
|
||||||
ret->height = height;
|
ret->height = height;
|
||||||
|
@ -204,7 +204,7 @@ tcache_create(arena_t *arena)
|
|||||||
size_t size;
|
size_t size;
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
|
||||||
size = sizeof(tcache_t) + (sizeof(tcache_bin_t) * (nhbins - 1));
|
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
|
||||||
/*
|
/*
|
||||||
* Round up to the nearest multiple of the cacheline size, in order to
|
* Round up to the nearest multiple of the cacheline size, in order to
|
||||||
* avoid the possibility of false cacheline sharing.
|
* avoid the possibility of false cacheline sharing.
|
||||||
|
Loading…
Reference in New Issue
Block a user