Make prof_tctx accesses atomic.

Although exceedingly unlikely, it appears that writes to the prof_tctx
field of arena_chunk_map_misc_t could be reordered such that a stale
value could be read during deallocation, with profiler metadata
corruption and invalid pointer dereferences being the most likely
effects.
This commit is contained in:
Jason Evans 2015-02-12 15:54:53 -08:00
parent 88fef7ceda
commit 5f7140b045

View File

@@ -943,8 +943,11 @@ arena_prof_tctx_get(const void *ptr)
 		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
 			ret = (prof_tctx_t *)(uintptr_t)1U;
-		else
-			ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+		else {
+			arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+			    pageind);
+			ret = atomic_read_p((void **)&elm->prof_tctx);
+		}
 	} else
 		ret = huge_prof_tctx_get(ptr);
@@ -965,8 +968,11 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-		if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
-			arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+		if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
+			arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+			    pageind);
+			atomic_write_p((void **)&elm->prof_tctx, tctx);
+		}
 	} else
 		huge_prof_tctx_set(ptr, tctx);
 }