Clean up prof-related comments.

Clean up some prof-related comments to more accurately reflect how the
code works.

Simplify OOM handling code in a couple of prof-related error paths.
Jason Evans 2011-08-09 19:06:06 -07:00
parent 41b954ed36
commit 0cdd42eb32
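
For context on what the simplification buys: when prof_lookup() creates a new ctx it leaves ctx->cnt_merged.curobjs artificially raised, and a later allocation failure has to give that reference back. The old error paths decremented the counter by hand under ctx->lock; the new ones hand the reference to prof_ctx_destroy(), which either tears down an unused ctx or compensates for the increment. A minimal sketch of that idiom follows; the types and names are simplified stand-ins (pthreads instead of malloc_mutex, a bare counter instead of prof_cnt_t), not jemalloc's actual code.

#include <pthread.h>
#include <stdlib.h>

/* Simplified stand-in for prof_ctx_t: one counter guarded by one lock. */
typedef struct {
	pthread_mutex_t	lock;
	unsigned	curobjs;	/* Stands in for ctx->cnt_merged.curobjs. */
} ctx_t;

/* Create a ctx whose counter starts artificially raised, as in prof_lookup(). */
static ctx_t *
ctx_new(void)
{
	ctx_t *ctx = malloc(sizeof(*ctx));

	if (ctx == NULL)
		return (NULL);
	pthread_mutex_init(&ctx->lock, NULL);
	ctx->curobjs = 1;	/* The artificial reference. */
	return (ctx);
}

/*
 * Analogue of prof_ctx_destroy(): if only the artificial reference remains,
 * destroy the ctx; otherwise just compensate for the increment, which is
 * exactly what the hand-rolled error paths used to do inline.
 */
static void
ctx_destroy(ctx_t *ctx)
{

	pthread_mutex_lock(&ctx->lock);
	if (ctx->curobjs == 1) {
		pthread_mutex_unlock(&ctx->lock);
		pthread_mutex_destroy(&ctx->lock);
		free(ctx);
	} else {
		ctx->curobjs--;
		pthread_mutex_unlock(&ctx->lock);
	}
}

int
main(void)
{
	ctx_t *ctx = ctx_new();

	if (ctx == NULL)
		return (1);
	/* Simulate an OOM error path that created a new ctx: one call suffices. */
	ctx_destroy(ctx);
	return (0);
}

The consolidation means every error path that holds an artificial reference makes one call instead of repeating the lock/decrement/unlock dance, and the destroy-if-unused logic lives in a single place.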


@@ -503,11 +503,8 @@ prof_lookup(prof_bt_t *bt)
 		/* Allocate and partially initialize a new cnt. */
 		ret.v = imalloc(sizeof(prof_thr_cnt_t));
 		if (ret.p == NULL) {
-			if (new_ctx) {
-				malloc_mutex_lock(&ctx.p->lock);
-				ctx.p->cnt_merged.curobjs--;
-				malloc_mutex_unlock(&ctx.p->lock);
-			}
+			if (new_ctx)
+				prof_ctx_destroy(ctx.p);
 			return (NULL);
 		}
 		ql_elm_new(ret.p, cnts_link);
@@ -518,11 +515,8 @@ prof_lookup(prof_bt_t *bt)
 		ret.p->epoch = 0;
 		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
 		if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
-			if (new_ctx) {
-				malloc_mutex_lock(&ctx.p->lock);
-				ctx.p->cnt_merged.curobjs--;
-				malloc_mutex_unlock(&ctx.p->lock);
-			}
+			if (new_ctx)
+				prof_ctx_destroy(ctx.p);
 			idalloc(ret.v);
 			return (NULL);
 		}
@@ -644,11 +638,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
 
 	/*
 	 * Check that ctx is still unused by any thread cache before destroying
-	 * it.  prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to
-	 * avoid a race condition with this function, and prof_ctx_merge()
-	 * artificially raises ctx->cnt_merged.curobjs in order to avoid a race
-	 * between the main body of prof_ctx_merge() and entry into this
-	 * function.
+	 * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
+	 * order to avoid a race condition with this function, as does
+	 * prof_ctx_merge() in order to avoid a race between the main body of
+	 * prof_ctx_merge() and entry into this function.
 	 */
 	prof_enter();
 	malloc_mutex_lock(&ctx->lock);
@@ -665,7 +658,10 @@ prof_ctx_destroy(prof_ctx_t *ctx)
 		malloc_mutex_destroy(&ctx->lock);
 		idalloc(ctx);
 	} else {
-		/* Compensate for increment in prof_ctx_merge(). */
+		/*
+		 * Compensate for increment in prof_ctx_merge() or
+		 * prof_lookup().
+		 */
 		ctx->cnt_merged.curobjs--;
 		malloc_mutex_unlock(&ctx->lock);
 		prof_leave();
@@ -1130,18 +1126,15 @@ prof_tdata_cleanup(void *arg)
 	prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
 
 	/*
-	 * Delete the hash table.  All of its contents can still be
-	 * iterated over via the LRU.
+	 * Delete the hash table.  All of its contents can still be iterated
+	 * over via the LRU.
 	 */
 	ckh_delete(&prof_tdata->bt2cnt);
 
-	/*
-	 * Iteratively merge cnt's into the global stats and delete
-	 * them.
-	 */
+	/* Iteratively merge cnt's into the global stats and delete them. */
 	while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
-		prof_ctx_merge(cnt->ctx, cnt);
 		ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+		prof_ctx_merge(cnt->ctx, cnt);
 		idalloc(cnt);
 	}
 
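
The final hunk also reorders the cleanup loop so each cnt is unlinked from the per-thread LRU before its counts are merged into the global stats and it is freed. A hedged stand-alone sketch of that drain pattern; the list and counter below are invented stand-ins for jemalloc's ql_* macros, prof_ctx_merge(), and idalloc(), not its real data structures.

#include <stdlib.h>

/* Invented stand-in for prof_thr_cnt_t on a singly linked LRU list. */
typedef struct cnt_s cnt_t;
struct cnt_s {
	cnt_t		*next;
	unsigned long	curobjs;
};

static unsigned long	merged_curobjs;	/* Stand-in for the global ctx stats. */

static void
tdata_cleanup(cnt_t **head)
{
	cnt_t *cnt;

	while ((cnt = *head) != NULL) {
		*head = cnt->next;		/* Unlink first (ql_remove). */
		merged_curobjs += cnt->curobjs;	/* Then merge (prof_ctx_merge). */
		free(cnt);			/* Finally free (idalloc). */
	}
}

int
main(void)
{
	cnt_t *head = NULL;
	unsigned i;

	for (i = 0; i < 3; i++) {
		cnt_t *cnt = malloc(sizeof(*cnt));

		if (cnt == NULL)
			return (1);
		cnt->curobjs = 1;
		cnt->next = head;
		head = cnt;
	}
	tdata_cleanup(&head);
	return (merged_curobjs == 3 ? 0 : 1);
}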