path: root/src/prof.c
author     David Goldblatt <davidgoldblatt@fb.com>     2017-05-15 15:38:15 -0700
committer  David Goldblatt <davidtgoldblatt@gmail.com> 2017-05-19 14:21:27 -0700
commit     26c792e61a163b38b373023bca2947283dcd1fc8 (patch)
tree       89c4b0e1275707f6e671a23ad6ed548484becabc /src/prof.c
parent     6e62c6286258e340308b4a989b4bd80232fed8e1 (diff)
Allow mutexes to take a lock ordering enum at construction.
This lets us specify whether and how mutexes of the same rank are allowed to be acquired. Currently, we only allow two policies (only a single mutex of a given rank may be held at a time, or mutexes of the same rank must be acquired in ascending order), but we could plausibly allow more (e.g. a "release uncontended mutexes before blocking" policy).
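For reference, the ordering policy is passed as an enum argument to malloc_mutex_init(). A minimal sketch of the shape of that interface follows; only malloc_mutex_rank_exclusive is confirmed by the diff below, so the second enumerator name and the exact prototype are assumptions based on later jemalloc sources:

/*
 * Sketch of the lock-ordering policy taken by malloc_mutex_init().
 * malloc_mutex_rank_exclusive appears in this diff; the other
 * enumerator name is assumed.
 */
typedef enum {
	/* Only one mutex of a given witness rank may be held at a time. */
	malloc_mutex_rank_exclusive,
	/*
	 * Multiple mutexes of the same witness rank may be held, but they
	 * must be acquired in ascending (address) order.
	 */
	malloc_mutex_address_ordered
} malloc_mutex_lock_order_t;

/* The initializer gains the lock_order parameter at the end. */
bool	malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);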
Diffstat (limited to 'src/prof.c')
-rw-r--r--  src/prof.c  25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/src/prof.c b/src/prof.c
index 470d926f..18978810 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -1754,7 +1754,7 @@ prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
#ifndef JEMALLOC_ATOMIC_U64
if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
- WITNESS_RANK_PROF_ACCUM)) {
+ WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
return true;
}
prof_accum->accumbytes = 0;
@@ -2289,20 +2289,21 @@ prof_boot2(tsd_t *tsd) {
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE)) {
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
return true;
}
prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP)) {
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
return true;
}
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
return true;
}
@@ -2311,28 +2312,28 @@ prof_boot2(tsd_t *tsd) {
return true;
}
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX)) {
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
return true;
}
tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS)) {
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
return true;
}
next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID)) {
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ)) {
+ WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP)) {
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}
@@ -2352,7 +2353,8 @@ prof_boot2(tsd_t *tsd) {
}
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX)) {
+ WITNESS_RANK_PROF_GCTX,
+ malloc_mutex_rank_exclusive)) {
return true;
}
}
@@ -2365,7 +2367,8 @@ prof_boot2(tsd_t *tsd) {
}
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA)) {
+ WITNESS_RANK_PROF_TDATA,
+ malloc_mutex_rank_exclusive)) {
return true;
}
}
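All of the prof mutexes above use the exclusive policy. The commit message also mentions a second policy (same-rank mutexes acquired in ascending order); none of the call sites in this file exercise it, but a hedged sketch of what such a call site might look like is shown here, with a hypothetical rank constant and the assumed enumerator name malloc_mutex_address_ordered:

/*
 * Hypothetical example: two locks sharing a witness rank, constructed with
 * the ordered policy so both may be held if taken in address order.
 * WITNESS_RANK_EXAMPLE is not a real jemalloc constant.
 */
static malloc_mutex_t lock_a, lock_b;

static bool
example_init(void) {
	if (malloc_mutex_init(&lock_a, "example_a",
	    WITNESS_RANK_EXAMPLE, malloc_mutex_address_ordered)) {
		return true;
	}
	if (malloc_mutex_init(&lock_b, "example_b",
	    WITNESS_RANK_EXAMPLE, malloc_mutex_address_ordered)) {
		return true;
	}
	return false;
}

static void
example_lock_both(tsdn_t *tsdn) {
	/* Acquire in ascending address order so the witness checks pass. */
	malloc_mutex_t *first = &lock_a < &lock_b ? &lock_a : &lock_b;
	malloc_mutex_t *second = &lock_a < &lock_b ? &lock_b : &lock_a;
	malloc_mutex_lock(tsdn, first);
	malloc_mutex_lock(tsdn, second);
	/* ... critical section ... */
	malloc_mutex_unlock(tsdn, second);
	malloc_mutex_unlock(tsdn, first);
}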