diff options
| author | Yinan Zhang <zyn8950@gmail.com> | 2020-04-22 17:22:43 -0700 |
|---|---|---|
| committer | Yinan Zhang <zyn8950@gmail.com> | 2020-07-31 09:16:50 -0700 |
| commit | f28cc2bc87199e031b9d035ccdff6a2d429274c9 (patch) | |
| tree | e062b013120a9ea19dcbab4540d525c2e7e95f25 | |
| parent | ddb8dc4ad0523e07ab0475d6c9583d8ca27de8dc (diff) | |
| download | platform_external_jemalloc_new-f28cc2bc87199e031b9d035ccdff6a2d429274c9.tar.gz platform_external_jemalloc_new-f28cc2bc87199e031b9d035ccdff6a2d429274c9.tar.bz2 platform_external_jemalloc_new-f28cc2bc87199e031b9d035ccdff6a2d429274c9.zip | |
Extract bin shard selection out of bin locking
| -rw-r--r-- | include/jemalloc/internal/arena_externs.h | 2 |
| -rw-r--r-- | src/arena.c | 32 |
| -rw-r--r-- | src/tcache.c | 11 |
3 files changed, 23 insertions, 22 deletions
diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 674c98f5..c600d10f 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -85,7 +85,7 @@ arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 bool arena_is_huge(unsigned arena_ind);
 arena_t *arena_choose_huge(tsd_t *tsd);
-bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
     unsigned *binshard);
 void arena_boot(sc_data_t *sc_data);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
diff --git a/src/arena.c b/src/arena.c
index 46da3859..1df276b0 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -739,21 +739,20 @@ arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
         return (bin->slabcur == NULL);
 }
 
-/* Choose a bin shard and return the locked bin. */
 bin_t *
-arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
-    unsigned *binshard) {
-        bin_t *bin;
+arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    unsigned *binshard_p) {
+        unsigned binshard;
         if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
-                *binshard = 0;
+                binshard = 0;
         } else {
-                *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+                binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
         }
-        assert(*binshard < bin_infos[binind].n_shards);
-        bin = &arena->bins[binind].bin_shards[*binshard];
-        malloc_mutex_lock(tsdn, &bin->lock);
-
-        return bin;
+        assert(binshard < bin_infos[binind].n_shards);
+        if (binshard_p != NULL) {
+                *binshard_p = binshard;
+        }
+        return &arena->bins[binind].bin_shards[binshard];
 }
 
 void
@@ -797,11 +796,12 @@ arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
         edata_t *fresh_slab = NULL;
         bool alloc_and_retry = false;
         unsigned filled = 0;
-
-        bin_t *bin;
         unsigned binshard;
+        bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
 label_refill:
-        bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
+        malloc_mutex_lock(tsdn, &bin->lock);
+
         while (filled < nfill) {
                 /* Try batch-fill from slabcur first. */
                 edata_t *slabcur = bin->slabcur;
@@ -854,6 +854,7 @@ label_refill:
                 bin->stats.nfills++;
                 cache_bin->tstats.nrequests = 0;
         }
+
         malloc_mutex_unlock(tsdn, &bin->lock);
 
         if (alloc_and_retry) {
@@ -906,8 +907,9 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
         const bin_info_t *bin_info = &bin_infos[binind];
         size_t usize = sz_index2size(binind);
         unsigned binshard;
-        bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
+        bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+        malloc_mutex_lock(tsdn, &bin->lock);
 
         edata_t *fresh_slab = NULL;
         void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
         if (ret == NULL) {
diff --git a/src/tcache.c b/src/tcache.c
index a33d9e24..b681ee10 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -454,9 +454,9 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
                          * thread's arena, so the stats didn't get merged.
                          * Manually do so now.
                          */
-                        unsigned binshard;
-                        bin_t *bin = arena_bin_choose_lock(tsdn, tcache_arena,
-                            binind, &binshard);
+                        bin_t *bin = arena_bin_choose(tsdn, tcache_arena,
+                            binind, NULL);
+                        malloc_mutex_lock(tsdn, &bin->lock);
                         bin->stats.nflushes++;
                         bin->stats.nrequests += cache_bin->tstats.nrequests;
                         cache_bin->tstats.nrequests = 0;
@@ -751,9 +751,8 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
         for (unsigned i = 0; i < nhbins; i++) {
                 cache_bin_t *cache_bin = &tcache->bins[i];
                 if (i < SC_NBINS) {
-                        unsigned binshard;
-                        bin_t *bin = arena_bin_choose_lock(tsdn, arena, i,
-                            &binshard);
+                        bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL);
+                        malloc_mutex_lock(tsdn, &bin->lock);
                         bin->stats.nrequests += cache_bin->tstats.nrequests;
                         malloc_mutex_unlock(tsdn, &bin->lock);
                 } else {
