diff options
| author | Jason Evans <jasone@canonware.com> | 2017-01-15 16:56:30 -0800 |
|---|---|---|
| committer | Jason Evans <jasone@canonware.com> | 2017-01-20 21:43:07 -0800 |
| commit | c4c2592c834d8a37beb0a0d53842095160cbf9ee (patch) | |
| tree | e4717ea6a2f13926dadd74ea1fc83f9742f77968 /src | |
| parent | 5154ff32ee8c37bacb6afd8a07b923eb33228357 (diff) | |
| download | platform_external_jemalloc_new-c4c2592c834d8a37beb0a0d53842095160cbf9ee.tar.gz platform_external_jemalloc_new-c4c2592c834d8a37beb0a0d53842095160cbf9ee.tar.bz2 platform_external_jemalloc_new-c4c2592c834d8a37beb0a0d53842095160cbf9ee.zip | |
Update brace style.
Add braces around single-line blocks, and remove line breaks before
function-opening braces.
This resolves #537.
Diffstat (limited to 'src')
| -rw-r--r-- | src/arena.c | 516 | ||||
| -rw-r--r-- | src/base.c | 99 | ||||
| -rw-r--r-- | src/bitmap.c | 30 | ||||
| -rw-r--r-- | src/ckh.c | 101 | ||||
| -rw-r--r-- | src/ctl.c | 344 | ||||
| -rw-r--r-- | src/extent.c | 375 | ||||
| -rw-r--r-- | src/extent_dss.c | 68 | ||||
| -rw-r--r-- | src/extent_mmap.c | 22 | ||||
| -rw-r--r-- | src/jemalloc.c | 497 | ||||
| -rw-r--r-- | src/jemalloc_cpp.cpp | 33 | ||||
| -rw-r--r-- | src/large.c | 87 | ||||
| -rw-r--r-- | src/mutex.c | 40 | ||||
| -rw-r--r-- | src/nstime.c | 57 | ||||
| -rw-r--r-- | src/pages.c | 85 | ||||
| -rw-r--r-- | src/prof.c | 568 | ||||
| -rw-r--r-- | src/rtree.c | 70 | ||||
| -rw-r--r-- | src/stats.c | 45 | ||||
| -rw-r--r-- | src/tcache.c | 120 | ||||
| -rw-r--r-- | src/tsd.c | 42 | ||||
| -rw-r--r-- | src/util.c | 96 | ||||
| -rw-r--r-- | src/witness.c | 27 | ||||
| -rw-r--r-- | src/zone.c | 96 |
22 files changed, 1670 insertions(+), 1748 deletions(-)
diff --git a/src/arena.c b/src/arena.c index 7362c4e6..5cf9bd07 100644 --- a/src/arena.c +++ b/src/arena.c @@ -38,16 +38,14 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, /******************************************************************************/ static size_t -arena_extent_dirty_npages(const extent_t *extent) -{ +arena_extent_dirty_npages(const extent_t *extent) { return (extent_size_get(extent) >> LG_PAGE); } static extent_t * arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool slab) -{ + size_t alignment, bool *zero, bool slab) { bool commit = true; malloc_mutex_assert_owner(tsdn, &arena->lock); @@ -59,8 +57,7 @@ arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena, extent_t * arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, - size_t alignment, bool *zero) -{ + size_t alignment, bool *zero) { extent_t *extent; malloc_mutex_lock(tsdn, &arena->lock); @@ -73,8 +70,7 @@ arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena, static void arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) -{ + extent_hooks_t **r_extent_hooks, extent_t *extent) { malloc_mutex_assert_owner(tsdn, &arena->lock); extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent); @@ -83,8 +79,7 @@ arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena, void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) -{ + extent_hooks_t **r_extent_hooks, extent_t *extent) { malloc_mutex_lock(tsdn, &arena->lock); arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent); malloc_mutex_unlock(tsdn, &arena->lock); @@ -92,8 +87,7 @@ arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena, void arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, 
extent_t *extent, - bool cache) -{ + bool cache) { malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); if (cache) { @@ -104,8 +98,7 @@ arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - bool dirty) -{ + bool dirty) { malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); if (dirty) { @@ -117,8 +110,7 @@ arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent, JEMALLOC_INLINE_C void * arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, - const arena_bin_info_t *bin_info) -{ + const arena_bin_info_t *bin_info) { void *ret; arena_slab_data_t *slab_data = extent_slab_data_get(slab); size_t regind; @@ -137,8 +129,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, JEMALLOC_INLINE_C #endif size_t -arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) -{ +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { size_t diff, regind; /* Freeing a pointer outside the slab can cause assertion failure. 
*/ @@ -174,8 +165,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) JEMALLOC_INLINE_C void arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, - arena_slab_data_t *slab_data, void *ptr) -{ + arena_slab_data_t *slab_data, void *ptr) { szind_t binind = slab_data->binind; const arena_bin_info_t *bin_info = &arena_bin_info[binind]; size_t regind = arena_slab_regind(slab, binind, ptr); @@ -189,27 +179,25 @@ arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, } static void -arena_nactive_add(arena_t *arena, size_t add_pages) -{ +arena_nactive_add(arena_t *arena, size_t add_pages) { arena->nactive += add_pages; } static void -arena_nactive_sub(arena_t *arena, size_t sub_pages) -{ +arena_nactive_sub(arena_t *arena, size_t sub_pages) { assert(arena->nactive >= sub_pages); arena->nactive -= sub_pages; } static void -arena_large_malloc_stats_update(arena_t *arena, size_t usize) -{ +arena_large_malloc_stats_update(arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); - if (usize < LARGE_MINCLASS) + if (usize < LARGE_MINCLASS) { usize = LARGE_MINCLASS; + } index = size2index(usize); hindex = (index >= NBINS) ? index - NBINS : 0; @@ -221,14 +209,14 @@ arena_large_malloc_stats_update(arena_t *arena, size_t usize) } static void -arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize) -{ +arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); - if (usize < LARGE_MINCLASS) + if (usize < LARGE_MINCLASS) { usize = LARGE_MINCLASS; + } index = size2index(usize); hindex = (index >= NBINS) ? 
index - NBINS : 0; @@ -240,14 +228,14 @@ arena_large_malloc_stats_update_undo(arena_t *arena, size_t usize) } static void -arena_large_dalloc_stats_update(arena_t *arena, size_t usize) -{ +arena_large_dalloc_stats_update(arena_t *arena, size_t usize) { szind_t index, hindex; cassert(config_stats); - if (usize < LARGE_MINCLASS) + if (usize < LARGE_MINCLASS) { usize = LARGE_MINCLASS; + } index = size2index(usize); hindex = (index >= NBINS) ? index - NBINS : 0; @@ -258,8 +246,7 @@ arena_large_dalloc_stats_update(arena_t *arena, size_t usize) } static void -arena_large_reset_stats_cancel(arena_t *arena, size_t usize) -{ +arena_large_reset_stats_cancel(arena_t *arena, size_t usize) { szind_t index = size2index(usize); szind_t hindex = (index >= NBINS) ? index - NBINS : 0; @@ -270,16 +257,15 @@ arena_large_reset_stats_cancel(arena_t *arena, size_t usize) } static void -arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize) -{ +arena_large_ralloc_stats_update(arena_t *arena, size_t oldusize, size_t usize) { arena_large_dalloc_stats_update(arena, oldusize); arena_large_malloc_stats_update(arena, usize); } static extent_t * arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, size_t usize, size_t alignment, bool *zero) -{ + extent_hooks_t **r_extent_hooks, size_t usize, size_t alignment, + bool *zero) { extent_t *extent; bool commit = true; @@ -301,8 +287,7 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena, extent_t * arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool *zero) -{ + size_t alignment, bool *zero) { extent_t *extent; extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; @@ -328,14 +313,14 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, void arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - bool locked) -{ + bool locked) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - if 
(!locked) + if (!locked) { malloc_mutex_lock(tsdn, &arena->lock); - else + } else { malloc_mutex_assert_owner(tsdn, &arena->lock); + } if (config_stats) { arena_large_dalloc_stats_update(arena, extent_usize_get(extent)); @@ -344,14 +329,14 @@ arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena, extent_t *extent, arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); arena_extent_cache_dalloc_locked(tsdn, arena, &extent_hooks, extent); - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->lock); + } } void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - size_t oldusize) -{ + size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = oldusize - usize; @@ -366,8 +351,7 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - size_t oldusize) -{ + size_t oldusize) { size_t usize = extent_usize_get(extent); size_t udiff = usize - oldusize; @@ -381,8 +365,7 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, } static void -arena_decay_deadline_init(arena_t *arena) -{ +arena_decay_deadline_init(arena_t *arena) { /* * Generate a new deadline that is uniformly random within the next * epoch after the current one. @@ -399,14 +382,12 @@ arena_decay_deadline_init(arena_t *arena) } static bool -arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) -{ +arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) { return (nstime_compare(&arena->decay.deadline, time) <= 0); } static size_t -arena_decay_backlog_npages_limit(const arena_t *arena) -{ +arena_decay_backlog_npages_limit(const arena_t *arena) { static const uint64_t h_steps[] = { #define STEP(step, h, x, y) \ h, @@ -423,24 +404,23 @@ arena_decay_backlog_npages_limit(const arena_t *arena) * to round down to the nearest whole number of pages. 
*/ sum = 0; - for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { sum += arena->decay.backlog[i] * h_steps[i]; + } npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); return (npages_limit_backlog); } static void -arena_decay_backlog_update_last(arena_t *arena) -{ +arena_decay_backlog_update_last(arena_t *arena) { size_t ndirty_delta = (arena->ndirty > arena->decay.nunpurged) ? arena->ndirty - arena->decay.nunpurged : 0; arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; } static void -arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) -{ +arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) { if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * sizeof(size_t)); @@ -461,8 +441,7 @@ arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) } static void -arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) -{ +arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) { uint64_t nadvance_u64; nstime_t delta; @@ -486,25 +465,23 @@ arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) } static void -arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) -{ +arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) { size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); - if (arena->ndirty > ndirty_limit) + if (arena->ndirty > ndirty_limit) { arena_purge_to_limit(tsdn, arena, ndirty_limit); + } arena->decay.nunpurged = arena->ndirty; } static void -arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) -{ +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) { arena_decay_epoch_advance_helper(arena, time); arena_decay_epoch_advance_purge(tsdn, arena); } static void -arena_decay_init(arena_t *arena, ssize_t decay_time) -{ +arena_decay_init(arena_t *arena, ssize_t decay_time) { arena->decay.time = decay_time; if (decay_time > 0) { 
nstime_init2(&arena->decay.interval, decay_time, 0); @@ -520,18 +497,18 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) } static bool -arena_decay_time_valid(ssize_t decay_time) -{ - if (decay_time < -1) +arena_decay_time_valid(ssize_t decay_time) { + if (decay_time < -1) { return (false); - if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) + } + if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) { return (true); + } return (false); } ssize_t -arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) -{ +arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) { ssize_t decay_time; malloc_mutex_lock(tsdn, &arena->lock); @@ -542,10 +519,10 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) } bool -arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) -{ - if (!arena_decay_time_valid(decay_time)) +arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) { + if (!arena_decay_time_valid(decay_time)) { return (true); + } malloc_mutex_lock(tsdn, &arena->lock); /* @@ -564,14 +541,14 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) } static void -arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) -{ +arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) { nstime_t time; /* Purge all or nothing if the option is disabled. */ if (arena->decay.time <= 0) { - if (arena->decay.time == 0) + if (arena->decay.time == 0) { arena_purge_to_limit(tsdn, arena, 0); + } return; } @@ -601,33 +578,34 @@ arena_maybe_purge_helper(tsdn_t *tsdn, arena_t *arena) * during the current epoch are not subject to purge until a future * epoch, so as a result purging only happens during epoch advances. 
*/ - if (arena_decay_deadline_reached(arena, &time)) + if (arena_decay_deadline_reached(arena, &time)) { arena_decay_epoch_advance(tsdn, arena, &time); + } } void -arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) -{ +arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_assert_owner(tsdn, &arena->lock); /* Don't recursively purge. */ - if (arena->purging) + if (arena->purging) { return; + } arena_maybe_purge_helper(tsdn, arena); } static size_t -arena_dirty_count(tsdn_t *tsdn, arena_t *arena) -{ +arena_dirty_count(tsdn_t *tsdn, arena_t *arena) { extent_t *extent; size_t ndirty = 0; malloc_mutex_lock(tsdn, &arena->extents_mtx); for (extent = qr_next(&arena->extents_dirty, qr_link); extent != - &arena->extents_dirty; extent = qr_next(extent, qr_link)) + &arena->extents_dirty; extent = qr_next(extent, qr_link)) { ndirty += extent_size_get(extent) >> LG_PAGE; + } malloc_mutex_unlock(tsdn, &arena->extents_mtx); @@ -636,8 +614,7 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena) static size_t arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - size_t ndirty_limit, extent_t *purge_extents_sentinel) -{ + size_t ndirty_limit, extent_t *purge_extents_sentinel) { extent_t *extent, *next; size_t nstashed = 0; @@ -651,8 +628,9 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, UNUSED extent_t *textent; npages = extent_size_get(extent) >> LG_PAGE; - if (arena->ndirty - (nstashed + npages) < ndirty_limit) + if (arena->ndirty - (nstashed + npages) < ndirty_limit) { break; + } next = qr_next(extent, qr_link); /* Allocate. 
*/ @@ -675,20 +653,21 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, static size_t arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel) -{ + extent_hooks_t **r_extent_hooks, extent_t *purge_extents_sentinel) { UNUSED size_t nmadvise; size_t npurged; extent_t *extent, *next; - if (config_stats) + if (config_stats) { nmadvise = 0; + } npurged = 0; for (extent = qr_next(purge_extents_sentinel, qr_link); extent != purge_extents_sentinel; extent = next) { - if (config_stats) + if (config_stats) { nmadvise++; + } npurged += extent_size_get(extent) >> LG_PAGE; next = qr_next(extent, qr_link); @@ -709,8 +688,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, * invariant: (arena->ndirty >= ndirty_limit) */ static void -arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) -{ +arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) { extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t npurge, npurged; extent_t purge_extents_sentinel; @@ -730,33 +708,34 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) npurge = arena_stash_dirty(tsdn, arena, &extent_hooks, ndirty_limit, &purge_extents_sentinel); - if (npurge == 0) + if (npurge == 0) { goto label_return; + } npurged = arena_purge_stashed(tsdn, arena, &extent_hooks, &purge_extents_sentinel); assert(npurged == npurge); - if (config_stats) + if (config_stats) { arena->stats.npurge++; + } label_return: arena->purging = false; } void -arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) -{ +arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) { malloc_mutex_lock(tsdn, &arena->lock); - if (all) + if (all) { arena_purge_to_limit(tsdn, arena, 0); - else + } else { arena_maybe_purge(tsdn, arena); + } malloc_mutex_unlock(tsdn, &arena->lock); } static void -arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) -{ +arena_slab_dalloc(tsdn_t *tsdn, arena_t 
*arena, extent_t *slab) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); @@ -764,45 +743,41 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) } static void -arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) -{ +arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) { assert(extent_slab_data_get(slab)->nfree > 0); extent_heap_insert(&bin->slabs_nonfull, slab); } static void -arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) -{ +arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) { extent_heap_remove(&bin->slabs_nonfull, slab); } static extent_t * -arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) -{ +arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) { extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); - if (slab == NULL) + if (slab == NULL) { return (NULL); - if (config_stats) + } + if (config_stats) { bin->stats.reslabs++; + } return (slab); } static void -arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab) -{ +arena_bin_slabs_full_insert(arena_bin_t *bin, extent_t *slab) { assert(extent_slab_data_get(slab)->nfree == 0); extent_ring_insert(&bin->slabs_full, slab); } static void -arena_bin_slabs_full_remove(extent_t *slab) -{ +arena_bin_slabs_full_remove(extent_t *slab) { extent_ring_remove(slab); } void -arena_reset(tsd_t *tsd, arena_t *arena) -{ +arena_reset(tsd_t *tsd, arena_t *arena) { unsigned i; extent_t *extent; @@ -828,16 +803,19 @@ arena_reset(tsd_t *tsd, arena_t *arena) size_t usize; malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); - if (config_stats || (config_prof && opt_prof)) + if (config_stats || (config_prof && opt_prof)) { usize = isalloc(tsd_tsdn(tsd), extent, ptr); + } /* Remove large allocation from prof sample set. 
*/ - if (config_prof && opt_prof) + if (config_prof && opt_prof) { prof_free(tsd, extent, ptr, usize); + } large_dalloc(tsd_tsdn(tsd), extent); malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); /* Cancel out unwanted effects on stats. */ - if (config_stats) + if (config_stats) { arena_large_reset_stats_cancel(arena, usize); + } } malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); @@ -883,8 +861,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) } static void -arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) -{ +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { extent_hooks_t *extent_hooks = extent_hooks_get(arena); size_t i; @@ -912,8 +889,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) } void -arena_destroy(tsd_t *tsd, arena_t *arena) -{ +arena_destroy(tsd_t *tsd, arena_t *arena) { assert(base_ind_get(arena->base) >= narenas_auto); assert(arena_nthreads_get(arena, false) == 0); assert(arena_nthreads_get(arena, true) == 0); @@ -949,8 +925,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) static extent_t * arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info) -{ + extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info) { extent_t *slab; bool zero, commit; @@ -966,8 +941,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, static extent_t * arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, - const arena_bin_info_t *bin_info) -{ + const arena_bin_info_t *bin_info) { extent_t *slab; arena_slab_data_t *slab_data; extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; @@ -978,8 +952,9 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, if (slab == NULL) { slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, bin_info); - if (slab == NULL) + if (slab == NULL) { return (NULL); + } } assert(extent_slab_get(slab)); @@ -991,23 +966,24 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, slab_data->nfree = bin_info->nregs; 
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info); - if (config_stats) + if (config_stats) { arena->stats.mapped += extent_size_get(slab); + } return (slab); } static extent_t * arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, - szind_t binind) -{ + szind_t binind) { extent_t *slab; const arena_bin_info_t *bin_info; /* Look for a usable slab. */ slab = arena_bin_slabs_nonfull_tryget(bin); - if (slab != NULL) + if (slab != NULL) { return (slab); + } /* No existing slabs have any space available. */ bin_info = &arena_bin_info[binind]; @@ -1034,8 +1010,9 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, * so search one more time. */ slab = arena_bin_slabs_nonfull_tryget(bin); - if (slab != NULL) + if (slab != NULL) { return (slab); + } return (NULL); } @@ -1043,8 +1020,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static void * arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, - szind_t binind) -{ + szind_t binind) { const arena_bin_info_t *bin_info; extent_t *slab; @@ -1088,8 +1064,9 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, bin->slabcur = NULL; } - if (slab == NULL) + if (slab == NULL) { return (NULL); + } bin->slabcur = slab; assert(extent_slab_data_get(bin->slabcur)->nfree > 0); @@ -1099,15 +1076,15 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, - szind_t binind, uint64_t prof_accumbytes) -{ + szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { prof_idump(tsdn); + } bin = &arena->bins[binind]; malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = 
(tcache_bin_info[binind].ncached_max >> @@ -1118,8 +1095,9 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, extent_slab_data_get(slab)->nfree > 0) { ptr = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]); - } else + } else { ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first @@ -1152,10 +1130,10 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, } void -arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) -{ - if (!zero) +arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) { + if (!zero) { memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); + } } #ifdef JEMALLOC_JET @@ -1163,8 +1141,7 @@ arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) #endif void -arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info) -{ +arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info) { memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } #ifdef JEMALLOC_JET @@ -1175,8 +1152,7 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small = #endif static void * -arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) -{ +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; size_t usize; @@ -1188,10 +1164,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) malloc_mutex_lock(tsdn, &bin->lock); if ((slab = bin->slabcur) != NULL && extent_slab_data_get(slab)->nfree > - 0) + 0) { ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]); - else + } else { ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ret == NULL) { malloc_mutex_unlock(tsdn, &bin->lock); @@ -1204,16 +1181,18 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) 
bin->stats.curregs++; } malloc_mutex_unlock(tsdn, &bin->lock); - if (config_prof && arena_prof_accum(tsdn, arena, usize)) + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); + } if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) + } else if (unlikely(opt_zero)) { memset(ret, 0, usize); + } } } else { if (config_fill && unlikely(opt_junk_alloc)) { @@ -1229,24 +1208,25 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) void * arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, - bool zero) -{ + bool zero) { assert(!tsdn_null(tsdn) || arena != NULL); - if (likely(!tsdn_null(tsdn))) + if (likely(!tsdn_null(tsdn))) { arena = arena_choose(tsdn_tsd(tsdn), arena); - if (unlikely(arena == NULL)) + } + if (unlikely(arena == NULL)) { return (NULL); + } - if (likely(size <= SMALL_MAXCLASS)) + if (likely(size <= SMALL_MAXCLASS)) { return (arena_malloc_small(tsdn, arena, ind, zero)); + } return (large_malloc(tsdn, arena, index2size(ind), zero)); } void * arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache) -{ + bool zero, tcache_t *tcache) { void *ret; if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE @@ -1255,18 +1235,18 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true); } else { - if (likely(alignment <= CACHELINE)) + if (likely(alignment <= CACHELINE)) { ret = large_malloc(tsdn, arena, usize, zero); - else + } else { ret = large_palloc(tsdn, arena, usize, alignment, zero); + } } return (ret); } void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr, - size_t usize) -{ + size_t usize) { arena_t *arena = extent_arena_get(extent); cassert(config_prof); @@ -1283,18 +1263,18 @@ arena_prof_promote(tsdn_t 
*tsdn, extent_t *extent, const void *ptr, * canceling. */ malloc_mutex_lock(tsdn, &arena->lock); - if (arena->prof_accumbytes >= LARGE_MINCLASS - usize) + if (arena->prof_accumbytes >= LARGE_MINCLASS - usize) { arena->prof_accumbytes -= LARGE_MINCLASS - usize; - else + } else { arena->prof_accumbytes = 0; + } malloc_mutex_unlock(tsdn, &arena->lock); assert(isalloc(tsdn, extent, ptr) == usize); } static size_t -arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) -{ +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { cassert(config_prof); assert(ptr != NULL); @@ -1307,8 +1287,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr, - tcache_t *tcache, bool slow_path) -{ + tcache_t *tcache, bool slow_path) { size_t usize; cassert(config_prof); @@ -1318,17 +1297,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr, if (usize <= tcache_maxclass) { tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, usize, slow_path); - } else + } else { large_dalloc(tsdn, extent); + } } static void -arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) -{ +arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) { /* Dissociate slab from bin. */ - if (slab == bin->slabcur) + if (slab == bin->slabcur) { bin->slabcur = NULL; - else { + } else { szind_t binind = extent_slab_data_get(slab)->binind; const arena_bin_info_t *bin_info = &arena_bin_info[binind]; @@ -1337,17 +1316,17 @@ arena_dissociate_bin_slab(extent_t *slab, arena_bin_t *bin) * slab only contains one region, then it never gets inserted * into the non-full slabs heap. 
*/ - if (bin_info->nregs == 1) + if (bin_info->nregs == 1) { arena_bin_slabs_full_remove(slab); - else + } else { arena_bin_slabs_nonfull_remove(bin, slab); + } } } static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, - arena_bin_t *bin) -{ + arena_bin_t *bin) { assert(slab != bin->slabcur); malloc_mutex_unlock(tsdn, &bin->lock); @@ -1357,14 +1336,14 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, malloc_mutex_unlock(tsdn, &arena->lock); /****************************/ malloc_mutex_lock(tsdn, &bin->lock); - if (config_stats) + if (config_stats) { bin->stats.curslabs--; + } } static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, - arena_bin_t *bin) -{ + arena_bin_t *bin) { assert(extent_slab_data_get(slab)->nfree > 0); /* @@ -1375,28 +1354,31 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, */ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { /* Switch slabcur. */ - if (extent_slab_data_get(bin->slabcur)->nfree > 0) + if (extent_slab_data_get(bin->slabcur)->nfree > 0) { arena_bin_slabs_nonfull_insert(bin, bin->slabcur); - else + } else { arena_bin_slabs_full_insert(bin, bin->slabcur); + } bin->slabcur = slab; - if (config_stats) + if (config_stats) { bin->stats.reslabs++; - } else + } + } else { arena_bin_slabs_nonfull_insert(bin, slab); + } } static void arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, - void *ptr, bool junked) -{ + void *ptr, bool junked) { arena_slab_data_t *slab_data = extent_slab_data_get(slab); szind_t binind = slab_data->binind; arena_bin_t *bin = &arena->bins[binind]; const arena_bin_info_t *bin_info = &arena_bin_info[binind]; - if (!junked && config_fill && unlikely(opt_junk_free)) + if (!junked && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, bin_info); + } arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr); if (slab_data->nfree == bin_info->nregs) { @@ -1415,14 +1397,12 @@ 
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, - void *ptr) -{ + void *ptr) { arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); } static void -arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) -{ +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { arena_bin_t *bin = &arena->bins[extent_slab_data_get(extent)->binind]; malloc_mutex_lock(tsdn, &bin->lock); @@ -1431,23 +1411,22 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) } void -arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) -{ +arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { arena_dalloc_bin(tsdn, arena, extent, ptr); arena_decay_tick(tsdn, arena); } bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, - size_t size, size_t extra, bool zero) -{ + size_t size, size_t extra, bool zero) { size_t usize_min, usize_max; /* Calls with non-zero extra had to clamp extra. 
*/ assert(extra == 0 || size + extra <= LARGE_MAXCLASS); - if (unlikely(size > LARGE_MAXCLASS)) + if (unlikely(size > LARGE_MAXCLASS)) { return (true); + } usize_min = s2u(size); usize_max = s2u(size + extra); @@ -1460,8 +1439,9 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, oldsize); if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) != size2index(oldsize)) && (size > oldsize || usize_max < - oldsize)) + oldsize)) { return (true); + } arena_decay_tick(tsdn, extent_arena_get(extent)); return (false); @@ -1475,33 +1455,36 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, static void * arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - if (alignment == 0) + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { return (arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true)); + } usize = sa2u(usize, alignment); - if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return (NULL); + } return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); } void * arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, - size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache) -{ + size_t oldsize, size_t size, size_t alignment, bool zero, + tcache_t *tcache) { void *ret; size_t usize, copysize; usize = s2u(size); - if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { return (NULL); + } if (likely(usize <= SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. 
*/ if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0, - zero)) + zero)) { return (ptr); + } } if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { @@ -1515,8 +1498,9 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, */ ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero, tcache); - if (ret == NULL) + if (ret == NULL) { return (NULL); + } /* * Junk/zero-filling were already done by @@ -1530,8 +1514,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr, } dss_prec_t -arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) -{ +arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) { dss_prec_t ret; malloc_mutex_lock(tsdn, &arena->lock); @@ -1541,10 +1524,10 @@ arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) } bool -arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) -{ - if (!have_dss) +arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { return (dss_prec != dss_prec_disabled); + } malloc_mutex_lock(tsdn, &arena->lock); arena->dss_prec = dss_prec; malloc_mutex_unlock(tsdn, &arena->lock); @@ -1552,24 +1535,22 @@ arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) } ssize_t -arena_decay_time_default_get(void) -{ +arena_decay_time_default_get(void) { return ((ssize_t)atomic_read_zu((size_t *)&decay_time_default)); } bool -arena_decay_time_default_set(ssize_t decay_time) -{ - if (!arena_decay_time_valid(decay_time)) +arena_decay_time_default_set(ssize_t decay_time) { + if (!arena_decay_time_valid(decay_time)) { return (true); + } atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time); return (false); } static void arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) -{ + const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) { *nthreads += arena_nthreads_get(arena, false); *dss = dss_prec_names[arena->dss_prec]; 
*decay_time = arena->decay.time; @@ -1579,8 +1560,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) -{ + const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty) { malloc_mutex_lock(tsdn, &arena->lock); arena_basic_stats_merge_locked(arena, nthreads, dss, decay_time, nactive, ndirty); @@ -1591,8 +1571,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) -{ + malloc_large_stats_t *lstats) { size_t base_allocated, base_resident, base_mapped; unsigned i; @@ -1662,57 +1641,57 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, } unsigned -arena_nthreads_get(arena_t *arena, bool internal) -{ +arena_nthreads_get(arena_t *arena, bool internal) { return (atomic_read_u(&arena->nthreads[internal])); } void -arena_nthreads_inc(arena_t *arena, bool internal) -{ +arena_nthreads_inc(arena_t *arena, bool internal) { atomic_add_u(&arena->nthreads[internal], 1); } void -arena_nthreads_dec(arena_t *arena, bool internal) -{ +arena_nthreads_dec(arena_t *arena, bool internal) { atomic_sub_u(&arena->nthreads[internal], 1); } size_t -arena_extent_sn_next(arena_t *arena) -{ +arena_extent_sn_next(arena_t *arena) { return (atomic_add_zu(&arena->extent_sn_next, 1) - 1); } arena_t * -arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) -{ +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; base_t *base; unsigned i; - if (ind == 0) + if (ind == 0) { base = b0get(); - else { + } else { base = base_new(tsdn, ind, extent_hooks); - if (base == NULL) + if (base == NULL) { return (NULL); + } } arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); - if 
(arena == NULL) + if (arena == NULL) { goto label_error; + } arena->nthreads[0] = arena->nthreads[1] = 0; - if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) + if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) { goto label_error; + } - if (config_stats && config_tcache) + if (config_stats && config_tcache) { ql_new(&arena->tcache_ql); + } - if (config_prof) + if (config_prof) { arena->prof_accumbytes = 0; + } if (config_cache_oblivious) { /* @@ -1738,8 +1717,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) ql_new(&arena->large); if (malloc_mutex_init(&arena->large_mtx, "arena_large", - WITNESS_RANK_ARENA_LARGE)) + WITNESS_RANK_ARENA_LARGE)) { goto label_error; + } for (i = 0; i < NPSIZES+1; i++) { extent_heap_new(&arena->extents_cached[i]); @@ -1750,83 +1730,85 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) false, false); if (malloc_mutex_init(&arena->extents_mtx, "arena_extents", - WITNESS_RANK_ARENA_EXTENTS)) + WITNESS_RANK_ARENA_EXTENTS)) { goto label_error; + } - if (!config_munmap) + if (!config_munmap) { arena->extent_grow_next = psz2ind(HUGEPAGE); + } ql_new(&arena->extent_cache); if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache", - WITNESS_RANK_ARENA_EXTENT_CACHE)) + WITNESS_RANK_ARENA_EXTENT_CACHE)) { goto label_error; + } /* Initialize bins. 
*/ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; if (malloc_mutex_init(&bin->lock, "arena_bin", - WITNESS_RANK_ARENA_BIN)) + WITNESS_RANK_ARENA_BIN)) { goto label_error; + } bin->slabcur = NULL; extent_heap_new(&bin->slabs_nonfull); extent_init(&bin->slabs_full, arena, NULL, 0, 0, 0, false, false, false, false); - if (config_stats) + if (config_stats) { memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } } arena->base = base; return (arena); label_error: - if (ind != 0) + if (ind != 0) { base_delete(base); + } return (NULL); } void -arena_boot(void) -{ +arena_boot(void) { arena_decay_time_default_set(opt_decay_time); } void -arena_prefork0(tsdn_t *tsdn, arena_t *arena) -{ +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->lock); } void -arena_prefork1(tsdn_t *tsdn, arena_t *arena) -{ +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extents_mtx); } void -arena_prefork2(tsdn_t *tsdn, arena_t *arena) -{ +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx); } void -arena_prefork3(tsdn_t *tsdn, arena_t *arena) -{ +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { unsigned i; base_prefork(tsdn, arena->base); - for (i = 0; i < NBINS; i++) + for (i = 0; i < NBINS; i++) { malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + } malloc_mutex_prefork(tsdn, &arena->large_mtx); } void -arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) -{ +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); - for (i = 0; i < NBINS; i++) + for (i = 0; i < NBINS; i++) { malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + } base_postfork_parent(tsdn, arena->base); malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx); malloc_mutex_postfork_parent(tsdn, &arena->extents_mtx); @@ -1834,13 +1816,13 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) } void -arena_postfork_child(tsdn_t 
*tsdn, arena_t *arena) -{ +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; malloc_mutex_postfork_child(tsdn, &arena->large_mtx); - for (i = 0; i < NBINS; i++) + for (i = 0; i < NBINS; i++) { malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + } base_postfork_child(tsdn, arena->base); malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx); malloc_mutex_postfork_child(tsdn, &arena->extents_mtx); @@ -9,17 +9,16 @@ static base_t *b0; /******************************************************************************/ static void * -base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) -{ +base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) { void *addr; bool zero = true; bool commit = true; assert(size == HUGEPAGE_CEILING(size)); - if (extent_hooks == &extent_hooks_default) + if (extent_hooks == &extent_hooks_default) { addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit); - else { + } else { addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE, &zero, &commit, ind); } @@ -28,8 +27,8 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) } static void -base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) -{ +base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { /* * Cascade through dalloc, decommit, purge_lazy, and purge_forced, * stopping at first success. This cascade is performed for consistency @@ -41,40 +40,48 @@ base_unmap(extent_hooks_t *extent_hooks, unsigned ind, void *addr, size_t size) * some consistent-but-allocated state. 
*/ if (extent_hooks == &extent_hooks_default) { - if (!extent_dalloc_mmap(addr, size)) + if (!extent_dalloc_mmap(addr, size)) { return; - if (!pages_decommit(addr, size)) + } + if (!pages_decommit(addr, size)) { return; - if (!pages_purge_lazy(addr, size)) + } + if (!pages_purge_lazy(addr, size)) { return; - if (!pages_purge_forced(addr, size)) + } + if (!pages_purge_forced(addr, size)) { return; + } /* Nothing worked. This should never happen. */ not_reached(); } else { if (extent_hooks->dalloc != NULL && - !extent_hooks->dalloc(extent_hooks, addr, size, true, ind)) + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { return; + } if (extent_hooks->decommit != NULL && !extent_hooks->decommit(extent_hooks, addr, size, 0, size, - ind)) + ind)) { return; + } if (extent_hooks->purge_lazy != NULL && !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, - ind)) + ind)) { return; + } if (extent_hooks->purge_forced != NULL && !extent_hooks->purge_forced(extent_hooks, addr, size, 0, - size, ind)) + size, ind)) { return; + } /* Nothing worked. That's the application's problem. 
*/ } } static void base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, - size_t size) -{ + size_t size) { size_t sn; sn = *extent_sn_next; @@ -85,8 +92,7 @@ base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, static void * base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, - size_t alignment) -{ + size_t alignment) { void *ret; assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); @@ -104,8 +110,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, static void base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent, - size_t gap_size, void *addr, size_t size) -{ + size_t gap_size, void *addr, size_t size) { if (extent_size_get(extent) > 0) { /* * Compute the index for the largest size class that does not @@ -131,8 +136,7 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent, static void * base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, - size_t size, size_t alignment) -{ + size_t size, size_t alignment) { void *ret; size_t gap_size; @@ -148,8 +152,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, */ static base_block_t * base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, - size_t *extent_sn_next, size_t size, size_t alignment) -{ + size_t *extent_sn_next, size_t size, size_t alignment) { base_block_t *block; size_t usize, header_size, gap_size, block_size; @@ -159,8 +162,9 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, gap_size = ALIGNMENT_CEILING(header_size, alignment) - header_size; block_size = HUGEPAGE_CEILING(header_size + gap_size + usize); block = (base_block_t *)base_map(extent_hooks, ind, block_size); - if (block == NULL) + if (block == NULL) { return (NULL); + } block->size = block_size; block->next = NULL; assert(block_size >= header_size); @@ -174,8 +178,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind, * specified alignment. 
*/ static extent_t * -base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) -{ +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *block; @@ -183,8 +186,9 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) block = base_block_alloc(extent_hooks, base_ind_get(base), &base->extent_sn_next, size, alignment); - if (block == NULL) + if (block == NULL) { return (NULL); + } block->next = base->blocks; base->blocks = block; if (config_stats) { @@ -198,14 +202,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) } base_t * -b0get(void) -{ +b0get(void) { return (b0); } base_t * -base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) -{ +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { base_t *base; size_t extent_sn_next, base_alignment, base_size, gap_size; base_block_t *block; @@ -214,8 +216,9 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) extent_sn_next = 0; block = base_block_alloc(extent_hooks, ind, &extent_sn_next, sizeof(base_t), QUANTUM); - if (block == NULL) + if (block == NULL) { return (NULL); + } base_alignment = CACHELINE; base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); @@ -229,8 +232,9 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) } base->extent_sn_next = extent_sn_next; base->blocks = block; - for (i = 0; i < NSIZES; i++) + for (i = 0; i < NSIZES; i++) { extent_heap_new(&base->avail[i]); + } if (config_stats) { base->allocated = sizeof(base_block_t); base->resident = PAGE_CEILING(sizeof(base_block_t)); @@ -245,8 +249,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) } void -base_delete(base_t *base) -{ +base_delete(base_t *base) { extent_hooks_t *extent_hooks = base_extent_hooks_get(base); base_block_t *next = base->blocks; do { @@ -258,14 +261,12 @@ 
base_delete(base_t *base) } extent_hooks_t * -base_extent_hooks_get(base_t *base) -{ +base_extent_hooks_get(base_t *base) { return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun)); } extent_hooks_t * -base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) -{ +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); union { extent_hooks_t **h; @@ -287,8 +288,7 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) * sharing. */ void * -base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) -{ +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { void *ret; size_t usize, asize; szind_t i; @@ -324,8 +324,7 @@ label_return: void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, - size_t *mapped) -{ + size_t *mapped) { cassert(config_stats); malloc_mutex_lock(tsdn, &base->mtx); @@ -338,26 +337,22 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, } void -base_prefork(tsdn_t *tsdn, base_t *base) -{ +base_prefork(tsdn_t *tsdn, base_t *base) { malloc_mutex_prefork(tsdn, &base->mtx); } void -base_postfork_parent(tsdn_t *tsdn, base_t *base) -{ +base_postfork_parent(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_parent(tsdn, &base->mtx); } void -base_postfork_child(tsdn_t *tsdn, base_t *base) -{ +base_postfork_child(tsdn_t *tsdn, base_t *base) { malloc_mutex_postfork_child(tsdn, &base->mtx); } bool -base_boot(tsdn_t *tsdn) -{ +base_boot(tsdn_t *tsdn) { b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); return (b0 == NULL); } diff --git a/src/bitmap.c b/src/bitmap.c index 3d27f059..7cbc7d45 100644 --- a/src/bitmap.c +++ b/src/bitmap.c @@ -6,8 +6,7 @@ #ifdef BITMAP_USE_TREE void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; @@ -35,14 +34,12 @@ 
bitmap_info_init(bitmap_info_t *binfo, size_t nbits) } static size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ +bitmap_info_ngroups(const bitmap_info_t *binfo) { return (binfo->levels[binfo->nlevels].group_offset); } void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t extra; unsigned i; @@ -56,23 +53,24 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } } } #else /* BITMAP_USE_TREE */ void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); @@ -81,27 +79,25 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) } static size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ +bitmap_info_ngroups(const bitmap_info_t *binfo) { return (binfo->ngroups); } void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t extra; memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->ngroups - 1] >>= extra; + } } #endif /* BITMAP_USE_TREE */ size_t -bitmap_size(const bitmap_info_t *binfo) -{ +bitmap_size(const bitmap_info_t *binfo) { return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); } 
@@ -50,15 +50,15 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); * otherwise. */ JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { return ((bucket << LG_CKH_BUCKET_CELLS) + i); + } } return (SIZE_T_MAX); @@ -68,8 +68,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ +ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); @@ -79,8 +78,9 @@ ckh_isearch(ckh_t *ckh, const void *key) /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) + if (cell != SIZE_T_MAX) { return (cell); + } /* Search secondary bucket. 
*/ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); @@ -90,8 +90,7 @@ ckh_isearch(ckh_t *ckh, const void *key) JEMALLOC_INLINE_C bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ + const void *data) { ckhc_t *cell; unsigned offset, i; @@ -123,8 +122,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, */ JEMALLOC_INLINE_C bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ + void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; @@ -187,14 +185,14 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, } bucket = tbucket; - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return (false); + } } } JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -203,13 +201,15 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return (false); + } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { return (false); + } /* * Try to find a place for this item via iterative eviction/relocation. @@ -222,8 +222,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) * old table into the new. 
*/ JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; @@ -245,8 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) } static bool -ckh_grow(tsd_t *tsd, ckh_t *ckh) -{ +ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; unsigned lg_prevbuckets, lg_curcells; @@ -302,8 +300,7 @@ label_return: } static void -ckh_shrink(tsd_t *tsd, ckh_t *ckh) -{ +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; size_t usize; unsigned lg_prevbuckets, lg_curcells; @@ -315,8 +312,9 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; + } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { @@ -353,8 +351,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp) -{ + ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; @@ -384,8 +381,9 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ + lg_mincells++) { + /* Do nothing. 
*/ + } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; @@ -409,8 +407,7 @@ label_return: } void -ckh_delete(tsd_t *tsd, ckh_t *ckh) -{ +ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE @@ -427,30 +424,31 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ckh->tab), ckh->tab, NULL, true, true); - if (config_debug) + if (config_debug) { memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } } size_t -ckh_count(ckh_t *ckh) -{ +ckh_count(ckh_t *ckh) { assert(ckh != NULL); return (ckh->count); } bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[i].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[i].data; + } *tabind = i + 1; return (false); } @@ -460,8 +458,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) } bool -ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) -{ +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); @@ -485,18 +482,19 @@ label_return: bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data) -{ + void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. 
*/ @@ -516,18 +514,19 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, } bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } return (false); } @@ -535,14 +534,12 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) } void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ +ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool -ckh_string_keycomp(const void *k1, const void *k2) -{ +ckh_string_keycomp(const void *k1, const void *k2) { assert(k1 != NULL); assert(k2 != NULL); @@ -550,8 +547,7 @@ ckh_string_keycomp(const void *k1, const void *k2) } void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ +ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; @@ -563,7 +559,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) } bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ +ckh_pointer_keycomp(const void *k1, const void *k2) { return ((k1 == k2) ? true : false); } @@ -17,22 +17,19 @@ static ctl_arenas_t *ctl_arenas; /* Helpers for named and indexed nodes. */ JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ +ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, size_t index) -{ +ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? 
&children[index] : NULL); } JEMALLOC_INLINE_C const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ +ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } @@ -433,8 +430,7 @@ static const ctl_named_node_t super_root_node[] = { /******************************************************************************/ static unsigned -arenas_i2a_impl(size_t i, bool compat, bool validate) -{ +arenas_i2a_impl(size_t i, bool compat, bool validate) { unsigned a; switch (i) { @@ -453,9 +449,9 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) * removal in 6.0.0. */ a = 0; - } else if (validate && i >= ctl_arenas->narenas) + } else if (validate && i >= ctl_arenas->narenas) { a = UINT_MAX; - else { + } else { /* * This function should never be called for an index * more than one past the range of indices that have @@ -472,14 +468,12 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) } static unsigned -arenas_i2a(size_t i) -{ +arenas_i2a(size_t i) { return (arenas_i2a_impl(i, true, false)); } static ctl_arena_t * -arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) -{ +arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) { ctl_arena_t *ret; assert(!compat || !init); @@ -515,16 +509,14 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) } static ctl_arena_t * -arenas_i(size_t i) -{ +arenas_i(size_t i) { ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false); assert(ret != NULL); return (ret); } static void -ctl_arena_clear(ctl_arena_t *ctl_arena) -{ +ctl_arena_clear(ctl_arena_t *ctl_arena) { ctl_arena->nthreads = 0; ctl_arena->dss = dss_prec_names[dss_prec_limit]; ctl_arena->decay_time = -1; @@ -544,8 +536,7 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) } static void -ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) -{ +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; if (config_stats) { @@ -575,8 
+566,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) static void ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, - bool destroyed) -{ + bool destroyed) { unsigned i; if (!destroyed) { @@ -605,13 +595,15 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, sdstats->astats.base += astats->astats.base; sdstats->astats.internal += astats->astats.internal; sdstats->astats.resident += astats->astats.resident; - } else + } else { assert(astats->astats.internal == 0); + } - if (!destroyed) + if (!destroyed) { sdstats->allocated_small += astats->allocated_small; - else + } else { assert(astats->allocated_small == 0); + } sdstats->nmalloc_small += astats->nmalloc_small; sdstats->ndalloc_small += astats->ndalloc_small; sdstats->nrequests_small += astats->nrequests_small; @@ -619,8 +611,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, if (!destroyed) { sdstats->astats.allocated_large += astats->astats.allocated_large; - } else + } else { assert(astats->astats.allocated_large == 0); + } sdstats->astats.nmalloc_large += astats->astats.nmalloc_large; sdstats->astats.ndalloc_large += astats->astats.ndalloc_large; sdstats->astats.nrequests_large += @@ -639,8 +632,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, if (!destroyed) { sdstats->bstats[i].curregs += astats->bstats[i].curregs; - } else + } else { assert(astats->bstats[i].curregs == 0); + } if (config_tcache) { sdstats->bstats[i].nfills += astats->bstats[i].nfills; @@ -652,8 +646,9 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, if (!destroyed) { sdstats->bstats[i].curslabs += astats->bstats[i].curslabs; - } else + } else { assert(astats->bstats[i].curslabs == 0); + } } for (i = 0; i < NSIZES - NBINS; i++) { @@ -664,16 +659,16 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, if (!destroyed) { sdstats->lstats[i].curlextents += 
astats->lstats[i].curlextents; - } else + } else { assert(astats->lstats[i].curlextents == 0); + } } } } static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, - unsigned i, bool destroyed) -{ + unsigned i, bool destroyed) { ctl_arena_t *ctl_arena = arenas_i(i); ctl_arena_clear(ctl_arena); @@ -683,8 +678,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, } static unsigned -ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) -{ +ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) { unsigned arena_ind; ctl_arena_t *ctl_arena; @@ -692,26 +686,29 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) NULL) { ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); arena_ind = ctl_arena->arena_ind; - } else + } else { arena_ind = ctl_arenas->narenas; + } /* Trigger stats allocation. */ - if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) + if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) { return (UINT_MAX); + } /* Initialize new arena. 
*/ - if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) + if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) { return (UINT_MAX); + } - if (arena_ind == ctl_arenas->narenas) + if (arena_ind == ctl_arenas->narenas) { ctl_arenas->narenas++; + } return (arena_ind); } static void -ctl_refresh(tsdn_t *tsdn) -{ +ctl_refresh(tsdn_t *tsdn) { unsigned i; ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); @@ -751,8 +748,7 @@ ctl_refresh(tsdn_t *tsdn) } static bool -ctl_init(tsdn_t *tsdn) -{ +ctl_init(tsdn_t *tsdn) { bool ret; malloc_mutex_lock(tsdn, &ctl_mtx); @@ -828,8 +824,7 @@ label_return: static int ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp) -{ + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; @@ -857,9 +852,10 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = j; break; } @@ -886,8 +882,9 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, goto label_return; } - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = (size_t)index; } @@ -925,8 +922,7 @@ label_return: int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ + void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; @@ -940,12 +936,14 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, depth = CTL_MAX_DEPTH; ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); - if (ret != 0) + if (ret != 0) { goto label_return; + } node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) + if (node != NULL && node->ctl) { ret = node->ctl(tsd, mib, depth, oldp, 
oldlenp, newp, newlen); + } else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; @@ -956,8 +954,7 @@ label_return: } int -ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) -{ +ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) { int ret; if (!ctl_initialized && ctl_init(tsdn)) { @@ -972,8 +969,7 @@ label_return: int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; @@ -1009,9 +1005,9 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, } /* Call the ctl function. */ - if (node && node->ctl) + if (node && node->ctl) { ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); - else { + } else { /* Partial MIB. */ ret = ENOENT; } @@ -1021,10 +1017,10 @@ label_return: } bool -ctl_boot(void) -{ - if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) { return (true); + } ctl_initialized = false; @@ -1032,20 +1028,17 @@ ctl_boot(void) } void -ctl_prefork(tsdn_t *tsdn) -{ +ctl_prefork(tsdn_t *tsdn) { malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(tsdn_t *tsdn) -{ +ctl_postfork_parent(tsdn_t *tsdn) { malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(tsdn_t *tsdn) -{ +ctl_postfork_child(tsdn_t *tsdn) { malloc_mutex_postfork_child(tsdn, &ctl_mtx); } @@ -1112,36 +1105,38 @@ ctl_postfork_child(tsdn_t *tsdn) #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ + if (!(c)) { \ return (ENOENT); \ - if (l) \ + } \ + if (l) { \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ READONLY(); 
\ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - if (l) \ + if (l) { \ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ return (ret); \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ + if (!(c)) { \ return (ENOENT); \ + } \ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ @@ -1156,8 +1151,7 @@ label_return: \ #define CTL_RO_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1179,13 +1173,13 @@ label_return: \ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ + if (!(c)) { \ return (ENOENT); \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ @@ -1198,8 +1192,7 @@ label_return: \ #define CTL_RO_NL_GEN(n, v, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1215,13 +1208,13 @@ label_return: \ #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ + if (!(c)) { \ return (ENOENT); \ + } \ READONLY(); \ oldval = (m(tsd)); \ READ(oldval, t); \ @@ -1234,8 +1227,7 @@ label_return: \ #define CTL_RO_CONFIG_GEN(n, t) \ static int \ n##_ctl(tsd_t *tsd, const size_t 
*mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen) \ -{ \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1254,15 +1246,15 @@ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); - if (newp != NULL) + if (newp != NULL) { ctl_refresh(tsd_tsdn(tsd)); + } READ(ctl_arenas->epoch, uint64_t); ret = 0; @@ -1317,15 +1309,15 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) static int thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; arena_t *oldarena; unsigned newind, oldind; oldarena = arena_choose(tsd, NULL); - if (oldarena == NULL) + if (oldarena == NULL) { return (EAGAIN); + } newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); @@ -1372,13 +1364,13 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, static int thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_tcache) + if (!config_tcache) { return (ENOENT); + } oldval = tcache_enabled_get(); if (newp != NULL) { @@ -1397,12 +1389,12 @@ label_return: static int thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (!config_tcache) + if (!config_tcache) { return (ENOENT); + } READONLY(); WRITEONLY(); @@ -1416,12 +1408,12 @@ label_return: static int thread_prof_name_ctl(tsd_t *tsd, const size_t 
*mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } READ_XOR_WRITE(); @@ -1432,8 +1424,9 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, } if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != - 0) + 0) { goto label_return; + } } else { const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); @@ -1446,13 +1439,13 @@ label_return: static int thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } oldval = prof_thread_active_get(tsd); if (newp != NULL) { @@ -1476,13 +1469,13 @@ label_return: static int tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; - if (!config_tcache) + if (!config_tcache) { return (ENOENT); + } malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); @@ -1500,13 +1493,13 @@ label_return: static int tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; - if (!config_tcache) + if (!config_tcache) { return (ENOENT); + } WRITEONLY(); tcache_ind = UINT_MAX; @@ -1524,13 +1517,13 @@ label_return: static int tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned tcache_ind; - if (!config_tcache) + if (!config_tcache) { return (ENOENT); + } WRITEONLY(); tcache_ind = UINT_MAX; @@ -1550,8 +1543,7 @@ label_return: 
static int arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; tsdn_t *tsdn = tsd_tsdn(tsd); unsigned arena_ind; @@ -1572,8 +1564,7 @@ label_return: } static void -arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) -{ +arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) { malloc_mutex_lock(tsdn, &ctl_mtx); { unsigned narenas = ctl_arenas->narenas; @@ -1586,8 +1577,9 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, narenas); - for (i = 0; i < narenas; i++) + for (i = 0; i < narenas; i++) { tarenas[i] = arena_get(tsdn, i, false); + } /* * No further need to hold ctl_mtx, since narenas and @@ -1596,8 +1588,9 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) malloc_mutex_unlock(tsdn, &ctl_mtx); for (i = 0; i < narenas; i++) { - if (tarenas[i] != NULL) + if (tarenas[i] != NULL) { arena_purge(tsdn, tarenas[i], all); + } } } else { arena_t *tarena; @@ -1609,16 +1602,16 @@ arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) /* No further need to hold ctl_mtx. 
*/ malloc_mutex_unlock(tsdn, &ctl_mtx); - if (tarena != NULL) + if (tarena != NULL) { arena_purge(tsdn, tarena, all); + } } } } static int arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; @@ -1634,8 +1627,7 @@ label_return: static int arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; @@ -1652,8 +1644,7 @@ label_return: static int arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, - arena_t **arena) -{ + arena_t **arena) { int ret; READONLY(); @@ -1678,16 +1669,16 @@ label_return: static int arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); - if (ret != 0) + if (ret != 0) { return (ret); + } arena_reset(tsd, arena); @@ -1696,8 +1687,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, static int arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; @@ -1705,8 +1695,9 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, newp, newlen, &arena_ind, &arena); - if (ret != 0) + if (ret != 0) { goto label_return; + } if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, true) != 0) { @@ -1735,8 +1726,7 @@ 
label_return: static int arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; unsigned arena_ind; @@ -1797,8 +1787,7 @@ label_return: static int arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; @@ -1833,8 +1822,7 @@ label_return: static int arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind; arena_t *arena; @@ -1867,8 +1855,7 @@ label_return: } static const ctl_named_node_t * -arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; malloc_mutex_lock(tsdn, &ctl_mtx); @@ -1894,8 +1881,7 @@ label_return: static int arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; @@ -1916,8 +1902,7 @@ label_return: static int arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; if (oldp != NULL && oldlenp != NULL) { @@ -1949,27 +1934,27 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - if (i > NBINS) 
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + if (i > NBINS) { return (NULL); + } return (super_arenas_bin_i_node); } CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ - if (i > NSIZES - NBINS) +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (i > NSIZES - NBINS) { return (NULL); + } return (super_arenas_lextent_i_node); } static int arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; extent_hooks_t *extent_hooks; unsigned arena_ind; @@ -1995,13 +1980,13 @@ label_return: static int prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen) -{ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } if (newp != NULL) { if (newlen != sizeof(bool)) { @@ -2010,8 +1995,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, } oldval = prof_thread_active_init_set(tsd_tsdn(tsd), *(bool *)newp); - } else + } else { oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; @@ -2021,13 +2007,13 @@ label_return: static int prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } if (newp != NULL) { if (newlen != sizeof(bool)) { @@ -2035,8 +2021,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, goto label_return; } oldval = 
prof_active_set(tsd_tsdn(tsd), *(bool *)newp); - } else + } else { oldval = prof_active_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; @@ -2046,13 +2033,13 @@ label_return: static int prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } WRITEONLY(); WRITE(filename, const char *); @@ -2069,13 +2056,13 @@ label_return: static int prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } if (newp != NULL) { if (newlen != sizeof(bool)) { @@ -2083,8 +2070,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, goto label_return; } oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); - } else + } else { oldval = prof_gdump_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; @@ -2094,18 +2082,19 @@ label_return: static int prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ + size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; - if (!config_prof) + if (!config_prof) { return (ENOENT); + } WRITEONLY(); WRITE(lg_sample, size_t); - if (lg_sample >= (sizeof(uint64_t) << 3)) + if (lg_sample >= (sizeof(uint64_t) << 3)) { lg_sample = (sizeof(uint64_t) << 3) - 1; + } prof_reset(tsd, lg_sample); @@ -2189,10 +2178,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, static const ctl_named_node_t * stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) -{ - if (j > NBINS) + size_t j) { + if (j > NBINS) { return (NULL); + } return (super_stats_arenas_i_bins_j_node); } @@ -2207,16 +2196,15 @@ 
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, static const ctl_named_node_t * stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, - size_t j) -{ - if (j > NSIZES - NBINS) + size_t j) { + if (j > NSIZES - NBINS) { return (NULL); + } return (super_stats_arenas_i_lextents_j_node); } static const ctl_named_node_t * -stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) -{ +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t *ret; size_t a; diff --git a/src/extent.c b/src/extent.c index be40aaad..5cf2e25c 100644 --- a/src/extent.c +++ b/src/extent.c @@ -75,8 +75,7 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena, /******************************************************************************/ extent_t * -extent_alloc(tsdn_t *tsdn, arena_t *arena) -{ +extent_alloc(tsdn_t *tsdn, arena_t *arena) { extent_t *extent; malloc_mutex_lock(tsdn, &arena->extent_cache_mtx); @@ -92,8 +91,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) } void -extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) -{ +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { malloc_mutex_lock(tsdn, &arena->extent_cache_mtx); ql_elm_new(extent, ql_link); ql_tail_insert(&arena->extent_cache, extent, ql_link); @@ -101,22 +99,21 @@ extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) } extent_hooks_t * -extent_hooks_get(arena_t *arena) -{ +extent_hooks_get(arena_t *arena) { return (base_extent_hooks_get(arena->base)); } extent_hooks_t * -extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) -{ +extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) { return (base_extent_hooks_set(arena->base, extent_hooks)); } static void -extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks) -{ - if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t 
**r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { *r_extent_hooks = extent_hooks_get(arena); + } } #ifdef JEMALLOC_JET @@ -124,8 +121,7 @@ extent_hooks_assure_initialized(arena_t *arena, extent_hooks_t **r_extent_hooks) #define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor) #endif size_t -extent_size_quantize_floor(size_t size) -{ +extent_size_quantize_floor(size_t size) { size_t ret; pszind_t pind; @@ -161,8 +157,7 @@ extent_size_quantize_t *extent_size_quantize_floor = #define extent_size_quantize_ceil JEMALLOC_N(n_extent_size_quantize_ceil) #endif size_t -extent_size_quantize_ceil(size_t size) -{ +extent_size_quantize_ceil(size_t size) { size_t ret; assert(size > 0); @@ -195,8 +190,7 @@ ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) static void extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1], - extent_t *extent) -{ + extent_t *extent) { size_t psz = extent_size_quantize_floor(extent_size_get(extent)); pszind_t pind = psz2ind(psz); @@ -207,8 +201,7 @@ extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1], static void extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1], - extent_t *extent) -{ + extent_t *extent) { size_t psz = extent_size_quantize_floor(extent_size_get(extent)); pszind_t pind = psz2ind(psz); @@ -220,12 +213,12 @@ extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1], static bool extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, const extent_t *extent, bool dependent, bool init_missing, - rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b) -{ + rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b) { *r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)extent_base_get(extent), dependent, init_missing); - if (!dependent && *r_elm_a == NULL) + if (!dependent && *r_elm_a == NULL) { return (true); + } assert(*r_elm_a != NULL); if (extent_size_get(extent) > PAGE) { @@ -237,33 +230,33 @@ 
extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, return (true); } assert(*r_elm_b != NULL); - } else + } else { *r_elm_b = NULL; + } return (false); } static void extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a, - rtree_elm_t *elm_b, const extent_t *extent) -{ + rtree_elm_t *elm_b, const extent_t *extent) { rtree_elm_write_acquired(tsdn, &extents_rtree, elm_a, extent); - if (elm_b != NULL) + if (elm_b != NULL) { rtree_elm_write_acquired(tsdn, &extents_rtree, elm_b, extent); + } } static void -extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b) -{ +extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b) { rtree_elm_release(tsdn, &extents_rtree, elm_a); - if (elm_b != NULL) + if (elm_b != NULL) { rtree_elm_release(tsdn, &extents_rtree, elm_b); + } } static void extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, - const extent_t *extent) -{ + const extent_t *extent) { size_t i; assert(extent_slab_get(extent)); @@ -276,8 +269,7 @@ extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, } static void -extent_gprof_add(tsdn_t *tsdn, const extent_t *extent) -{ +extent_gprof_add(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); if (opt_prof && extent_active_get(extent)) { @@ -291,14 +283,14 @@ extent_gprof_add(tsdn_t *tsdn, const extent_t *extent) */ high = atomic_read_zu(&highpages); } - if (cur > high && prof_gdump_get_unlocked()) + if (cur > high && prof_gdump_get_unlocked()) { prof_gdump(tsdn); + } } } static void -extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent) -{ +extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent) { cassert(config_prof); if (opt_prof && extent_active_get(extent)) { @@ -309,37 +301,37 @@ extent_gprof_sub(tsdn_t *tsdn, const extent_t *extent) } static bool -extent_register(tsdn_t *tsdn, const extent_t *extent) -{ +extent_register(tsdn_t *tsdn, const extent_t *extent) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, 
&rtree_ctx_fallback); rtree_elm_t *elm_a, *elm_b; if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a, - &elm_b)) + &elm_b)) { return (true); + } extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent); - if (extent_slab_get(extent)) + if (extent_slab_get(extent)) { extent_interior_register(tsdn, rtree_ctx, extent); + } extent_rtree_release(tsdn, elm_a, elm_b); - if (config_prof) + if (config_prof) { extent_gprof_add(tsdn, extent); + } return (false); } static void -extent_reregister(tsdn_t *tsdn, const extent_t *extent) -{ +extent_reregister(tsdn_t *tsdn, const extent_t *extent) { bool err = extent_register(tsdn, extent); assert(!err); } static void extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, - const extent_t *extent) -{ + const extent_t *extent) { size_t i; assert(extent_slab_get(extent)); @@ -352,8 +344,7 @@ extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, } static void -extent_deregister(tsdn_t *tsdn, extent_t *extent) -{ +extent_deregister(tsdn_t *tsdn, extent_t *extent) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_elm_t *elm_a, *elm_b; @@ -367,8 +358,9 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) } extent_rtree_release(tsdn, elm_a, elm_b); - if (config_prof) + if (config_prof) { extent_gprof_sub(tsdn, extent); + } } /* @@ -377,8 +369,7 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent) */ static extent_t * extent_first_best_fit(tsdn_t *tsdn, arena_t *arena, - extent_heap_t extent_heaps[NPSIZES+1], size_t size) -{ + extent_heap_t extent_heaps[NPSIZES+1], size_t size) { pszind_t pind, i; malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); @@ -386,8 +377,9 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena, pind = psz2ind(extent_size_quantize_ceil(size)); for (i = pind; i < NPSIZES+1; i++) { extent_t *extent = extent_heap_first(&extent_heaps[i]); - if (extent != NULL) + if (extent != NULL) { return (extent); + } } return (NULL); @@ 
-395,8 +387,7 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena, static void extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - bool cache, extent_t *extent) -{ + bool cache, extent_t *extent) { /* * Leak extent after making sure its pages have already been purged, so * that this is only a virtual memory leak. @@ -415,15 +406,15 @@ static extent_t * extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_heap_t extent_heaps[NPSIZES+1], bool locked, bool cache, void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero, - bool *commit, bool slab) -{ + bool *commit, bool slab) { extent_t *extent; rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); size_t size, alloc_size, leadsize, trailsize; - if (locked) + if (locked) { malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); + } assert(new_addr == NULL || !slab); assert(pad == 0 || !slab); assert(alignment > 0); @@ -452,10 +443,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, size = usize + pad; alloc_size = size + PAGE_CEILING(alignment) - PAGE; /* Beware size_t wrap-around. 
*/ - if (alloc_size < usize) + if (alloc_size < usize) { return (NULL); - if (!locked) + } + if (!locked) { malloc_mutex_lock(tsdn, &arena->extents_mtx); + } extent_hooks_assure_initialized(arena, r_extent_hooks); if (new_addr != NULL) { rtree_elm_t *elm; @@ -470,19 +463,22 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (extent_arena_get(extent) != arena || extent_size_get(extent) < size || extent_active_get(extent) || - extent_retained_get(extent) == cache) + extent_retained_get(extent) == cache) { extent = NULL; + } } rtree_elm_release(tsdn, &extents_rtree, elm); - } else + } else { extent = NULL; + } } else { extent = extent_first_best_fit(tsdn, arena, extent_heaps, alloc_size); } if (extent == NULL) { - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); + } return (NULL); } extent_heaps_remove(tsdn, extent_heaps, extent); @@ -493,10 +489,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, assert(new_addr == NULL || leadsize == 0); assert(extent_size_get(extent) >= leadsize + size); trailsize = extent_size_get(extent) - leadsize - size; - if (extent_zeroed_get(extent)) + if (extent_zeroed_get(extent)) { *zero = true; - if (extent_committed_get(extent)) + } + if (extent_committed_get(extent)) { *commit = true; + } /* Split the lead. 
*/ if (leadsize != 0) { @@ -507,8 +505,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (extent == NULL) { extent_deregister(tsdn, lead); extent_leak(tsdn, arena, r_extent_hooks, cache, lead); - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); + } return (NULL); } extent_heaps_insert(tsdn, extent_heaps, lead); @@ -523,8 +522,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_deregister(tsdn, extent); extent_leak(tsdn, arena, r_extent_hooks, cache, extent); - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); + } return (NULL); } extent_heaps_insert(tsdn, extent_heaps, trail); @@ -540,8 +540,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (*commit && !extent_committed_get(extent)) { if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent, 0, extent_size_get(extent))) { - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); + } extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache, extent); return (NULL); @@ -549,16 +550,18 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_zeroed_set(extent, true); } - if (pad != 0) + if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); + } extent_active_set(extent, true); if (slab) { extent_slab_set(extent, slab); extent_interior_register(tsdn, rtree_ctx, extent); } - if (!locked) + if (!locked) { malloc_mutex_unlock(tsdn, &arena->extents_mtx); + } if (*zero) { if (!extent_zeroed_get(extent)) { @@ -569,8 +572,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, size_t *p = (size_t *)(uintptr_t) extent_addr_get(extent); - for (i = 0; i < usize / sizeof(size_t); i++) + for (i = 0; i < usize / sizeof(size_t); i++) { assert(p[i] == 0); + } } } return (extent); @@ -584,8 +588,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t 
**r_extent_hooks, */ static void * extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) -{ + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; assert(size != 0); @@ -594,17 +597,20 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) + commit)) != NULL) { return (ret); + } /* mmap. */ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) - != NULL) + != NULL) { return (ret); + } /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, - commit)) != NULL) + commit)) != NULL) { return (ret); + } /* All strategies for allocation failed. */ return (NULL); @@ -613,8 +619,7 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, static extent_t * extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize, - size_t pad, size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t pad, size_t alignment, bool *zero, bool *commit, bool slab) { extent_t *extent; assert(usize + pad != 0); @@ -629,8 +634,7 @@ extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena, extent_t * extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true, @@ -640,16 +644,14 @@ extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena, extent_t * extent_alloc_cache(tsdn_t *tsdn, arena_t *arena, 
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false, new_addr, usize, pad, alignment, zero, commit, slab)); } static void * extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) -{ + size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, @@ -659,8 +661,7 @@ extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, static void * extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, unsigned arena_ind) -{ + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { tsdn_t *tsdn; arena_t *arena; @@ -680,10 +681,10 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, static void extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extent_t *extent) -{ - if (config_stats) + extent_t *extent) { + if (config_stats) { arena->stats.retained += extent_size_get(extent); + } extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained, false, extent); } @@ -696,8 +697,7 @@ extent_retain(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, static extent_t * extent_grow_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { extent_t *extent; void *ptr; size_t size, alloc_size, alloc_size_min, leadsize, trailsize; @@ -713,13 +713,16 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, alloc_size = pind2sz(arena->extent_grow_next); alloc_size_min = size + PAGE_CEILING(alignment) 
- PAGE; /* Beware size_t wrap-around. */ - if (alloc_size_min < usize) + if (alloc_size_min < usize) { return (NULL); - if (alloc_size < alloc_size_min) + } + if (alloc_size < alloc_size_min) { return (NULL); + } extent = extent_alloc(tsdn, arena); - if (extent == NULL) + if (extent == NULL) { return (NULL); + } zeroed = false; committed = false; ptr = extent_alloc_core(tsdn, arena, new_addr, alloc_size, PAGE, @@ -741,10 +744,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, assert(new_addr == NULL || leadsize == 0); assert(alloc_size >= leadsize + size); trailsize = alloc_size - leadsize - size; - if (extent_zeroed_get(extent)) + if (extent_zeroed_get(extent)) { *zero = true; - if (extent_committed_get(extent)) + } + if (extent_committed_get(extent)) { *commit = true; + } /* Split the lead. */ if (leadsize != 0) { @@ -790,8 +795,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, /* Adjust gprof stats now that extent is final size. */ extent_gprof_add(tsdn, extent); } - if (pad != 0) + if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); + } if (slab) { rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, @@ -800,18 +806,19 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, extent_slab_set(extent, true); extent_interior_register(tsdn, rtree_ctx, extent); } - if (*zero && !extent_zeroed_get(extent)) + if (*zero && !extent_zeroed_get(extent)) { memset(extent_addr_get(extent), 0, extent_usize_get(extent)); - if (arena->extent_grow_next + 1 < NPSIZES) + } + if (arena->extent_grow_next + 1 < NPSIZES) { arena->extent_grow_next++; + } return (extent); } static extent_t * extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { extent_t *extent; assert(usize != 0); @@ -825,8 +832,9 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, 
size_t size = usize + pad; arena->stats.retained -= size; } - if (config_prof) + if (config_prof) { extent_gprof_add(tsdn, extent); + } } if (!config_munmap && extent == NULL) { extent = extent_grow_retained(tsdn, arena, r_extent_hooks, @@ -839,16 +847,16 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, static extent_t * extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { extent_t *extent; size_t size; void *addr; size = usize + pad; extent = extent_alloc(tsdn, arena); - if (extent == NULL) + if (extent == NULL) { return (NULL); + } if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ addr = extent_alloc_default_impl(tsdn, arena, new_addr, size, @@ -863,8 +871,9 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, } extent_init(extent, arena, addr, size, usize, arena_extent_sn_next(arena), true, zero, commit, slab); - if (pad != 0) + if (pad != 0) { extent_addr_randomize(tsdn, extent, alignment); + } if (extent_register(tsdn, extent)) { extent_leak(tsdn, arena, r_extent_hooks, false, extent); return (NULL); @@ -876,8 +885,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, extent_t * extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, - size_t alignment, bool *zero, bool *commit, bool slab) -{ + size_t alignment, bool *zero, bool *commit, bool slab) { extent_t *extent; extent_hooks_assure_initialized(arena, r_extent_hooks); @@ -893,16 +901,19 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, } static bool -extent_can_coalesce(const extent_t *a, const extent_t *b) -{ - if (extent_arena_get(a) != extent_arena_get(b)) +extent_can_coalesce(const extent_t *a, const extent_t *b) { + if (extent_arena_get(a) != extent_arena_get(b)) { return 
(false); - if (extent_active_get(a) != extent_active_get(b)) + } + if (extent_active_get(a) != extent_active_get(b)) { return (false); - if (extent_committed_get(a) != extent_committed_get(b)) + } + if (extent_committed_get(a) != extent_committed_get(b)) { return (false); - if (extent_retained_get(a) != extent_retained_get(b)) + } + if (extent_retained_get(a) != extent_retained_get(b)) { return (false); + } return (true); } @@ -910,10 +921,10 @@ extent_can_coalesce(const extent_t *a, const extent_t *b) static void extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, - extent_heap_t extent_heaps[NPSIZES+1], bool cache) -{ - if (!extent_can_coalesce(a, b)) + extent_heap_t extent_heaps[NPSIZES+1], bool cache) { + if (!extent_can_coalesce(a, b)) { return; + } extent_heaps_remove(tsdn, extent_heaps, a); extent_heaps_remove(tsdn, extent_heaps, b); @@ -937,8 +948,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, static void extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, - extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent) -{ + extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent) { extent_t *prev, *next; rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -980,8 +990,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, } void -extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) -{ +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; if (extent_register(tsdn, extent)) { @@ -993,8 +1002,7 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) void extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) -{ + extent_hooks_t **r_extent_hooks, extent_t *extent) { assert(extent_base_get(extent) != NULL); 
assert(extent_size_get(extent) != 0); @@ -1006,17 +1014,16 @@ extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena, } static bool -extent_dalloc_default_impl(void *addr, size_t size) -{ - if (!have_dss || !extent_in_dss(addr)) +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { return (extent_dalloc_mmap(addr, size)); + } return (true); } static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - bool committed, unsigned arena_ind) -{ + bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); return (extent_dalloc_default_impl(addr, size)); @@ -1024,8 +1031,7 @@ extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) -{ + extent_hooks_t **r_extent_hooks, extent_t *extent) { bool err; assert(extent_base_get(extent) != NULL); @@ -1050,46 +1056,50 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, extent_committed_get(extent), arena_ind_get(arena))); } - if (!err) + if (!err) { extent_dalloc(tsdn, arena, extent); + } return (err); } void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *extent) -{ + extent_hooks_t **r_extent_hooks, extent_t *extent) { bool zeroed; - if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { return; + } extent_reregister(tsdn, extent); /* Try to decommit; purge if that fails. 
*/ - if (!extent_committed_get(extent)) + if (!extent_committed_get(extent)) { zeroed = true; - else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, - 0, extent_size_get(extent))) + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { zeroed = true; - else if ((*r_extent_hooks)->purge_lazy != NULL && + } else if ((*r_extent_hooks)->purge_lazy != NULL && !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, - extent_size_get(extent), arena_ind_get(arena))) + extent_size_get(extent), arena_ind_get(arena))) { zeroed = false; - else if ((*r_extent_hooks)->purge_forced != NULL && + } else if ((*r_extent_hooks)->purge_forced != NULL && !(*r_extent_hooks)->purge_forced(*r_extent_hooks, extent_base_get(extent), extent_size_get(extent), 0, - extent_size_get(extent), arena_ind_get(arena))) + extent_size_get(extent), arena_ind_get(arena))) { zeroed = true; - else + } else { zeroed = false; + } extent_zeroed_set(extent, zeroed); - if (config_stats) + if (config_stats) { arena->stats.retained += extent_size_get(extent); - if (config_prof) + } + if (config_prof) { extent_gprof_sub(tsdn, extent); + } extent_record(tsdn, arena, r_extent_hooks, arena->extents_retained, false, extent); @@ -1097,8 +1107,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) -{ + size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), @@ -1108,8 +1117,7 @@ extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) -{ + size_t length) { bool err; 
extent_hooks_assure_initialized(arena, r_extent_hooks); @@ -1122,8 +1130,7 @@ extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, static bool extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) -{ + size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), @@ -1133,8 +1140,7 @@ extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) -{ + size_t length) { bool err; extent_hooks_assure_initialized(arena, r_extent_hooks); @@ -1150,8 +1156,7 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, #ifdef PAGES_CAN_PURGE_LAZY static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t offset, size_t length, unsigned arena_ind) -{ + size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); assert(addr != NULL); assert((offset & PAGE_MASK) == 0); @@ -1166,8 +1171,7 @@ extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) -{ + size_t length) { extent_hooks_assure_initialized(arena, r_extent_hooks); return ((*r_extent_hooks)->purge_lazy == NULL || (*r_extent_hooks)->purge_lazy(*r_extent_hooks, @@ -1178,8 +1182,7 @@ extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, #ifdef PAGES_CAN_PURGE_FORCED static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, - size_t size, size_t offset, size_t length, unsigned arena_ind) -{ + size_t size, size_t offset, size_t length, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); assert(addr != 
NULL); assert((offset & PAGE_MASK) == 0); @@ -1194,8 +1197,7 @@ extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, - size_t length) -{ + size_t length) { extent_hooks_assure_initialized(arena, r_extent_hooks); return ((*r_extent_hooks)->purge_forced == NULL || (*r_extent_hooks)->purge_forced(*r_extent_hooks, @@ -1206,12 +1208,12 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, #ifdef JEMALLOC_MAPS_COALESCE static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, - size_t size_a, size_t size_b, bool committed, unsigned arena_ind) -{ + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); - if (!maps_coalesce) + if (!maps_coalesce) { return (true); + } return (false); } #endif @@ -1219,8 +1221,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, extent_t * extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, - size_t usize_a, size_t size_b, size_t usize_b) -{ + size_t usize_a, size_t size_b, size_t usize_b) { extent_t *trail; rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -1230,12 +1231,14 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, r_extent_hooks); - if ((*r_extent_hooks)->split == NULL) + if ((*r_extent_hooks)->split == NULL) { return (NULL); + } trail = extent_alloc(tsdn, arena); - if (trail == NULL) + if (trail == NULL) { goto label_error_a; + } { extent_t lead; @@ -1246,8 +1249,9 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_slab_get(extent)); if (extent_rtree_acquire(tsdn, rtree_ctx, &lead, false, true, - &lead_elm_a, &lead_elm_b)) + &lead_elm_a, &lead_elm_b)) { goto label_error_b; + } } extent_init(trail, 
arena, (void *)((uintptr_t)extent_base_get(extent) + @@ -1255,13 +1259,15 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, extent_active_get(extent), extent_zeroed_get(extent), extent_committed_get(extent), extent_slab_get(extent)); if (extent_rtree_acquire(tsdn, rtree_ctx, trail, false, true, - &trail_elm_a, &trail_elm_b)) + &trail_elm_a, &trail_elm_b)) { goto label_error_c; + } if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), size_a + size_b, size_a, size_b, extent_committed_get(extent), - arena_ind_get(arena))) + arena_ind_get(arena))) { goto label_error_d; + } extent_size_set(extent, size_a); extent_usize_set(extent, usize_a); @@ -1284,12 +1290,13 @@ label_error_a: } static bool -extent_merge_default_impl(void *addr_a, void *addr_b) -{ - if (!maps_coalesce) +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce) { return (true); - if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { return (true); + } return (false); } @@ -1297,8 +1304,7 @@ extent_merge_default_impl(void *addr_a, void *addr_b) #ifdef JEMALLOC_MAPS_COALESCE static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, - void *addr_b, size_t size_b, bool committed, unsigned arena_ind) -{ + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { assert(extent_hooks == &extent_hooks_default); return (extent_merge_default_impl(addr_a, addr_b)); @@ -1307,8 +1313,7 @@ extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, - extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) -{ + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { bool err; rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -1316,8 +1321,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, extent_hooks_assure_initialized(arena, 
r_extent_hooks); - if ((*r_extent_hooks)->merge == NULL) + if ((*r_extent_hooks)->merge == NULL) { return (true); + } if (*r_extent_hooks == &extent_hooks_default) { /* Call directly to propagate tsdn. */ @@ -1330,8 +1336,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, arena_ind_get(arena)); } - if (err) + if (err) { return (true); + } /* * The rtree writes must happen while all the relevant elements are @@ -1350,8 +1357,9 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, if (b_elm_b != NULL) { rtree_elm_write_acquired(tsdn, &extents_rtree, b_elm_a, NULL); rtree_elm_release(tsdn, &extents_rtree, b_elm_a); - } else + } else { b_elm_b = b_elm_a; + } extent_size_set(a, extent_size_get(a) + extent_size_get(b)); extent_usize_set(a, extent_usize_get(a) + extent_usize_get(b)); @@ -1368,14 +1376,15 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, } bool -extent_boot(void) -{ +extent_boot(void) { if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - - LG_PAGE))) + LG_PAGE))) { return (true); + } - if (have_dss) + if (have_dss) { extent_dss_boot(); + } return (false); } diff --git a/src/extent_dss.c b/src/extent_dss.c index 5aa95b1c..d61d5464 100644 --- a/src/extent_dss.c +++ b/src/extent_dss.c @@ -30,8 +30,7 @@ static void *dss_max; /******************************************************************************/ static void * -extent_dss_sbrk(intptr_t increment) -{ +extent_dss_sbrk(intptr_t increment) { #ifdef JEMALLOC_DSS return (sbrk(increment)); #else @@ -41,28 +40,27 @@ extent_dss_sbrk(intptr_t increment) } dss_prec_t -extent_dss_prec_get(void) -{ +extent_dss_prec_get(void) { dss_prec_t ret; - if (!have_dss) + if (!have_dss) { return (dss_prec_disabled); + } ret = (dss_prec_t)atomic_read_u(&dss_prec_default); return (ret); } bool -extent_dss_prec_set(dss_prec_t dss_prec) -{ - if (!have_dss) +extent_dss_prec_set(dss_prec_t dss_prec) { + if (!have_dss) { return (dss_prec != dss_prec_disabled); + } atomic_write_u(&dss_prec_default, 
(unsigned)dss_prec); return (false); } static void * -extent_dss_max_update(void *new_addr) -{ +extent_dss_max_update(void *new_addr) { void *max_cur; spin_t spinner; @@ -83,20 +81,21 @@ extent_dss_max_update(void *new_addr) spin_adaptive(&spinner); continue; } - if (!atomic_cas_p(&dss_max, max_prev, max_cur)) + if (!atomic_cas_p(&dss_max, max_prev, max_cur)) { break; + } } /* Fixed new_addr can only be supported if it is at the edge of DSS. */ - if (new_addr != NULL && max_cur != new_addr) + if (new_addr != NULL && max_cur != new_addr) { return (NULL); + } return (max_cur); } void * extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit) -{ + size_t alignment, bool *zero, bool *commit) { extent_t *gap; cassert(have_dss); @@ -107,12 +106,14 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, * sbrk() uses a signed increment argument, so take care not to * interpret a large allocation request as a negative increment. */ - if ((intptr_t)size < 0) + if ((intptr_t)size < 0) { return (NULL); + } gap = extent_alloc(tsdn, arena); - if (gap == NULL) + if (gap == NULL) { return (NULL); + } if (!atomic_read_u(&dss_exhausted)) { /* @@ -126,8 +127,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, intptr_t incr; max_cur = extent_dss_max_update(new_addr); - if (max_cur == NULL) + if (max_cur == NULL) { goto label_oom; + } /* * Compute how much gap space (if any) is necessary to @@ -145,8 +147,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, } dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)max_cur || - (uintptr_t)dss_next < (uintptr_t)max_cur) + (uintptr_t)dss_next < (uintptr_t)max_cur) { goto label_oom; /* Wrap-around. 
*/ + } incr = gap_size + size; /* @@ -155,19 +158,22 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, * DSS while dss_max is greater than the current DSS * max reported by sbrk(0). */ - if (atomic_cas_p(&dss_max, max_cur, dss_next)) + if (atomic_cas_p(&dss_max, max_cur, dss_next)) { continue; + } /* Try to allocate. */ dss_prev = extent_dss_sbrk(incr); if (dss_prev == max_cur) { /* Success. */ - if (gap_size != 0) + if (gap_size != 0) { extent_dalloc_gap(tsdn, arena, gap); - else + } else { extent_dalloc(tsdn, arena, gap); - if (!*commit) + } + if (!*commit) { *commit = pages_decommit(ret, size); + } if (*zero && *commit) { extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; @@ -177,8 +183,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size, 0, true, false, true, false); if (extent_purge_forced_wrapper(tsdn, arena, &extent_hooks, &extent, 0, - size)) + size)) { memset(ret, 0, size); + } } return (ret); } @@ -204,30 +211,28 @@ label_oom: } static bool -extent_in_dss_helper(void *addr, void *max) -{ +extent_in_dss_helper(void *addr, void *max) { return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < (uintptr_t)max); } bool -extent_in_dss(void *addr) -{ +extent_in_dss(void *addr) { cassert(have_dss); return (extent_in_dss_helper(addr, atomic_read_p(&dss_max))); } bool -extent_dss_mergeable(void *addr_a, void *addr_b) -{ +extent_dss_mergeable(void *addr_a, void *addr_b) { void *max; cassert(have_dss); if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < - (uintptr_t)dss_base) + (uintptr_t)dss_base) { return (true); + } max = atomic_read_p(&dss_max); return (extent_in_dss_helper(addr_a, max) == @@ -235,8 +240,7 @@ extent_dss_mergeable(void *addr_a, void *addr_b) } void -extent_dss_boot(void) -{ +extent_dss_boot(void) { cassert(have_dss); dss_base = extent_dss_sbrk(0); diff --git a/src/extent_mmap.c b/src/extent_mmap.c index e685a45b..2c00b588 100644 --- a/src/extent_mmap.c 
+++ b/src/extent_mmap.c @@ -4,21 +4,23 @@ /******************************************************************************/ static void * -extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) -{ +extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, + bool *commit) { void *ret; size_t alloc_size; alloc_size = size + alignment - PAGE; /* Beware size_t wrap-around. */ - if (alloc_size < size) + if (alloc_size < size) { return (NULL); + } do { void *pages; size_t leadsize; pages = pages_map(NULL, alloc_size, commit); - if (pages == NULL) + if (pages == NULL) { return (NULL); + } leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; ret = pages_trim(pages, alloc_size, leadsize, size, commit); @@ -31,8 +33,7 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) void * extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit) -{ + bool *commit) { void *ret; size_t offset; @@ -52,8 +53,9 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, assert(alignment != 0); ret = pages_map(new_addr, size, commit); - if (ret == NULL || ret == new_addr) + if (ret == NULL || ret == new_addr) { return (ret); + } assert(new_addr == NULL); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { @@ -67,9 +69,9 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, } bool -extent_dalloc_mmap(void *addr, size_t size) -{ - if (config_munmap) +extent_dalloc_mmap(void *addr, size_t size) { + if (config_munmap) { pages_unmap(addr, size); + } return (!config_munmap); } diff --git a/src/jemalloc.c b/src/jemalloc.c index af2a53a2..2de42c3e 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -209,8 +209,7 @@ static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI -_init_init_lock(void) -{ +_init_init_lock(void) { /* * If another constructor in the same binary is using mallctl to 
e.g. * set up extent hooks, it may end up running before this one, and @@ -221,8 +220,9 @@ _init_init_lock(void) * the process creation, before any separate thread normally starts * doing anything. */ - if (!init_lock_initialized) + if (!init_lock_initialized) { malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); + } init_lock_initialized = true; } @@ -273,24 +273,23 @@ static bool malloc_init_hard(void); */ JEMALLOC_ALWAYS_INLINE_C bool -malloc_initialized(void) -{ +malloc_initialized(void) { return (malloc_init_state == malloc_init_initialized); } JEMALLOC_ALWAYS_INLINE_C bool -malloc_init_a0(void) -{ - if (unlikely(malloc_init_state == malloc_init_uninitialized)) +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { return (malloc_init_hard_a0()); + } return (false); } JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ - if (unlikely(!malloc_initialized()) && malloc_init_hard()) +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { return (true); + } return (false); } @@ -300,30 +299,27 @@ malloc_init(void) */ static void * -a0ialloc(size_t size, bool zero, bool is_internal) -{ - if (unlikely(malloc_init_a0())) +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { return (NULL); + } return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, is_internal, arena_get(TSDN_NULL, 0, true), true)); } static void -a0idalloc(extent_t *extent, void *ptr, bool is_internal) -{ +a0idalloc(extent_t *extent, void *ptr, bool is_internal) { idalloctm(TSDN_NULL, extent, ptr, false, is_internal, true); } void * -a0malloc(size_t size) -{ +a0malloc(size_t size) { return (a0ialloc(size, false, true)); } void -a0dalloc(void *ptr) -{ +a0dalloc(void *ptr) { a0idalloc(iealloc(NULL, ptr), ptr, true); } @@ -334,17 +330,16 @@ a0dalloc(void *ptr) */ void * -bootstrap_malloc(size_t size) -{ - if (unlikely(size == 0)) +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { 
size = 1; + } return (a0ialloc(size, false, false)); } void * -bootstrap_calloc(size_t num, size_t size) -{ +bootstrap_calloc(size_t num, size_t size) { size_t num_size; num_size = num * size; @@ -357,49 +352,46 @@ bootstrap_calloc(size_t num, size_t size) } void -bootstrap_free(void *ptr) -{ - if (unlikely(ptr == NULL)) +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { return; + } a0idalloc(iealloc(NULL, ptr), ptr, false); } void -arena_set(unsigned ind, arena_t *arena) -{ +arena_set(unsigned ind, arena_t *arena) { atomic_write_p((void **)&arenas[ind], arena); } static void -narenas_total_set(unsigned narenas) -{ +narenas_total_set(unsigned narenas) { atomic_write_u(&narenas_total, narenas); } static void -narenas_total_inc(void) -{ +narenas_total_inc(void) { atomic_add_u(&narenas_total, 1); } unsigned -narenas_total_get(void) -{ +narenas_total_get(void) { return (atomic_read_u(&narenas_total)); } /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) -{ +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; assert(ind <= narenas_total_get()); - if (ind > MALLOCX_ARENA_MAX) + if (ind > MALLOCX_ARENA_MAX) { return (NULL); - if (ind == narenas_total_get()) + } + if (ind == narenas_total_get()) { narenas_total_inc(); + } /* * Another thread may have already initialized arenas[ind] if it's an @@ -418,8 +410,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) } arena_t * -arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) -{ +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; malloc_mutex_lock(tsdn, &arenas_lock); @@ -429,25 +420,25 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) } static void -arena_bind(tsd_t *tsd, unsigned ind, bool internal) -{ +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { 
arena_t *arena; - if (!tsd_nominal(tsd)) + if (!tsd_nominal(tsd)) { return; + } arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_inc(arena, internal); - if (internal) + if (internal) { tsd_iarena_set(tsd, arena); - else + } else { tsd_arena_set(tsd, arena); + } } void -arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) -{ +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; oldarena = arena_get(tsd_tsdn(tsd), oldind, false); @@ -458,21 +449,20 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) } static void -arena_unbind(tsd_t *tsd, unsigned ind, bool internal) -{ +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_dec(arena, internal); - if (internal) + if (internal) { tsd_iarena_set(tsd, NULL); - else + } else { tsd_arena_set(tsd, NULL); + } } arena_tdata_t * -arena_tdata_get_hard(tsd_t *tsd, unsigned ind) -{ +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata, *arenas_tdata_old; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); unsigned narenas_tdata_old, i; @@ -541,15 +531,15 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind) /* Read the refreshed tdata array. */ tdata = &arenas_tdata[ind]; label_return: - if (arenas_tdata_old != NULL) + if (arenas_tdata_old != NULL) { a0dalloc(arenas_tdata_old); + } return (tdata); } /* Slow path, called only by arena_choose(). */ arena_t * -arena_choose_hard(tsd_t *tsd, bool internal) -{ +arena_choose_hard(tsd_t *tsd, bool internal) { arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (narenas_auto > 1) { @@ -563,8 +553,9 @@ arena_choose_hard(tsd_t *tsd, bool internal) * choose[1]: For internal metadata allocation. 
*/ - for (j = 0; j < 2; j++) + for (j = 0; j < 2; j++) { choose[j] = 0; + } first_null = narenas_auto; malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); @@ -580,8 +571,9 @@ arena_choose_hard(tsd_t *tsd, bool internal) tsd_tsdn(tsd), i, false), !!j) < arena_nthreads_get(arena_get( tsd_tsdn(tsd), choose[j], false), - !!j)) + !!j)) { choose[j] = i; + } } } else if (first_null == narenas_auto) { /* @@ -622,8 +614,9 @@ arena_choose_hard(tsd_t *tsd, bool internal) &arenas_lock); return (NULL); } - if (!!j == internal) + if (!!j == internal) { ret = arena; + } } arena_bind(tsd, choose[j], !!j); } @@ -638,28 +631,27 @@ arena_choose_hard(tsd_t *tsd, bool internal) } void -iarena_cleanup(tsd_t *tsd) -{ +iarena_cleanup(tsd_t *tsd) { arena_t *iarena; iarena = tsd_iarena_get(tsd); - if (iarena != NULL) + if (iarena != NULL) { arena_unbind(tsd, arena_ind_get(iarena), true); + } } void -arena_cleanup(tsd_t *tsd) -{ +arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); - if (arena != NULL) + if (arena != NULL) { arena_unbind(tsd, arena_ind_get(arena), false); + } } void -arenas_tdata_cleanup(tsd_t *tsd) -{ +arenas_tdata_cleanup(tsd_t *tsd) { arena_tdata_t *arenas_tdata; /* Prevent tsd->arenas_tdata from being (re)created. 
*/ @@ -673,8 +665,7 @@ arenas_tdata_cleanup(tsd_t *tsd) } static void -stats_print_atexit(void) -{ +stats_print_atexit(void) { if (config_tcache && config_stats) { tsdn_t *tsdn; unsigned narenas, i; @@ -720,19 +711,18 @@ stats_print_atexit(void) #ifndef JEMALLOC_HAVE_SECURE_GETENV static char * -secure_getenv(const char *name) -{ +secure_getenv(const char *name) { # ifdef JEMALLOC_HAVE_ISSETUGID - if (issetugid() != 0) + if (issetugid() != 0) { return (NULL); + } # endif return (getenv(name)); } #endif static unsigned -malloc_ncpus(void) -{ +malloc_ncpus(void) { long result; #ifdef _WIN32 @@ -761,8 +751,7 @@ malloc_ncpus(void) static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ + char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; @@ -837,15 +826,13 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, static void malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ + size_t vlen) { malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); } static void -malloc_slow_flag_init(void) -{ +malloc_slow_flag_init(void) { /* * Combine the runtime options into malloc_slow for fast path. Called * after processing all the options. 
@@ -860,8 +847,7 @@ malloc_slow_flag_init(void) } static void -malloc_conf_init(void) -{ +malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; @@ -948,17 +934,18 @@ malloc_conf_init(void) (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) #define CONF_HANDLE_BOOL(o, n, cont) \ if (CONF_MATCH(n)) { \ - if (CONF_MATCH_VALUE("true")) \ + if (CONF_MATCH_VALUE("true")) { \ o = true; \ - else if (CONF_MATCH_VALUE("false")) \ + } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ - else { \ + } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ - if (cont) \ + if (cont) { \ continue; \ + } \ } #define CONF_MIN_no(um, min) false #define CONF_MIN_yes(um, min) ((um) < (min)) @@ -978,13 +965,15 @@ malloc_conf_init(void) k, klen, v, vlen); \ } else if (clip) { \ if (CONF_MIN_##check_min(um, \ - (min))) \ + (min))) { \ o = (t)(min); \ - else if (CONF_MAX_##check_max( \ - um, (max))) \ + } else if ( \ + CONF_MAX_##check_max(um, \ + (max))) { \ o = (t)(max); \ - else \ + } else { \ o = (t)um; \ + } \ } else { \ if (CONF_MIN_##check_min(um, \ (min)) || \ @@ -994,8 +983,9 @@ malloc_conf_init(void) "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ - } else \ + } else { \ o = (t)um; \ + } \ } \ continue; \ } @@ -1023,8 +1013,9 @@ malloc_conf_init(void) malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ - } else \ + } else { \ o = l; \ + } \ continue; \ } #define CONF_HANDLE_CHAR_P(o, n, d) \ @@ -1148,8 +1139,7 @@ malloc_conf_init(void) } static bool -malloc_init_hard_needed(void) -{ +malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* @@ -1177,35 +1167,42 @@ malloc_init_hard_needed(void) } static bool -malloc_init_hard_a0_locked() -{ +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; - if (config_prof) + if (config_prof) { prof_boot0(); + } malloc_conf_init(); if (opt_stats_print) { /* Print statistics at 
exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } } pages_boot(); - if (base_boot(TSDN_NULL)) + if (base_boot(TSDN_NULL)) { return (true); - if (extent_boot()) + } + if (extent_boot()) { return (true); - if (ctl_boot()) + } + if (ctl_boot()) { return (true); - if (config_prof) + } + if (config_prof) { prof_boot1(); + } arena_boot(); - if (config_tcache && tcache_boot(TSDN_NULL)) + if (config_tcache && tcache_boot(TSDN_NULL)) { return (true); - if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) + } + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) { return (true); + } /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). @@ -1218,9 +1215,10 @@ malloc_init_hard_a0_locked() * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ - if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) == - NULL) + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { return (true); + } malloc_init_state = malloc_init_a0_initialized; @@ -1228,8 +1226,7 @@ malloc_init_hard_a0_locked() } static bool -malloc_init_hard_a0(void) -{ +malloc_init_hard_a0(void) { bool ret; malloc_mutex_lock(TSDN_NULL, &init_lock); @@ -1240,8 +1237,7 @@ malloc_init_hard_a0(void) /* Initialize data structures which may trigger recursive allocation. 
*/ static bool -malloc_init_hard_recursible(void) -{ +malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); @@ -1253,8 +1249,9 @@ malloc_init_hard_recursible(void) if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } return (true); } #endif @@ -1263,20 +1260,21 @@ malloc_init_hard_recursible(void) } static bool -malloc_init_hard_finish(tsdn_t *tsdn) -{ - if (malloc_mutex_boot()) +malloc_init_hard_finish(tsdn_t *tsdn) { + if (malloc_mutex_boot()) { return (true); + } if (opt_narenas == 0) { /* * For SMP systems, create more than one arena per CPU by * default. */ - if (ncpus > 1) + if (ncpus > 1) { opt_narenas = ncpus << 2; - else + } else { opt_narenas = 1; + } } narenas_auto = opt_narenas; /* @@ -1292,8 +1290,9 @@ malloc_init_hard_finish(tsdn_t *tsdn) /* Allocate and initialize arenas. */ arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) * (MALLOCX_ARENA_MAX+1), CACHELINE); - if (arenas == NULL) + if (arenas == NULL) { return (true); + } /* Copy the pointer to the one arena that was already initialized. */ arena_set(0, a0); @@ -1304,8 +1303,7 @@ malloc_init_hard_finish(tsdn_t *tsdn) } static bool -malloc_init_hard(void) -{ +malloc_init_hard(void) { tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 @@ -1326,10 +1324,12 @@ malloc_init_hard(void) malloc_mutex_unlock(TSDN_NULL, &init_lock); /* Recursive allocation relies on functional tsd. 
*/ tsd = malloc_tsd_boot0(); - if (tsd == NULL) + if (tsd == NULL) { return (true); - if (malloc_init_hard_recursible()) + } + if (malloc_init_hard_recursible()) { return (true); + } malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); if (config_prof && prof_boot2(tsd)) { @@ -1616,7 +1616,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) { tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); - /* If profiling is on, get our profiling context. */ if (config_prof && opt_prof) { /* @@ -1755,8 +1754,7 @@ imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_malloc(size_t size) -{ +je_malloc(size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -1780,8 +1778,7 @@ je_malloc(size_t size) JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ +je_posix_memalign(void **memptr, size_t alignment, size_t size) { int ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -1808,8 +1805,7 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) -je_aligned_alloc(size_t alignment, size_t size) -{ +je_aligned_alloc(size_t alignment, size_t size) { void *ret; static_opts_t sopts; @@ -1839,8 +1835,7 @@ je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t size) -{ +je_calloc(size_t num, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -1865,29 +1860,30 @@ je_calloc(size_t num, size_t size) static void * irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr, - size_t old_usize, size_t usize, prof_tctx_t *tctx) -{ + size_t 
old_usize, size_t usize, prof_tctx_t *tctx) { void *p; - if (tctx == NULL) + if (tctx == NULL) { return (NULL); + } if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, extent, old_ptr, old_usize, LARGE_MINCLASS, 0, false); - if (p == NULL) + if (p == NULL) { return (NULL); + } arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize); - } else + } else { p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false); + } return (p); } JEMALLOC_ALWAYS_INLINE_C void * irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, - size_t usize) -{ + size_t usize) { void *p; extent_t *extent; bool prof_active; @@ -1915,8 +1911,7 @@ irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, } JEMALLOC_INLINE_C void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) -{ +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { extent_t *extent; size_t usize; @@ -1929,42 +1924,46 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) if (config_prof && opt_prof) { usize = isalloc(tsd_tsdn(tsd), extent, ptr); prof_free(tsd, extent, ptr, usize); - } else if (config_stats) + } else if (config_stats) { usize = isalloc(tsd_tsdn(tsd), extent, ptr); - if (config_stats) + } + if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; + } - if (likely(!slow_path)) + if (likely(!slow_path)) { idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, false); - else + } else { idalloctm(tsd_tsdn(tsd), extent, ptr, tcache, false, true); + } } JEMALLOC_INLINE_C void isfree(tsd_t *tsd, extent_t *extent, void *ptr, size_t usize, tcache_t *tcache, - bool slow_path) -{ + bool slow_path) { witness_assert_lockless(tsd_tsdn(tsd)); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - if (config_prof && opt_prof) + if (config_prof && opt_prof) { prof_free(tsd, extent, ptr, usize); - if (config_stats) + } + if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; + } - if 
(likely(!slow_path)) + if (likely(!slow_path)) { isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, false); - else + } else { isdalloct(tsd_tsdn(tsd), extent, ptr, usize, tcache, true); + } } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) -{ +je_realloc(void *ptr, size_t size) { void *ret; tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); @@ -2000,8 +1999,9 @@ je_realloc(void *ptr, size_t size) NULL : irealloc_prof(tsd, extent, ptr, old_usize, usize); } else { - if (config_stats) + if (config_stats) { usize = s2u(size); + } ret = iralloc(tsd, extent, ptr, old_usize, size, 0, false); } @@ -2033,16 +2033,16 @@ je_realloc(void *ptr, size_t size) } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_free(void *ptr) -{ +je_free(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { tsd_t *tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); - if (likely(!malloc_slow)) + if (likely(!malloc_slow)) { ifree(tsd, ptr, tcache_get(tsd, false), false); - else + } else { ifree(tsd, ptr, tcache_get(tsd, false), true); + } witness_assert_lockless(tsd_tsdn(tsd)); } } @@ -2059,8 +2059,7 @@ je_free(void *ptr) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) -je_memalign(size_t alignment, size_t size) -{ +je_memalign(size_t alignment, size_t size) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -2090,8 +2089,7 @@ je_memalign(size_t alignment, size_t size) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) -je_valloc(size_t size) -{ +je_valloc(size_t size) { void *ret; static_opts_t sopts; @@ -2180,8 +2178,7 @@ int __posix_memalign(void** r, size_t a, size_t s) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_mallocx(size_t size, int flags) -{ 
+je_mallocx(size_t size, int flags) { void *ret; static_opts_t sopts; dynamic_opts_t dopts; @@ -2225,17 +2222,18 @@ je_mallocx(size_t size, int flags) static void * irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) -{ + tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) { void *p; - if (tctx == NULL) + if (tctx == NULL) { return (NULL); + } if (usize <= SMALL_MAXCLASS) { p = iralloct(tsdn, extent, old_ptr, old_usize, LARGE_MINCLASS, alignment, zero, tcache, arena); - if (p == NULL) + if (p == NULL) { return (NULL); + } arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize); } else { p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment, @@ -2248,8 +2246,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr, JEMALLOC_ALWAYS_INLINE_C void * irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, - arena_t *arena) -{ + arena_t *arena) { void *p; extent_t *extent; bool prof_active; @@ -2281,8 +2278,9 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, */ extent = old_extent; *usize = isalloc(tsd_tsdn(tsd), extent, p); - } else + } else { extent = iealloc(tsd_tsdn(tsd), p); + } prof_realloc(tsd, extent, p, *usize, tctx, prof_active, false, old_extent, old_ptr, old_usize, old_tctx); @@ -2292,8 +2290,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize, JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) -{ +je_rallocx(void *ptr, size_t size, int flags) { void *p; tsd_t *tsd; extent_t *extent; @@ -2314,34 +2311,41 @@ je_rallocx(void *ptr, size_t size, int flags) if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = 
MALLOCX_ARENA_GET(flags); arena = arena_get(tsd_tsdn(tsd), arena_ind, true); - if (unlikely(arena == NULL)) + if (unlikely(arena == NULL)) { goto label_oom; - } else + } + } else { arena = NULL; + } if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else + } + } else { tcache = tcache_get(tsd, true); + } old_usize = isalloc(tsd_tsdn(tsd), extent, ptr); if (config_prof && opt_prof) { usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { goto label_oom; + } p = irallocx_prof(tsd, extent, ptr, old_usize, size, alignment, &usize, zero, tcache, arena); - if (unlikely(p == NULL)) + if (unlikely(p == NULL)) { goto label_oom; + } } else { p = iralloct(tsd_tsdn(tsd), extent, ptr, old_usize, size, alignment, zero, tcache, arena); - if (unlikely(p == NULL)) + if (unlikely(p == NULL)) { goto label_oom; + } if (config_stats) { usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p); @@ -2368,12 +2372,13 @@ label_oom: JEMALLOC_ALWAYS_INLINE_C size_t ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize, - size_t size, size_t extra, size_t alignment, bool zero) -{ + size_t size, size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, zero)) + if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment, + zero)) { return (old_usize); + } usize = isalloc(tsdn, extent, ptr); return (usize); @@ -2382,12 +2387,12 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize, static size_t ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, - prof_tctx_t 
*tctx) -{ + prof_tctx_t *tctx) { size_t usize; - if (tctx == NULL) + if (tctx == NULL) { return (old_usize); + } usize = ixallocx_helper(tsdn, extent, ptr, old_usize, size, extra, alignment, zero); @@ -2396,8 +2401,7 @@ ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr, JEMALLOC_ALWAYS_INLINE_C size_t ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize, - size_t size, size_t extra, size_t alignment, bool zero) -{ + size_t size, size_t extra, size_t alignment, bool zero) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; @@ -2445,8 +2449,7 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize, } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; extent_t *extent; size_t usize, old_usize; @@ -2476,8 +2479,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) usize = old_usize; goto label_not_resized; } - if (unlikely(LARGE_MAXCLASS - size < extra)) + if (unlikely(LARGE_MAXCLASS - size < extra)) { extra = LARGE_MAXCLASS - size; + } if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, extent, ptr, old_usize, size, extra, @@ -2486,8 +2490,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) usize = ixallocx_helper(tsd_tsdn(tsd), extent, ptr, old_usize, size, extra, alignment, zero); } - if (unlikely(usize == old_usize)) + if (unlikely(usize == old_usize)) { goto label_not_resized; + } if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; @@ -2501,8 +2506,7 @@ label_not_resized: JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, int flags) -{ +je_sallocx(const void *ptr, int flags) { size_t usize; tsdn_t *tsdn; @@ -2511,18 +2515,18 @@ je_sallocx(const void *ptr, int flags) tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); - if (config_ivsalloc) + if (config_ivsalloc) { usize = ivsalloc(tsdn, 
ptr); - else + } else { usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr); + } witness_assert_lockless(tsdn); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_dallocx(void *ptr, int flags) -{ +je_dallocx(void *ptr, int flags) { tsd_t *tsd; tcache_t *tcache; @@ -2532,39 +2536,41 @@ je_dallocx(void *ptr, int flags) tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else + } + } else { tcache = tcache_get(tsd, false); + } UTRACE(ptr, 0, 0); - if (likely(!malloc_slow)) + if (likely(!malloc_slow)) { ifree(tsd, ptr, tcache, false); - else + } else { ifree(tsd, ptr, tcache, true); + } witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(tsdn_t *tsdn, size_t size, int flags) -{ +inallocx(tsdn_t *tsdn, size_t size, int flags) { size_t usize; witness_assert_lockless(tsdn); - if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { usize = s2u(size); - else + } else { usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } witness_assert_lockless(tsdn); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_sdallocx(void *ptr, size_t size, int flags) -{ +je_sdallocx(void *ptr, size_t size, int flags) { tsd_t *tsd; extent_t *extent; size_t usize; @@ -2579,39 +2585,43 @@ je_sdallocx(void *ptr, size_t size, int flags) witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else + } + } else { tcache = tcache_get(tsd, false); + } UTRACE(ptr, 0, 0); - if 
(likely(!malloc_slow)) + if (likely(!malloc_slow)) { isfree(tsd, extent, ptr, usize, tcache, false); - else + } else { isfree(tsd, extent, ptr, usize, tcache, true); + } witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) -je_nallocx(size_t size, int flags) -{ +je_nallocx(size_t size, int flags) { size_t usize; tsdn_t *tsdn; assert(size != 0); - if (unlikely(malloc_init())) + if (unlikely(malloc_init())) { return (0); + } tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); usize = inallocx(tsdn, size, flags); - if (unlikely(usize > LARGE_MAXCLASS)) + if (unlikely(usize > LARGE_MAXCLASS)) { return (0); + } witness_assert_lockless(tsdn); return (usize); @@ -2619,13 +2629,13 @@ je_nallocx(size_t size, int flags) JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ + size_t newlen) { int ret; tsd_t *tsd; - if (unlikely(malloc_init())) + if (unlikely(malloc_init())) { return (EAGAIN); + } tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); @@ -2635,13 +2645,13 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, } JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; tsdn_t *tsdn; - if (unlikely(malloc_init())) + if (unlikely(malloc_init())) { return (EAGAIN); + } tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); @@ -2652,13 +2662,13 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ + void *newp, size_t newlen) { int ret; tsd_t *tsd; - if (unlikely(malloc_init())) + if (unlikely(malloc_init())) { return (EAGAIN); + } tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); @@ -2669,8 +2679,7 @@ je_mallctlbymib(const 
size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { tsdn_t *tsdn; tsdn = tsdn_fetch(); @@ -2680,8 +2689,7 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; tsdn_t *tsdn; @@ -2690,9 +2698,9 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); - if (config_ivsalloc) + if (config_ivsalloc) { ret = ivsalloc(tsdn, ptr); - else { + } else { ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr); } @@ -2726,8 +2734,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) #ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void -jemalloc_constructor(void) -{ +jemalloc_constructor(void) { malloc_init(); } #endif @@ -2745,8 +2752,9 @@ _malloc_prefork(void) arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) + if (!malloc_initialized()) { return; + } #endif assert(malloc_initialized()); @@ -2779,8 +2787,9 @@ _malloc_prefork(void) } } for (i = 0; i < narenas; i++) { - if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_prefork3(tsd_tsdn(tsd), arena); + } } prof_prefork1(tsd_tsdn(tsd)); } @@ -2797,8 +2806,9 @@ _malloc_postfork(void) unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) + if (!malloc_initialized()) { return; + } #endif assert(malloc_initialized()); @@ -2809,8 +2819,9 @@ _malloc_postfork(void) for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 
arena_postfork_parent(tsd_tsdn(tsd), arena); + } } prof_postfork_parent(tsd_tsdn(tsd)); malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); @@ -2818,8 +2829,7 @@ _malloc_postfork(void) } void -jemalloc_postfork_child(void) -{ +jemalloc_postfork_child(void) { tsd_t *tsd; unsigned i, narenas; @@ -2832,8 +2842,9 @@ jemalloc_postfork_child(void) for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { arena_postfork_child(tsd_tsdn(tsd), arena); + } } prof_postfork_child(tsd_tsdn(tsd)); malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); diff --git a/src/jemalloc_cpp.cpp b/src/jemalloc_cpp.cpp index 984c944b..030ff995 100644 --- a/src/jemalloc_cpp.cpp +++ b/src/jemalloc_cpp.cpp @@ -33,8 +33,7 @@ void operator delete[](void *ptr, std::size_t size) noexcept; template <bool IsNoExcept> JEMALLOC_INLINE void * -newImpl(std::size_t size) noexcept(IsNoExcept) -{ +newImpl(std::size_t size) noexcept(IsNoExcept) { void *ptr = je_malloc(size); if (likely(ptr != nullptr)) return (ptr); @@ -67,65 +66,55 @@ newImpl(std::size_t size) noexcept(IsNoExcept) } void * -operator new(std::size_t size) -{ +operator new(std::size_t size) { return (newImpl<false>(size)); } void * -operator new[](std::size_t size) -{ +operator new[](std::size_t size) { return (newImpl<false>(size)); } void * -operator new(std::size_t size, const std::nothrow_t &) noexcept -{ +operator new(std::size_t size, const std::nothrow_t &) noexcept { return (newImpl<true>(size)); } void * -operator new[](std::size_t size, const std::nothrow_t &) noexcept -{ +operator new[](std::size_t size, const std::nothrow_t &) noexcept { return (newImpl<true>(size)); } void -operator delete(void *ptr) noexcept -{ +operator delete(void *ptr) noexcept { je_free(ptr); } void -operator delete[](void *ptr) noexcept -{ +operator delete[](void *ptr) noexcept { je_free(ptr); } void 
-operator delete(void *ptr, const std::nothrow_t &) noexcept -{ +operator delete(void *ptr, const std::nothrow_t &) noexcept { je_free(ptr); } -void operator delete[](void *ptr, const std::nothrow_t &) noexcept -{ +void operator delete[](void *ptr, const std::nothrow_t &) noexcept { je_free(ptr); } #if __cpp_sized_deallocation >= 201309 void -operator delete(void *ptr, std::size_t size) noexcept -{ +operator delete(void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } je_sdallocx(ptr, size, /*flags=*/0); } -void operator delete[](void *ptr, std::size_t size) noexcept -{ +void operator delete[](void *ptr, std::size_t size) noexcept { if (unlikely(ptr == nullptr)) { return; } diff --git a/src/large.c b/src/large.c index 9936b236..0f2f1763 100644 --- a/src/large.c +++ b/src/large.c @@ -4,8 +4,7 @@ /******************************************************************************/ void * -large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) -{ +large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { assert(usize == s2u(usize)); return (large_palloc(tsdn, arena, usize, CACHELINE, zero)); @@ -13,8 +12,7 @@ large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) void * large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, - bool zero) -{ + bool zero) { size_t ausize; extent_t *extent; bool is_zeroed; @@ -23,27 +21,31 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, assert(!tsdn_null(tsdn) || arena != NULL); ausize = sa2u(usize, alignment); - if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) + if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) { return (NULL); + } /* * Copy zero into is_zeroed and pass the copy to extent_alloc(), so that * it is possible to make correct junk/zero fill decisions below. 
*/ is_zeroed = zero; - if (likely(!tsdn_null(tsdn))) + if (likely(!tsdn_null(tsdn))) { arena = arena_choose(tsdn_tsd(tsdn), arena); + } if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, - arena, usize, alignment, &is_zeroed)) == NULL) + arena, usize, alignment, &is_zeroed)) == NULL) { return (NULL); + } /* Insert extent into large. */ malloc_mutex_lock(tsdn, &arena->large_mtx); ql_elm_new(extent, ql_link); ql_tail_insert(&arena->large, extent, ql_link); malloc_mutex_unlock(tsdn, &arena->large_mtx); - if (config_prof && arena_prof_accum(tsdn, arena, usize)) + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { prof_idump(tsdn); + } if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed) { @@ -64,8 +66,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, #define large_dalloc_junk JEMALLOC_N(n_large_dalloc_junk) #endif void -large_dalloc_junk(void *ptr, size_t usize) -{ +large_dalloc_junk(void *ptr, size_t usize) { memset(ptr, JEMALLOC_FREE_JUNK, usize); } #ifdef JEMALLOC_JET @@ -79,15 +80,15 @@ large_dalloc_junk_t *large_dalloc_junk = JEMALLOC_N(n_large_dalloc_junk); #define large_dalloc_maybe_junk JEMALLOC_N(n_large_dalloc_maybe_junk) #endif void -large_dalloc_maybe_junk(void *ptr, size_t usize) -{ +large_dalloc_maybe_junk(void *ptr, size_t usize) { if (config_fill && have_dss && unlikely(opt_junk_free)) { /* * Only bother junk filling if the extent isn't about to be * unmapped. 
*/ - if (!config_munmap || (have_dss && extent_in_dss(ptr))) + if (!config_munmap || (have_dss && extent_in_dss(ptr))) { large_dalloc_junk(ptr, usize); + } } } #ifdef JEMALLOC_JET @@ -98,8 +99,7 @@ large_dalloc_maybe_junk_t *large_dalloc_maybe_junk = #endif static bool -large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) -{ +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); extent_hooks_t *extent_hooks = extent_hooks_get(arena); @@ -107,16 +107,18 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) assert(oldusize > usize); - if (extent_hooks->split == NULL) + if (extent_hooks->split == NULL) { return (true); + } /* Split excess pages. */ if (diff != 0) { extent_t *trail = extent_split_wrapper(tsdn, arena, &extent_hooks, extent, usize + large_pad, usize, diff, diff); - if (trail == NULL) + if (trail == NULL) { return (true); + } if (config_fill && unlikely(opt_junk_free)) { large_dalloc_maybe_junk(extent_addr_get(trail), @@ -133,8 +135,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) static bool large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, - bool zero) -{ + bool zero) { arena_t *arena = extent_arena_get(extent); size_t oldusize = extent_usize_get(extent); bool is_zeroed_trail = false; @@ -142,8 +143,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, size_t trailsize = usize - extent_usize_get(extent); extent_t *trail; - if (extent_hooks->merge == NULL) + if (extent_hooks->merge == NULL) { return (true); + } if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks, extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) == @@ -151,8 +153,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, bool commit = true; if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, 
extent_past_get(extent), trailsize, 0, CACHELINE, - &is_zeroed_trail, &commit, false)) == NULL) + &is_zeroed_trail, &commit, false)) == NULL) { return (true); + } } if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { @@ -193,8 +196,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, - size_t usize_max, bool zero) -{ + size_t usize_max, bool zero) { assert(s2u(extent_usize_get(extent)) == extent_usize_get(extent)); /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); @@ -241,17 +243,16 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, static void * large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, - size_t alignment, bool zero) -{ - if (alignment <= CACHELINE) + size_t alignment, bool zero) { + if (alignment <= CACHELINE) { return (large_malloc(tsdn, arena, usize, zero)); + } return (large_palloc(tsdn, arena, usize, alignment, zero)); } void * large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ + size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; @@ -262,8 +263,9 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, LARGE_MINCLASS); /* Try to avoid moving the allocation. */ - if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { return (extent_addr_get(extent)); + } /* * usize and old size are different enough that we need to use a @@ -271,8 +273,9 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, * space and copying. */ ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero); - if (ret == NULL) + if (ret == NULL) { return (NULL); + } copysize = (usize < extent_usize_get(extent)) ? 
usize : extent_usize_get(extent); @@ -288,8 +291,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, * independent of these considerations. */ static void -large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) -{ +large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) { arena_t *arena; arena = extent_arena_get(extent); @@ -302,42 +304,37 @@ large_dalloc_impl(tsdn_t *tsdn, extent_t *extent, bool junked_locked) } arena_extent_dalloc_large(tsdn, arena, extent, junked_locked); - if (!junked_locked) + if (!junked_locked) { arena_decay_tick(tsdn, arena); + } } void -large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) -{ +large_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent) { large_dalloc_impl(tsdn, extent, true); } void -large_dalloc(tsdn_t *tsdn, extent_t *extent) -{ +large_dalloc(tsdn_t *tsdn, extent_t *extent) { large_dalloc_impl(tsdn, extent, false); } size_t -large_salloc(tsdn_t *tsdn, const extent_t *extent) -{ +large_salloc(tsdn_t *tsdn, const extent_t *extent) { return (extent_usize_get(extent)); } prof_tctx_t * -large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) -{ +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { return (extent_prof_tctx_get(extent)); } void -large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) -{ +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { extent_prof_tctx_set(extent, tctx); } void -large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) -{ +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); } diff --git a/src/mutex.c b/src/mutex.c index bde536de..bc0869f8 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -35,8 +35,7 @@ static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); static void -pthread_create_once(void) -{ +pthread_create_once(void) { pthread_create_fptr = dlsym(RTLD_NEXT, 
"pthread_create"); if (pthread_create_fptr == NULL) { malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " @@ -50,8 +49,7 @@ pthread_create_once(void) JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ + void *__restrict arg) { static pthread_once_t once_control = PTHREAD_ONCE_INIT; pthread_once(&once_control, pthread_create_once); @@ -68,15 +66,16 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, #endif bool -malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) -{ +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) + _CRT_SPINCOUNT)) { return (true); + } # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mutex->lock = OS_UNFAIR_LOCK_INIT; @@ -88,14 +87,16 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, - bootstrap_calloc) != 0) + bootstrap_calloc) != 0) { return (true); + } } #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) + if (pthread_mutexattr_init(&attr) != 0) { return (true); + } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); @@ -103,26 +104,24 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) } pthread_mutexattr_destroy(&attr); #endif - if (config_debug) + if (config_debug) { witness_init(&mutex->witness, name, rank, NULL, NULL); + } return (false); } void -malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn, mutex); } void 
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) -{ +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); #else @@ -130,21 +129,22 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) mutex->witness.rank)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } #endif } bool -malloc_mutex_boot(void) -{ +malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - bootstrap_calloc) != 0) + bootstrap_calloc) != 0) { return (true); + } postponed_mutexes = postponed_mutexes->postponed_next; } #endif diff --git a/src/nstime.c b/src/nstime.c index 57ebf2e0..66989a07 100644 --- a/src/nstime.c +++ b/src/nstime.c @@ -3,66 +3,56 @@ #define BILLION UINT64_C(1000000000) void -nstime_init(nstime_t *time, uint64_t ns) -{ +nstime_init(nstime_t *time, uint64_t ns) { time->ns = ns; } void -nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) -{ +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { time->ns = sec * BILLION + nsec; } uint64_t -nstime_ns(const nstime_t *time) -{ +nstime_ns(const nstime_t *time) { return (time->ns); } uint64_t -nstime_sec(const nstime_t *time) -{ +nstime_sec(const nstime_t *time) { return (time->ns / BILLION); } uint64_t -nstime_nsec(const nstime_t *time) -{ +nstime_nsec(const nstime_t *time) { return (time->ns % BILLION); } void -nstime_copy(nstime_t *time, const nstime_t *source) -{ +nstime_copy(nstime_t *time, const nstime_t *source) { *time = *source; } int -nstime_compare(const nstime_t *a, const nstime_t *b) -{ +nstime_compare(const nstime_t *a, const nstime_t *b) { 
return ((a->ns > b->ns) - (a->ns < b->ns)); } void -nstime_add(nstime_t *time, const nstime_t *addend) -{ +nstime_add(nstime_t *time, const nstime_t *addend) { assert(UINT64_MAX - time->ns >= addend->ns); time->ns += addend->ns; } void -nstime_subtract(nstime_t *time, const nstime_t *subtrahend) -{ +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { assert(nstime_compare(time, subtrahend) >= 0); time->ns -= subtrahend->ns; } void -nstime_imultiply(nstime_t *time, uint64_t multiplier) -{ +nstime_imultiply(nstime_t *time, uint64_t multiplier) { assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); @@ -70,16 +60,14 @@ nstime_imultiply(nstime_t *time, uint64_t multiplier) } void -nstime_idivide(nstime_t *time, uint64_t divisor) -{ +nstime_idivide(nstime_t *time, uint64_t divisor) { assert(divisor != 0); time->ns /= divisor; } uint64_t -nstime_divide(const nstime_t *time, const nstime_t *divisor) -{ +nstime_divide(const nstime_t *time, const nstime_t *divisor) { assert(divisor->ns != 0); return (time->ns / divisor->ns); @@ -88,8 +76,7 @@ nstime_divide(const nstime_t *time, const nstime_t *divisor) #ifdef _WIN32 # define NSTIME_MONOTONIC true static void -nstime_get(nstime_t *time) -{ +nstime_get(nstime_t *time) { FILETIME ft; uint64_t ticks_100ns; @@ -101,8 +88,7 @@ nstime_get(nstime_t *time) #elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE # define NSTIME_MONOTONIC true static void -nstime_get(nstime_t *time) -{ +nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); @@ -111,8 +97,7 @@ nstime_get(nstime_t *time) #elif JEMALLOC_HAVE_CLOCK_MONOTONIC # define NSTIME_MONOTONIC true static void -nstime_get(nstime_t *time) -{ +nstime_get(nstime_t *time) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); @@ -121,15 +106,13 @@ nstime_get(nstime_t *time) #elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME # define NSTIME_MONOTONIC true static void 
-nstime_get(nstime_t *time) -{ +nstime_get(nstime_t *time) { nstime_init(time, mach_absolute_time()); } #else # define NSTIME_MONOTONIC false static void -nstime_get(nstime_t *time) -{ +nstime_get(nstime_t *time) { struct timeval tv; gettimeofday(&tv, NULL); @@ -142,8 +125,7 @@ nstime_get(nstime_t *time) #define nstime_monotonic JEMALLOC_N(n_nstime_monotonic) #endif bool -nstime_monotonic(void) -{ +nstime_monotonic(void) { return (NSTIME_MONOTONIC); #undef NSTIME_MONOTONIC } @@ -158,8 +140,7 @@ nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic); #define nstime_update JEMALLOC_N(n_nstime_update) #endif bool -nstime_update(nstime_t *time) -{ +nstime_update(nstime_t *time) { nstime_t old_time; nstime_copy(&old_time, time); diff --git a/src/pages.c b/src/pages.c index 7c26a28a..c23dccd7 100644 --- a/src/pages.c +++ b/src/pages.c @@ -18,14 +18,14 @@ static bool os_overcommits; /******************************************************************************/ void * -pages_map(void *addr, size_t size, bool *commit) -{ +pages_map(void *addr, size_t size, bool *commit) { void *ret; assert(size != 0); - if (os_overcommits) + if (os_overcommits) { *commit = true; + } #ifdef _WIN32 /* @@ -46,9 +46,9 @@ pages_map(void *addr, size_t size, bool *commit) } assert(ret != NULL); - if (ret == MAP_FAILED) + if (ret == MAP_FAILED) { ret = NULL; - else if (addr != NULL && ret != addr) { + } else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. 
*/ @@ -62,8 +62,7 @@ pages_map(void *addr, size_t size, bool *commit) } void -pages_unmap(void *addr, size_t size) -{ +pages_unmap(void *addr, size_t size) { #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) #else @@ -80,15 +79,15 @@ pages_unmap(void *addr, size_t size) "munmap" #endif "(): %s\n", buf); - if (opt_abort) + if (opt_abort) { abort(); + } } } void * pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, - bool *commit) -{ + bool *commit) { void *ret = (void *)((uintptr_t)addr + leadsize); assert(alloc_size >= leadsize + size); @@ -98,30 +97,34 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, pages_unmap(addr, alloc_size); new_addr = pages_map(ret, size, commit); - if (new_addr == ret) + if (new_addr == ret) { return (ret); - if (new_addr) + } + if (new_addr) { pages_unmap(new_addr, size); + } return (NULL); } #else { size_t trailsize = alloc_size - leadsize - size; - if (leadsize != 0) + if (leadsize != 0) { pages_unmap(addr, leadsize); - if (trailsize != 0) + } + if (trailsize != 0) { pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } return (ret); } #endif } static bool -pages_commit_impl(void *addr, size_t size, bool commit) -{ - if (os_overcommits) +pages_commit_impl(void *addr, size_t size, bool commit) { + if (os_overcommits) { return (true); + } #ifdef _WIN32 return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, @@ -131,8 +134,9 @@ pages_commit_impl(void *addr, size_t size, bool commit) int prot = commit ? 
PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, -1, 0); - if (result == MAP_FAILED) + if (result == MAP_FAILED) { return (true); + } if (result != addr) { /* * We succeeded in mapping memory, but not in the right @@ -147,22 +151,20 @@ pages_commit_impl(void *addr, size_t size, bool commit) } bool -pages_commit(void *addr, size_t size) -{ +pages_commit(void *addr, size_t size) { return (pages_commit_impl(addr, size, true)); } bool -pages_decommit(void *addr, size_t size) -{ +pages_decommit(void *addr, size_t size) { return (pages_commit_impl(addr, size, false)); } bool -pages_purge_lazy(void *addr, size_t size) -{ - if (!pages_can_purge_lazy) +pages_purge_lazy(void *addr, size_t size) { + if (!pages_can_purge_lazy) { return (true); + } #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); @@ -175,10 +177,10 @@ pages_purge_lazy(void *addr, size_t size) } bool -pages_purge_forced(void *addr, size_t size) -{ - if (!pages_can_purge_forced) +pages_purge_forced(void *addr, size_t size) { + if (!pages_can_purge_forced) { return (true); + } #if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) return (madvise(addr, size, MADV_DONTNEED) != 0); @@ -188,8 +190,7 @@ pages_purge_forced(void *addr, size_t size) } bool -pages_huge(void *addr, size_t size) -{ +pages_huge(void *addr, size_t size) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); @@ -201,8 +202,7 @@ pages_huge(void *addr, size_t size) } bool -pages_nohuge(void *addr, size_t size) -{ +pages_nohuge(void *addr, size_t size) { assert(HUGEPAGE_ADDR2BASE(addr) == addr); assert(HUGEPAGE_CEILING(size) == size); @@ -215,14 +215,14 @@ pages_nohuge(void *addr, size_t size) #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool -os_overcommits_sysctl(void) -{ +os_overcommits_sysctl(void) { int vm_overcommit; size_t sz; sz = sizeof(vm_overcommit); - if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) + if 
(sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { return (false); /* Error. */ + } return ((vm_overcommit & 0x3) == 0); } @@ -235,8 +235,7 @@ os_overcommits_sysctl(void) * wrappers. */ static bool -os_overcommits_proc(void) -{ +os_overcommits_proc(void) { int fd; char buf[1]; ssize_t nread; @@ -246,8 +245,9 @@ os_overcommits_proc(void) #else fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); #endif - if (fd == -1) + if (fd == -1) { return (false); /* Error. */ + } #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); @@ -261,8 +261,9 @@ os_overcommits_proc(void) close(fd); #endif - if (nread < 1) + if (nread < 1) { return (false); /* Error. */ + } /* * /proc/sys/vm/overcommit_memory meanings: * 0: Heuristic overcommit. @@ -274,8 +275,7 @@ os_overcommits_proc(void) #endif void -pages_boot(void) -{ +pages_boot(void) { #ifndef _WIN32 mmap_flags = MAP_PRIVATE | MAP_ANON; #endif @@ -285,8 +285,9 @@ pages_boot(void) #elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) os_overcommits = os_overcommits_proc(); # ifdef MAP_NORESERVE - if (os_overcommits) + if (os_overcommits) { mmap_flags |= MAP_NORESERVE; + } # endif #else os_overcommits = false; @@ -133,8 +133,7 @@ static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /* Red-black trees. */ JEMALLOC_INLINE_C int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) -{ +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); @@ -157,14 +156,14 @@ rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) JEMALLOC_INLINE_C int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) -{ +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) + if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); + } return (ret); } @@ -172,8 +171,7 @@ rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) JEMALLOC_INLINE_C int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) -{ +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; @@ -194,8 +192,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, /******************************************************************************/ void -prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) -{ +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); @@ -208,24 +205,25 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) * programs. */ tdata = prof_tdata_get(tsd, true); - if (tdata != NULL) + if (tdata != NULL) { prof_sample_threshold_update(tdata); + } } if ((uintptr_t)tctx > (uintptr_t)1U) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else + } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } } void prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr, - size_t usize, prof_tctx_t *tctx) -{ + size_t usize, prof_tctx_t *tctx) { prof_tctx_set(tsdn, extent, ptr, usize, tctx); malloc_mutex_lock(tsdn, tctx->tdata->lock); @@ -240,23 +238,22 @@ prof_malloc_sample_object(tsdn_t *tsdn, extent_t *extent, const void *ptr, } void -prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); 
assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else + } else { malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } void -bt_init(prof_bt_t *bt, void **vec) -{ +bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; @@ -264,8 +261,7 @@ bt_init(prof_bt_t *bt, void **vec) } JEMALLOC_INLINE_C void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); @@ -278,8 +274,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) } JEMALLOC_INLINE_C void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); @@ -295,17 +290,18 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) gdump = tdata->enq_gdump; tdata->enq_gdump = false; - if (idump) + if (idump) { prof_idump(tsd_tsdn(tsd)); - if (gdump) + } + if (gdump) { prof_gdump(tsd_tsdn(tsd)); + } } } #ifdef JEMALLOC_PROF_LIBUNWIND void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); @@ -313,41 +309,41 @@ prof_backtrace(prof_bt_t *bt) assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); - if (nframes <= 0) + if (nframes <= 0) { return; + } bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); return (_URC_NO_REASON); } static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t 
*)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); - if (ip == NULL) + if (ip == NULL) { return (_URC_END_OF_STACK); + } data->bt->vec[data->bt->len] = ip; data->bt->len++; - if (data->bt->len == data->max) + if (data->bt->len == data->max) { return (_URC_END_OF_STACK); + } return (_URC_NO_REASON); } void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); @@ -356,20 +352,22 @@ prof_backtrace(prof_bt_t *bt) } #elif (defined(JEMALLOC_PROF_GCC)) void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { #define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ - if (__builtin_frame_address(i) == 0) \ + if (__builtin_frame_address(i) == 0) { \ return; \ + } \ p = __builtin_return_address(i); \ - if (p == NULL) \ + if (p == NULL) { \ return; \ + } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ - } else \ - return; + } else { \ + return; \ + } cassert(config_prof); @@ -517,30 +515,26 @@ prof_backtrace(prof_bt_t *bt) } #else void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * -prof_gctx_mutex_choose(void) -{ +prof_gctx_mutex_choose(void) { unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); } static malloc_mutex_t * -prof_tdata_mutex_choose(uint64_t thr_uid) -{ +prof_tdata_mutex_choose(uint64_t thr_uid) { return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); } static prof_gctx_t * -prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) -{ +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. 
*/ @@ -548,8 +542,9 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); - if (gctx == NULL) + if (gctx == NULL) { return (NULL); + } gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with @@ -566,8 +561,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) -{ + prof_tdata_t *tdata) { cassert(config_prof); /* @@ -582,8 +576,9 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); + } prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); @@ -601,34 +596,37 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, } static bool -prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) -{ +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - if (opt_prof_accum) + if (opt_prof_accum) { return (false); - if (tctx->cnts.curobjs != 0) + } + if (tctx->cnts.curobjs != 0) { return (false); - if (tctx->prepared) + } + if (tctx->prepared) { return (false); + } return (true); } static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) -{ - if (opt_prof_accum) +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { return (false); - if (!tctx_tree_empty(&gctx->tctxs)) + } + if (!tctx_tree_empty(&gctx->tctxs)) { return (false); - if (gctx->nlimbo != 0) + } + if (gctx->nlimbo != 0) { return (false); + } return (true); } static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) -{ 
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; @@ -667,8 +665,9 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) */ gctx->nlimbo++; destroy_gctx = true; - } else + } else { destroy_gctx = false; + } break; case prof_tctx_state_dumping: /* @@ -693,18 +692,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); - if (destroy_tdata) + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); + } - if (destroy_tctx) + if (destroy_tctx) { idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx, NULL, true, true); + } } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) -{ + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { prof_gctx_t *p; void *v; @@ -751,8 +751,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, } prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) -{ +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; @@ -763,13 +762,15 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) cassert(config_prof); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return (NULL); + } malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) /* Note double negative! */ + if (!not_found) { /* Note double negative! */ ret.p->prepared = true; + } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { void *btkey; @@ -781,16 +782,18 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) + &new_gctx)) { return (NULL); + } /* Link a prof_tctx_t into gctx for this thread. 
*/ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), size2index(sizeof(prof_tctx_t)), false, NULL, true, arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } return (NULL); } ret.p->tdata = tdata; @@ -805,8 +808,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v), ret.v, NULL, true, true); return (NULL); @@ -835,14 +839,14 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) * -mno-sse) in order for the workaround to be complete. */ void -prof_sample_threshold_update(prof_tdata_t *tdata) -{ +prof_sample_threshold_update(prof_tdata_t *tdata) { #ifdef JEMALLOC_PROF uint64_t r; double u; - if (!config_prof) + if (!config_prof) { return; + } if (lg_prof_sample == 0) { tdata->bytes_until_sample = 0; @@ -877,8 +881,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata) #ifdef JEMALLOC_JET static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { size_t *tdata_count = (size_t *)arg; (*tdata_count)++; @@ -887,8 +891,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) } size_t -prof_tdata_count(void) -{ +prof_tdata_count(void) { size_t tdata_count = 0; tsdn_t *tsdn; @@ -904,16 +907,16 @@ prof_tdata_count(void) #ifdef JEMALLOC_JET size_t -prof_bt_count(void) -{ +prof_bt_count(void) { size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return (0); + } malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); @@ -928,16 +931,16 @@ prof_bt_count(void) #define 
prof_dump_open JEMALLOC_N(prof_dump_open_impl) #endif static int -prof_dump_open(bool propagate_err, const char *filename) -{ +prof_dump_open(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", filename); - if (opt_abort) + if (opt_abort) { abort(); + } } return (fd); @@ -949,8 +952,7 @@ prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); #endif static bool -prof_dump_flush(bool propagate_err) -{ +prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; @@ -961,8 +963,9 @@ prof_dump_flush(bool propagate_err) if (!propagate_err) { malloc_write("<jemalloc>: write() failed during heap " "profile flush\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } ret = true; } @@ -972,8 +975,7 @@ prof_dump_flush(bool propagate_err) } static bool -prof_dump_close(bool propagate_err) -{ +prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); @@ -985,8 +987,7 @@ prof_dump_close(bool propagate_err) } static bool -prof_dump_write(bool propagate_err, const char *s) -{ +prof_dump_write(bool propagate_err, const char *s) { size_t i, slen, n; cassert(config_prof); @@ -995,9 +996,11 @@ prof_dump_write(bool propagate_err, const char *s) slen = strlen(s); while (i < slen) { /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { return (true); + } + } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ @@ -1016,8 +1019,7 @@ prof_dump_write(bool propagate_err, const char *s) JEMALLOC_FORMAT_PRINTF(2, 3) static bool -prof_dump_printf(bool propagate_err, const char *format, ...) -{ +prof_dump_printf(bool propagate_err, const char *format, ...) 
{ bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; @@ -1031,8 +1033,7 @@ prof_dump_printf(bool propagate_err, const char *format, ...) } static void -prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) -{ +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); malloc_mutex_lock(tsdn, tctx->gctx->lock); @@ -1063,8 +1064,7 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) } static void -prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) -{ +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; @@ -1076,8 +1076,7 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) } static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); @@ -1103,8 +1102,7 @@ struct prof_tctx_dump_iter_arg_s { }; static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) -{ +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { struct prof_tctx_dump_iter_arg_s *arg = (struct prof_tctx_dump_iter_arg_s *)opaque; @@ -1121,8 +1119,9 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) + tctx->dump_cnts.accumbytes)) { return (tctx); + } break; default: not_reached(); @@ -1131,8 +1130,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) } static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, 
prof_tctx_t *tctx, void *arg) -{ +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; @@ -1158,8 +1156,7 @@ label_return: } static void -prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) -{ +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); malloc_mutex_lock(tsdn, gctx->lock); @@ -1183,24 +1180,23 @@ struct prof_gctx_merge_iter_arg_s { }; static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) -{ +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { struct prof_gctx_merge_iter_arg_s *arg = (struct prof_gctx_merge_iter_arg_s *)opaque; malloc_mutex_lock(arg->tsdn, gctx->lock); tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, (void *)arg->tsdn); - if (gctx->cnt_summed.curobjs != 0) + if (gctx->cnt_summed.curobjs != 0) { arg->leak_ngctx++; + } malloc_mutex_unlock(arg->tsdn, gctx->lock); return (NULL); } static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) -{ +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; @@ -1230,8 +1226,9 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), to_destroy), to_destroy, NULL, true, true); - } else + } else { next = NULL; + } } while (next != NULL); } gctx->nlimbo--; @@ -1239,8 +1236,9 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) gctx->nlimbo++; malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else + } else { malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } } } @@ -1251,8 +1249,7 @@ struct prof_tdata_merge_iter_arg_s { static prof_tdata_t * prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, - void *opaque) -{ + void *opaque) { struct prof_tdata_merge_iter_arg_s *arg = (struct 
prof_tdata_merge_iter_arg_s *)opaque; @@ -1267,8 +1264,9 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) + &tctx.v);) { prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; @@ -1276,20 +1274,22 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } - } else + } else { tdata->dumping = false; + } malloc_mutex_unlock(arg->tsdn, tdata->lock); return (NULL); } static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { bool propagate_err = *(bool *)arg; - if (!tdata->dumping) + if (!tdata->dumping) { return (NULL); + } if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", @@ -1297,8 +1297,9 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) + (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) { return (tdata); + } return (NULL); } @@ -1307,16 +1308,16 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) #define prof_dump_header JEMALLOC_N(prof_dump_header_impl) #endif static bool -prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) -{ +prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { return (true); + } malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, @@ -1332,8 +1333,7 @@ prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); static bool prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, - const prof_bt_t *bt, prof_gctx_tree_t *gctxs) -{ + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; @@ -1389,8 +1389,7 @@ label_return: #ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int -prof_open_maps(const char *format, ...) -{ +prof_open_maps(const char *format, ...) { int mfd; va_list ap; char filename[PATH_MAX + 1]; @@ -1405,8 +1404,7 @@ prof_open_maps(const char *format, ...) 
#endif static int -prof_getpid(void) -{ +prof_getpid(void) { #ifdef _WIN32 return (GetCurrentProcessId()); #else @@ -1415,8 +1413,7 @@ prof_getpid(void) } static bool -prof_dump_maps(bool propagate_err) -{ +prof_dump_maps(bool propagate_err) { bool ret; int mfd; @@ -1430,8 +1427,9 @@ prof_dump_maps(bool propagate_err) int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) + if (mfd == -1) { mfd = prof_open_maps("/proc/%d/maps", pid); + } } #endif if (mfd != -1) { @@ -1463,8 +1461,9 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: - if (mfd != -1) + if (mfd != -1) { close(mfd); + } return (ret); } @@ -1474,8 +1473,7 @@ label_return: */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) -{ + const char *filename) { #ifdef JEMALLOC_PROF /* * Scaling is equivalent AdjustSamples() in jeprof, but the result may @@ -1510,8 +1508,7 @@ struct prof_gctx_dump_iter_arg_s { }; static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) -{ +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; struct prof_gctx_dump_iter_arg_s *arg = (struct prof_gctx_dump_iter_arg_s *)opaque; @@ -1534,8 +1531,7 @@ static void prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, - prof_gctx_tree_t *gctxs) -{ + prof_gctx_tree_t *gctxs) { size_t tabind; union { prof_gctx_t *p; @@ -1579,8 +1575,7 @@ prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, - prof_gctx_tree_t *gctxs) -{ + prof_gctx_tree_t *gctxs) { /* Create dump file. 
*/ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { return true; @@ -1616,8 +1611,8 @@ label_write_error: } static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) -{ +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { prof_tdata_t *tdata; struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; @@ -1657,8 +1652,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) #ifdef JEMALLOC_JET void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, - uint64_t *accumbytes) -{ + uint64_t *accumbytes) { tsd_t *tsd; prof_tdata_t *tdata; struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; @@ -1705,8 +1699,7 @@ prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) static void -prof_dump_filename(char *filename, char v, uint64_t vseq) -{ +prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { @@ -1724,8 +1717,7 @@ prof_dump_filename(char *filename, char v, uint64_t vseq) } static void -prof_fdump(void) -{ +prof_fdump(void) { tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; @@ -1733,8 +1725,9 @@ prof_fdump(void) assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); - if (!prof_booted) + if (!prof_booted) { return; + } tsd = tsd_fetch(); malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); @@ -1744,19 +1737,20 @@ prof_fdump(void) } void -prof_idump(tsdn_t *tsdn) -{ +prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted || tsdn_null(tsdn)) + if (!prof_booted || tsdn_null(tsdn)) { return; + } tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_idump = true; 
return; @@ -1773,19 +1767,20 @@ prof_idump(tsdn_t *tsdn) } bool -prof_mdump(tsd_t *tsd, const char *filename) -{ +prof_mdump(tsd_t *tsd, const char *filename) { char filename_buf[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); - if (!opt_prof || !prof_booted) + if (!opt_prof || !prof_booted) { return (true); + } if (filename == NULL) { /* No filename specified, so automatically generate one. */ - if (opt_prof_prefix[0] == '\0') + if (opt_prof_prefix[0] == '\0') { return (true); + } malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; @@ -1796,19 +1791,20 @@ prof_mdump(tsd_t *tsd, const char *filename) } void -prof_gdump(tsdn_t *tsdn) -{ +prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted || tsdn_null(tsdn)) + if (!prof_booted || tsdn_null(tsdn)) { return; + } tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_gdump = true; return; @@ -1825,8 +1821,7 @@ prof_gdump(tsdn_t *tsdn) } static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ +prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); @@ -1835,21 +1830,20 @@ prof_bt_hash(const void *key, size_t r_hash[2]) } static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ +prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); - if (bt1->len != bt2->len) + if (bt1->len != bt2->len) { return (false); + } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(tsdn_t *tsdn) -{ +prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; malloc_mutex_lock(tsdn, &next_thr_uid_mtx); @@ -1862,8 +1856,7 @@ prof_thr_uid_alloc(tsdn_t *tsdn) static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t 
thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) -{ + char *thread_name, bool active) { prof_tdata_t *tdata; cassert(config_prof); @@ -1872,8 +1865,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); - if (tdata == NULL) + if (tdata == NULL) { return (NULL); + } tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; @@ -1908,26 +1902,25 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, } prof_tdata_t * -prof_tdata_init(tsd_t *tsd) -{ +prof_tdata_init(tsd_t *tsd) { return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); } static bool -prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) -{ - if (tdata->attached && !even_if_attached) +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { return (false); - if (ckh_count(&tdata->bt2tctx) != 0) + } + if (ckh_count(&tdata->bt2tctx) != 0) { return (false); + } return (true); } static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, - bool even_if_attached) -{ + bool even_if_attached) { malloc_mutex_assert_owner(tsdn, tdata->lock); return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); @@ -1935,8 +1928,7 @@ prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) -{ + bool even_if_attached) { malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); @@ -1953,16 +1945,14 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, } static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) -{ +prof_tdata_destroy(tsd_t *tsd, 
prof_tdata_t *tdata, bool even_if_attached) { malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); @@ -1973,19 +1963,21 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ - if (!destroy_tdata) + if (!destroy_tdata) { tdata->attached = false; + } tsd_prof_tdata_set(tsd, NULL); - } else + } else { destroy_tdata = false; + } malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - if (destroy_tdata) + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); + } } prof_tdata_t * -prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? @@ -1998,8 +1990,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) } static bool -prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) -{ +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; malloc_mutex_lock(tsdn, tdata->lock); @@ -2007,24 +1998,24 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) tdata->expired = true; destroy_tdata = tdata->attached ? false : prof_tdata_should_destroy(tsdn, tdata, false); - } else + } else { destroy_tdata = false; + } malloc_mutex_unlock(tsdn, tdata->lock); return (destroy_tdata); } static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { tsdn_t *tsdn = (tsdn_t *)arg; return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); } void -prof_reset(tsd_t *tsd, size_t lg_sample) -{ +prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); @@ -2041,8 +2032,9 @@ prof_reset(tsd_t *tsd, size_t lg_sample) if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); - } else + } else { next = NULL; + } } while (next != NULL); malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); @@ -2050,21 +2042,21 @@ prof_reset(tsd_t *tsd, size_t lg_sample) } void -prof_tdata_cleanup(tsd_t *tsd) -{ +prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; - if (!config_prof) + if (!config_prof) { return; + } tdata = tsd_prof_tdata_get(tsd); - if (tdata != NULL) + if (tdata != NULL) { prof_tdata_detach(tsd, tdata); + } } bool -prof_active_get(tsdn_t *tsdn) -{ +prof_active_get(tsdn_t *tsdn) { bool prof_active_current; malloc_mutex_lock(tsdn, &prof_active_mtx); @@ -2074,8 +2066,7 @@ prof_active_get(tsdn_t *tsdn) } bool -prof_active_set(tsdn_t *tsdn, bool active) -{ +prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; malloc_mutex_lock(tsdn, &prof_active_mtx); @@ -2086,97 +2077,102 @@ prof_active_set(tsdn_t *tsdn, bool active) } const char * -prof_thread_name_get(tsd_t *tsd) -{ +prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) + if (tdata == NULL) { return (""); + } return (tdata->thread_name != NULL ? 
tdata->thread_name : ""); } static char * -prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) -{ +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; - if (thread_name == NULL) + if (thread_name == NULL) { return (NULL); + } size = strlen(thread_name) + 1; - if (size == 1) + if (size == 1) { return (""); + } ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); - if (ret == NULL) + if (ret == NULL) { return (NULL); + } memcpy(ret, thread_name, size); return (ret); } int -prof_thread_name_set(tsd_t *tsd, const char *thread_name) -{ +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) + if (tdata == NULL) { return (EAGAIN); + } /* Validate input. */ - if (thread_name == NULL) + if (thread_name == NULL) { return (EFAULT); + } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; - if (!isgraph(c) && !isblank(c)) + if (!isgraph(c) && !isblank(c)) { return (EFAULT); + } } s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); - if (s == NULL) + if (s == NULL) { return (EAGAIN); + } if (tdata->thread_name != NULL) { idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata->thread_name), tdata->thread_name, NULL, true, true); tdata->thread_name = NULL; } - if (strlen(s) > 0) + if (strlen(s) > 0) { tdata->thread_name = s; + } return (0); } bool -prof_thread_active_get(tsd_t *tsd) -{ +prof_thread_active_get(tsd_t *tsd) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) + if (tdata == NULL) { return (false); + } return (tdata->active); } bool -prof_thread_active_set(tsd_t *tsd, bool active) -{ +prof_thread_active_set(tsd_t *tsd, bool active) { prof_tdata_t *tdata; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) + if (tdata == NULL) { return (true); + } tdata->active = active; return (false); } bool 
-prof_thread_active_init_get(tsdn_t *tsdn) -{ +prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); @@ -2186,8 +2182,7 @@ prof_thread_active_init_get(tsdn_t *tsdn) } bool -prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) -{ +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); @@ -2198,8 +2193,7 @@ prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) } bool -prof_gdump_get(tsdn_t *tsdn) -{ +prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; malloc_mutex_lock(tsdn, &prof_gdump_mtx); @@ -2209,8 +2203,7 @@ prof_gdump_get(tsdn_t *tsdn) } bool -prof_gdump_set(tsdn_t *tsdn, bool gdump) -{ +prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; malloc_mutex_lock(tsdn, &prof_gdump_mtx); @@ -2221,8 +2214,7 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) } void -prof_boot0(void) -{ +prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, @@ -2230,8 +2222,7 @@ prof_boot0(void) } void -prof_boot1(void) -{ +prof_boot1(void) { cassert(config_prof); /* @@ -2255,8 +2246,7 @@ prof_boot1(void) } bool -prof_boot2(tsd_t *tsd) -{ +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { @@ -2266,71 +2256,85 @@ prof_boot2(tsd_t *tsd) prof_active = opt_prof_active; if (malloc_mutex_init(&prof_active_mtx, "prof_active", - WITNESS_RANK_PROF_ACTIVE)) + WITNESS_RANK_PROF_ACTIVE)) { return (true); + } prof_gdump_val = opt_prof_gdump; if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", - WITNESS_RANK_PROF_GDUMP)) + WITNESS_RANK_PROF_GDUMP)) { return (true); + } prof_thread_active_init = opt_prof_thread_active_init; if (malloc_mutex_init(&prof_thread_active_init_mtx, "prof_thread_active_init", - WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) { return (true); + } if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) + 
prof_bt_keycomp)) { return (true); + } if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", - WITNESS_RANK_PROF_BT2GCTX)) + WITNESS_RANK_PROF_BT2GCTX)) { return (true); + } tdata_tree_new(&tdatas); if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", - WITNESS_RANK_PROF_TDATAS)) + WITNESS_RANK_PROF_TDATAS)) { return (true); + } next_thr_uid = 0; if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", - WITNESS_RANK_PROF_NEXT_THR_UID)) + WITNESS_RANK_PROF_NEXT_THR_UID)) { return (true); + } if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", - WITNESS_RANK_PROF_DUMP_SEQ)) + WITNESS_RANK_PROF_DUMP_SEQ)) { return (true); + } if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", - WITNESS_RANK_PROF_DUMP)) + WITNESS_RANK_PROF_DUMP)) { return (true); + } if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE); - if (gctx_locks == NULL) + if (gctx_locks == NULL) { return (true); + } for (i = 0; i < PROF_NCTX_LOCKS; i++) { if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", - WITNESS_RANK_PROF_GCTX)) + WITNESS_RANK_PROF_GCTX)) { return (true); + } } tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE); - if (tdata_locks == NULL) + if (tdata_locks == NULL) { return (true); + } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", - WITNESS_RANK_PROF_TDATA)) + WITNESS_RANK_PROF_TDATA)) { return (true); + } } } @@ -2348,24 +2352,24 @@ prof_boot2(tsd_t *tsd) } void -prof_prefork0(tsdn_t *tsdn) -{ +prof_prefork0(tsdn_t *tsdn) { if (opt_prof) { unsigned i; malloc_mutex_prefork(tsdn, &prof_dump_mtx); malloc_mutex_prefork(tsdn, &bt2gctx_mtx); malloc_mutex_prefork(tsdn, &tdatas_mtx); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) + for 
(i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_prefork(tsdn, &tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } } } void -prof_prefork1(tsdn_t *tsdn) -{ +prof_prefork1(tsdn_t *tsdn) { if (opt_prof) { malloc_mutex_prefork(tsdn, &prof_active_mtx); malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); @@ -2376,8 +2380,7 @@ prof_prefork1(tsdn_t *tsdn) } void -prof_postfork_parent(tsdn_t *tsdn) -{ +prof_postfork_parent(tsdn_t *tsdn) { if (opt_prof) { unsigned i; @@ -2387,10 +2390,12 @@ prof_postfork_parent(tsdn_t *tsdn) malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) + for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); @@ -2398,8 +2403,7 @@ prof_postfork_parent(tsdn_t *tsdn) } void -prof_postfork_child(tsdn_t *tsdn) -{ +prof_postfork_child(tsdn_t *tsdn) { if (opt_prof) { unsigned i; @@ -2408,10 +2412,12 @@ prof_postfork_child(tsdn_t *tsdn) malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); malloc_mutex_postfork_child(tsdn, &prof_active_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) + for (i = 0; i < PROF_NCTX_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } malloc_mutex_postfork_child(tsdn, &tdatas_mtx); malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); 
malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); diff --git a/src/rtree.c b/src/rtree.c index 43f21652..de3e5962 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -2,8 +2,7 @@ #include "jemalloc/internal/jemalloc_internal.h" static unsigned -hmin(unsigned ha, unsigned hb) -{ +hmin(unsigned ha, unsigned hb) { return (ha < hb ? ha : hb); } @@ -12,8 +11,7 @@ hmin(unsigned ha, unsigned hb) * used. */ bool -rtree_new(rtree_t *rtree, unsigned bits) -{ +rtree_new(rtree_t *rtree, unsigned bits) { unsigned bits_in_leaf, height, i; assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / @@ -24,10 +22,12 @@ rtree_new(rtree_t *rtree, unsigned bits) : (bits % RTREE_BITS_PER_LEVEL); if (bits > bits_in_leaf) { height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; - if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) + if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) { height++; - } else + } + } else { height = 1; + } assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); rtree->height = height; @@ -68,8 +68,7 @@ rtree_new(rtree_t *rtree, unsigned bits) #define rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl) #endif static rtree_elm_t * -rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) -{ +rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms * sizeof(rtree_elm_t), CACHELINE)); } @@ -84,8 +83,7 @@ rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl); #define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl) #endif UNUSED static void -rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) -{ +rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) { /* Nodes are never deleted during normal operation. 
*/ not_reached(); } @@ -98,8 +96,7 @@ rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl); #ifdef JEMALLOC_JET static void rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node, - unsigned level) -{ + unsigned level) { if (level + 1 < rtree->height) { size_t nchildren, i; @@ -116,22 +113,21 @@ rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node, } void -rtree_delete(tsdn_t *tsdn, rtree_t *rtree) -{ +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { unsigned i; for (i = 0; i < rtree->height; i++) { rtree_elm_t *subtree = rtree->levels[i].subtree; - if (subtree != NULL) + if (subtree != NULL) { rtree_delete_subtree(tsdn, rtree, subtree, i); + } } } #endif static rtree_elm_t * rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, - rtree_elm_t **elmp) -{ + rtree_elm_t **elmp) { rtree_elm_t *node; malloc_mutex_lock(tsdn, &rtree->init_lock); @@ -151,23 +147,20 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, } rtree_elm_t * -rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) -{ +rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) { return (rtree_node_init(tsdn, rtree, level, &rtree->levels[level].subtree)); } rtree_elm_t * rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, - unsigned level) -{ + unsigned level) { return (rtree_node_init(tsdn, rtree, level+1, &elm->child)); } static int rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b, - void *ob) -{ + void *ob) { uintptr_t ka = (uintptr_t)oa; uintptr_t kb = (uintptr_t)ob; @@ -178,8 +171,7 @@ rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b, } static witness_t * -rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) -{ +rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) { witness_t *witness; size_t i; rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd); @@ -204,8 +196,7 @@ 
rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) } static witness_t * -rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) -{ +rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) { size_t i; rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd); @@ -213,15 +204,16 @@ rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) i++) { rtree_elm_witness_t *rew = &witnesses->witnesses[i]; - if (rew->elm == elm) + if (rew->elm == elm) { return (&rew->witness); + } } not_reached(); } static void -rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm) -{ +rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, + const rtree_elm_t *elm) { size_t i; rtree_elm_witness_tsd_t *witnesses = tsd_rtree_elm_witnessesp_get(tsd); @@ -242,12 +234,12 @@ rtree_elm_witness_dalloc(tsd_t *tsd, witness_t *witness, const rtree_elm_t *elm) void rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key, - const rtree_elm_t *elm) -{ + const rtree_elm_t *elm) { witness_t *witness; - if (tsdn_null(tsdn)) + if (tsdn_null(tsdn)) { return; + } witness = rtree_elm_witness_alloc(tsdn_tsd(tsdn), key, elm); witness_lock(tsdn, witness); @@ -255,12 +247,12 @@ rtree_elm_witness_acquire(tsdn_t *tsdn, const rtree_t *rtree, uintptr_t key, void rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree, - const rtree_elm_t *elm) -{ + const rtree_elm_t *elm) { witness_t *witness; - if (tsdn_null(tsdn)) + if (tsdn_null(tsdn)) { return; + } witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm); witness_assert_owner(tsdn, witness); @@ -268,12 +260,12 @@ rtree_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree, void rtree_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree, - const rtree_elm_t *elm) -{ + const rtree_elm_t *elm) { witness_t *witness; - if (tsdn_null(tsdn)) + if (tsdn_null(tsdn)) { return; + } witness = rtree_elm_witness_find(tsdn_tsd(tsdn), elm); witness_unlock(tsdn, witness); diff --git 
a/src/stats.c b/src/stats.c index 020d56bd..b0a7fca2 100644 --- a/src/stats.c +++ b/src/stats.c @@ -34,8 +34,7 @@ bool opt_stats_print = false; static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, bool large, unsigned i) -{ + bool json, bool large, unsigned i) { size_t page; bool in_gap, in_gap_prev; unsigned nbins, j; @@ -144,8 +143,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, } else if (milli < 1000) { malloc_snprintf(util, sizeof(util), "0.%zu", milli); - } else + } else { malloc_snprintf(util, sizeof(util), "1"); + } if (config_tcache) { malloc_cprintf(write_cb, cbopaque, @@ -183,8 +183,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, static void stats_arena_lextents_print(void (*write_cb)(void *, const char *), - void *cbopaque, bool json, unsigned i) -{ + void *cbopaque, bool json, unsigned i) { unsigned nbins, nlextents, j; bool in_gap, in_gap_prev; @@ -248,8 +247,7 @@ stats_arena_lextents_print(void (*write_cb)(void *, const char *), static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - bool json, unsigned i, bool bins, bool large) -{ + bool json, unsigned i, bool bins, bool large) { unsigned nthreads; const char *dss; ssize_t decay_time; @@ -290,8 +288,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, if (decay_time >= 0) { malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n", decay_time); - } else + } else { malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n"); + } } CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); @@ -445,16 +444,17 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, "resident: %12zu\n", resident); } - if (bins) + if (bins) { stats_arena_bins_print(write_cb, cbopaque, json, large, i); - if (large) + } + if (large) { stats_arena_lextents_print(write_cb, cbopaque, json, i); + } } static void stats_general_print(void 
(*write_cb)(void *, const char *), void *cbopaque, - bool json, bool more) -{ + bool json, bool more) { const char *cpv; bool bv; unsigned uv; @@ -473,8 +473,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, if (json) { malloc_cprintf(write_cb, cbopaque, "\t\t\"version\": \"%s\",\n", cpv); - } else + } else { malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); + } /* config. */ #define CONFIG_WRITE_BOOL_JSON(n, c) \ @@ -655,8 +656,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, if (json) { malloc_cprintf(write_cb, cbopaque, "\t\t\t\"narenas\": %u,\n", uv); - } else + } else { malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); + } CTL_GET("arenas.decay_time", &ssv, ssize_t); if (json) { @@ -672,15 +674,17 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, if (json) { malloc_cprintf(write_cb, cbopaque, "\t\t\t\"quantum\": %zu,\n", sv); - } else + } else { malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + } CTL_GET("arenas.page", &sv, size_t); if (json) { malloc_cprintf(write_cb, cbopaque, "\t\t\t\"page\": %zu,\n", sv); - } else + } else { malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + } if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { if (json) { @@ -787,8 +791,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, static void stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, bool json, bool merged, bool destroyed, bool unmerged, bool bins, - bool large) -{ + bool large) { size_t allocated, active, metadata, resident, mapped, retained; CTL_GET("stats.allocated", &allocated, size_t); @@ -846,8 +849,9 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, sz = sizeof(bool); xmallctlbymib(mib, miblen, &initialized[i], &sz, NULL, 0); - if (initialized[i]) + if (initialized[i]) { ninitialized++; + } } mib[1] = MALLCTL_ARENAS_DESTROYED; sz = 
sizeof(bool); @@ -934,8 +938,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { int err; uint64_t epoch; size_t u64sz; diff --git a/src/tcache.c b/src/tcache.c index d1323418..bb6a5a75 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -24,14 +24,12 @@ static tcaches_t *tcaches_avail; /******************************************************************************/ size_t -tcache_salloc(tsdn_t *tsdn, const void *ptr) -{ +tcache_salloc(tsdn_t *tsdn, const void *ptr) { return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr)); } void -tcache_event_hard(tsd_t *tsd, tcache_t *tcache) -{ +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; tcache_bin_t *tbin = &tcache->tbins[binind]; tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; @@ -52,33 +50,36 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) * Reduce fill count by 2X. Limit lg_fill_div such that the * fill count is always at least 1. */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) + if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) { tbin->lg_fill_div++; + } } else if (tbin->low_water < 0) { /* * Increase fill count by 2X. Make sure lg_fill_div stays * greater than 0. */ - if (tbin->lg_fill_div > 1) + if (tbin->lg_fill_div > 1) { tbin->lg_fill_div--; + } } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) + if (tcache->next_gc_bin == nhbins) { tcache->next_gc_bin = 0; + } } void * tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind, bool *tcache_success) -{ + tcache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? 
tcache->prof_accumbytes : 0); - if (config_prof) + if (config_prof) { tcache->prof_accumbytes = 0; + } ret = tcache_alloc_easy(tbin, tcache_success); return (ret); @@ -86,8 +87,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem) -{ + szind_t binind, unsigned rem) { arena_t *arena; void *ptr; unsigned i, nflush, ndeferred; @@ -106,8 +106,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, if (config_prof && bin_arena == arena) { if (arena_prof_accum(tsd_tsdn(tsd), arena, - tcache->prof_accumbytes)) + tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); + } tcache->prof_accumbytes = 0; } @@ -158,14 +159,14 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((int)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache) -{ + unsigned rem, tcache_t *tcache) { arena_t *arena; void *ptr; unsigned i, nflush, ndeferred; @@ -182,8 +183,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; - if (config_prof) + if (config_prof) { idump = false; + } malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { @@ -220,8 +222,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, } } malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); - if (config_prof && idump) + if (config_prof && idump) { prof_idump(tsd_tsdn(tsd)); + } arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - ndeferred); } @@ -241,13 +244,13 @@ 
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((int)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } static void -tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Link into list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->lock); @@ -258,8 +261,7 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) } static void -tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Unlink from list of extant tcaches. */ malloc_mutex_lock(tsdn, &arena->lock); @@ -282,31 +284,30 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, - arena_t *newarena) -{ + arena_t *newarena) { tcache_arena_dissociate(tsdn, tcache, oldarena); tcache_arena_associate(tsdn, tcache, newarena); } tcache_t * -tcache_get_hard(tsd_t *tsd) -{ +tcache_get_hard(tsd_t *tsd) { arena_t *arena; if (!tcache_enabled_get()) { - if (tsd_nominal(tsd)) + if (tsd_nominal(tsd)) { tcache_enabled_set(false); /* Memoize. 
*/ + } return (NULL); } arena = arena_choose(tsd, NULL); - if (unlikely(arena == NULL)) + if (unlikely(arena == NULL)) { return (NULL); + } return (tcache_create(tsd_tsdn(tsd), arena)); } tcache_t * -tcache_create(tsdn_t *tsdn, arena_t *arena) -{ +tcache_create(tsdn_t *tsdn, arena_t *arena) { tcache_t *tcache; size_t size, stack_offset; unsigned i; @@ -321,8 +322,9 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, arena_get(TSDN_NULL, 0, true)); - if (tcache == NULL) + if (tcache == NULL) { return (NULL); + } tcache_arena_associate(tsdn, tcache, arena); @@ -345,8 +347,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) } static void -tcache_destroy(tsd_t *tsd, tcache_t *tcache) -{ +tcache_destroy(tsd_t *tsd, tcache_t *tcache) { arena_t *arena; unsigned i; @@ -372,20 +373,21 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) + arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) { prof_idump(tsd_tsdn(tsd)); + } idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tcache), tcache, NULL, true, true); } void -tcache_cleanup(tsd_t *tsd) -{ +tcache_cleanup(tsd_t *tsd) { tcache_t *tcache; - if (!config_tcache) + if (!config_tcache) { return; + } if ((tcache = tsd_tcache_get(tsd)) != NULL) { tcache_destroy(tsd, tcache); @@ -394,8 +396,7 @@ tcache_cleanup(tsd_t *tsd) } void -tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) -{ +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); @@ -422,8 +423,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) } bool -tcaches_create(tsd_t *tsd, unsigned *r_ind) -{ +tcaches_create(tsd_t *tsd, unsigned *r_ind) { arena_t *arena; tcache_t *tcache; tcaches_t *elm; @@ -431,18 +431,22 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) if (tcaches == NULL) { tcaches = 
base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE); - if (tcaches == NULL) + if (tcaches == NULL) { return (true); + } } - if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { return (true); + } arena = arena_ichoose(tsd, NULL); - if (unlikely(arena == NULL)) + if (unlikely(arena == NULL)) { return (true); + } tcache = tcache_create(tsd_tsdn(tsd), arena); - if (tcache == NULL) + if (tcache == NULL) { return (true); + } if (tcaches_avail != NULL) { elm = tcaches_avail; @@ -460,23 +464,21 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) } static void -tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) -{ - if (elm->tcache == NULL) +tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) { + if (elm->tcache == NULL) { return; + } tcache_destroy(tsd, elm->tcache); elm->tcache = NULL; } void -tcaches_flush(tsd_t *tsd, unsigned ind) -{ +tcaches_flush(tsd_t *tsd, unsigned ind) { tcaches_elm_flush(tsd, &tcaches[ind]); } void -tcaches_destroy(tsd_t *tsd, unsigned ind) -{ +tcaches_destroy(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; tcaches_elm_flush(tsd, elm); elm->next = tcaches_avail; @@ -484,23 +486,25 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) } bool -tcache_boot(tsdn_t *tsdn) -{ +tcache_boot(tsdn_t *tsdn) { unsigned i; /* If necessary, clamp opt_lg_tcache_max. */ - if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SMALL_MAXCLASS) { tcache_maxclass = SMALL_MAXCLASS; - else + } else { tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } nhbins = size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
*/ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins * sizeof(tcache_bin_info_t), CACHELINE); - if (tcache_bin_info == NULL) + if (tcache_bin_info == NULL) { return (true); + } stack_nelms = 0; for (i = 0; i < NBINS; i++) { if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { @@ -12,20 +12,17 @@ malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) /******************************************************************************/ void * -malloc_tsd_malloc(size_t size) -{ +malloc_tsd_malloc(size_t size) { return (a0malloc(CACHELINE_CEILING(size))); } void -malloc_tsd_dalloc(void *wrapper) -{ +malloc_tsd_dalloc(void *wrapper) { a0dalloc(wrapper); } void -malloc_tsd_no_cleanup(void *arg) -{ +malloc_tsd_no_cleanup(void *arg) { not_reached(); } @@ -34,21 +31,22 @@ malloc_tsd_no_cleanup(void *arg) JEMALLOC_EXPORT #endif void -_malloc_thread_cleanup(void) -{ +_malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; - for (i = 0; i < ncleanups; i++) + for (i = 0; i < ncleanups; i++) { pending[i] = true; + } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); - if (pending[i]) + if (pending[i]) { again = true; + } } } } while (again); @@ -56,16 +54,14 @@ _malloc_thread_cleanup(void) #endif void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ +malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } void -tsd_cleanup(void *arg) -{ +tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { @@ -108,29 +104,27 @@ MALLOC_TSD } tsd_t * -malloc_tsd_boot0(void) -{ +malloc_tsd_boot0(void) { tsd_t *tsd; ncleanups = 0; - if (tsd_boot0()) + if (tsd_boot0()) { return (NULL); + } tsd = tsd_fetch(); *tsd_arenas_tdata_bypassp_get(tsd) = true; return (tsd); } void -malloc_tsd_boot1(void) -{ +malloc_tsd_boot1(void) { tsd_boot1(); *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; } #ifdef _WIN32 
static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: @@ -164,8 +158,7 @@ BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; @@ -186,8 +179,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) } void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); malloc_mutex_unlock(TSDN_NULL, &head->lock); @@ -46,8 +46,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, /* malloc_message() setup. */ static void -wrtmessage(void *cbopaque, const char *s) -{ +wrtmessage(void *cbopaque, const char *s) { #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid @@ -71,12 +70,12 @@ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); * je_malloc_message(...) throughout the code. */ void -malloc_write(const char *s) -{ - if (je_malloc_message != NULL) +malloc_write(const char *s) { + if (je_malloc_message != NULL) { je_malloc_message(NULL, s); - else + } else { wrtmessage(NULL, s); + } } /* @@ -84,8 +83,7 @@ malloc_write(const char *s) * provide a wrapper. 
*/ int -buferror(int err, char *buf, size_t buflen) -{ +buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPSTR)buf, (DWORD)buflen, NULL); @@ -103,8 +101,7 @@ buferror(int err, char *buf, size_t buflen) } uintmax_t -malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) -{ +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; unsigned b; bool neg; @@ -149,10 +146,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': - if (b == 0) + if (b == 0) { b = 8; - if (b == 8) + } + if (b == 8) { p++; + } break; case 'X': case 'x': switch (p[2]) { @@ -162,10 +161,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - if (b == 0) + if (b == 0) { b = 16; - if (b == 16) + } + if (b == 16) { p += 2; + } break; default: break; @@ -177,8 +178,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) goto label_return; } } - if (b == 0) + if (b == 0) { b = 10; + } /* Convert. */ ret = 0; @@ -196,8 +198,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) } p++; } - if (neg) + if (neg) { ret = (uintmax_t)(-((intmax_t)ret)); + } if (p == ns) { /* No conversion performed. */ @@ -211,15 +214,15 @@ label_return: if (p == ns) { /* No characters were converted. 
*/ *endptr = (char *)nptr; - } else + } else { *endptr = (char *)p; + } } return (ret); } static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; @@ -261,19 +264,21 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) } static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ +d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; - if ((neg = (x < 0))) + if ((neg = (x < 0))) { x = -x; + } s = u2s(x, 10, false, s, slen_p); - if (neg) + if (neg) { sign = '-'; + } switch (sign) { case '-': - if (!neg) + if (!neg) { break; + } /* Fall through. */ case ' ': case '+': @@ -287,8 +292,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) } static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; @@ -299,8 +303,7 @@ o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) } static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; @@ -311,14 +314,14 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) } size_t -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; const char *f; #define APPEND_C(c) do { \ - if (i < size) \ + if (i < size) { \ str[i] = (c); \ + } \ i++; \ } while (0) #define APPEND_S(s, slen) do { \ @@ -334,16 +337,18 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) (size_t)width - slen : 0); \ if (!left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; 
j++) { \ APPEND_C(' '); \ + } \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ + } \ } \ } while (0) #define GET_ARG_NUMERIC(val, len) do { \ @@ -454,10 +459,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) break; } /* Width/precision separator. */ - if (*f == '.') + if (*f == '.') { f++; - else + } else { goto label_length; + } /* Precision. */ switch (*f) { case '*': @@ -484,8 +490,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) if (*f == 'l') { len = 'q'; f++; - } else + } else { len = 'l'; + } break; case 'q': case 'j': case 't': case 'z': len = *f; @@ -576,10 +583,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) }} } label_out: - if (i < size) + if (i < size) { str[i] = '\0'; - else + } else { str[size - 1] = '\0'; + } #undef APPEND_C #undef APPEND_S @@ -590,8 +598,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) JEMALLOC_FORMAT_PRINTF(3, 4) size_t -malloc_snprintf(char *str, size_t size, const char *format, ...) -{ +malloc_snprintf(char *str, size_t size, const char *format, ...) { size_t ret; va_list ap; @@ -604,8 +611,7 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ + const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { @@ -630,8 +636,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ + const char *format, ...) 
{ va_list ap; va_start(ap, format); @@ -642,8 +647,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, /* Print to stderr in such a way as to avoid memory allocation. */ JEMALLOC_FORMAT_PRINTF(1, 2) void -malloc_printf(const char *format, ...) -{ +malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); diff --git a/src/witness.c b/src/witness.c index ffc7e247..f8d66217 100644 --- a/src/witness.c +++ b/src/witness.c @@ -3,8 +3,7 @@ void witness_init(witness_t *witness, const char *name, witness_rank_t rank, - witness_comp_t *comp, void *opaque) -{ + witness_comp_t *comp, void *opaque) { witness->name = name; witness->rank = rank; witness->comp = comp; @@ -16,8 +15,7 @@ witness_init(witness_t *witness, const char *name, witness_rank_t rank, #define witness_lock_error JEMALLOC_N(n_witness_lock_error) #endif void -witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) -{ +witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) { witness_t *w; malloc_printf("<jemalloc>: Lock rank order reversal:"); @@ -38,8 +36,7 @@ witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error); #define witness_owner_error JEMALLOC_N(n_witness_owner_error) #endif void -witness_owner_error(const witness_t *witness) -{ +witness_owner_error(const witness_t *witness) { malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, witness->rank); abort(); @@ -55,8 +52,7 @@ witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error); #define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error) #endif void -witness_not_owner_error(const witness_t *witness) -{ +witness_not_owner_error(const witness_t *witness) { malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, witness->rank); abort(); @@ -73,8 +69,7 @@ witness_not_owner_error_t *witness_not_owner_error = #define witness_lockless_error JEMALLOC_N(n_witness_lockless_error) #endif void 
-witness_lockless_error(const witness_list_t *witnesses) -{ +witness_lockless_error(const witness_list_t *witnesses) { witness_t *w; malloc_printf("<jemalloc>: Should not own any locks:"); @@ -92,28 +87,24 @@ witness_lockless_error_t *witness_lockless_error = #endif void -witnesses_cleanup(tsd_t *tsd) -{ +witnesses_cleanup(tsd_t *tsd) { witness_assert_lockless(tsd_tsdn(tsd)); /* Do nothing. */ } void -witness_prefork(tsd_t *tsd) -{ +witness_prefork(tsd_t *tsd) { tsd_witness_fork_set(tsd, true); } void -witness_postfork_parent(tsd_t *tsd) -{ +witness_postfork_parent(tsd_t *tsd) { tsd_witness_fork_set(tsd, false); } void -witness_postfork_child(tsd_t *tsd) -{ +witness_postfork_child(tsd_t *tsd) { #ifndef JEMALLOC_MUTEX_INIT_CB witness_list_t *witnesses; @@ -125,8 +125,7 @@ static void zone_reinit_lock(malloc_zone_t *zone); */ static size_t -zone_size(malloc_zone_t *zone, const void *ptr) -{ +zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If @@ -140,20 +139,17 @@ zone_size(malloc_zone_t *zone, const void *ptr) } static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ +zone_malloc(malloc_zone_t *zone, size_t size) { return (je_malloc(size)); } static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { return (je_calloc(num, size)); } static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ +zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. 
*/ je_posix_memalign(&ret, PAGE, size); @@ -162,8 +158,7 @@ zone_valloc(malloc_zone_t *zone, size_t size) } static void -zone_free(malloc_zone_t *zone, void *ptr) -{ +zone_free(malloc_zone_t *zone, void *ptr) { if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; @@ -173,17 +168,16 @@ zone_free(malloc_zone_t *zone, void *ptr) } static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - if (ivsalloc(tsdn_fetch(), ptr) != 0) +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { return (je_realloc(ptr, size)); + } return (realloc(ptr, size)); } static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); @@ -192,8 +186,7 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) } static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { size_t alloc_size; alloc_size = ivsalloc(tsdn_fetch(), ptr); @@ -207,16 +200,14 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) } static void -zone_destroy(malloc_zone_t *zone) -{ +zone_destroy(malloc_zone_t *zone) { /* This function should never be called. 
*/ not_reached(); } static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, - unsigned num_requested) -{ + unsigned num_requested) { unsigned i; for (i = 0; i < num_requested; i++) { @@ -230,8 +221,7 @@ zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, static void zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, - unsigned num_to_be_freed) -{ + unsigned num_to_be_freed) { unsigned i; for (i = 0; i < num_to_be_freed; i++) { @@ -241,53 +231,47 @@ zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, } static size_t -zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) -{ +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { return 0; } static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ - if (size == 0) +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { size = 1; + } return (s2u(size)); } static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, vm_address_t zone_address, memory_reader_t reader, - vm_range_recorder_t recorder) -{ + vm_range_recorder_t recorder) { return KERN_SUCCESS; } static boolean_t -zone_check(malloc_zone_t *zone) -{ +zone_check(malloc_zone_t *zone) { return true; } static void -zone_print(malloc_zone_t *zone, boolean_t verbose) -{ +zone_print(malloc_zone_t *zone, boolean_t verbose) { } static void -zone_log(malloc_zone_t *zone, void *address) -{ +zone_log(malloc_zone_t *zone, void *address) { } static void -zone_force_lock(malloc_zone_t *zone) -{ - if (isthreaded) +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { jemalloc_prefork(); + } } static void -zone_force_unlock(malloc_zone_t *zone) -{ +zone_force_unlock(malloc_zone_t *zone) { /* * Call jemalloc_postfork_child() rather than * jemalloc_postfork_parent(), because this function is executed by both @@ -295,13 +279,13 @@ zone_force_unlock(malloc_zone_t *zone) * reinitialized, but the child cannot unlock mutexes that 
were locked * by the parent. */ - if (isthreaded) + if (isthreaded) { jemalloc_postfork_child(); + } } static void -zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) -{ +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { /* We make no effort to actually fill the values */ stats->blocks_in_use = 0; stats->size_in_use = 0; @@ -310,23 +294,20 @@ zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) } static boolean_t -zone_locked(malloc_zone_t *zone) -{ +zone_locked(malloc_zone_t *zone) { /* Pretend no lock is being held */ return false; } static void -zone_reinit_lock(malloc_zone_t *zone) -{ +zone_reinit_lock(malloc_zone_t *zone) { /* As of OSX 10.12, this function is only used when force_unlock would * be used if the zone version were < 9. So just use force_unlock. */ zone_force_unlock(zone); } static void -zone_init(void) -{ +zone_init(void) { jemalloc_zone.size = zone_size; jemalloc_zone.malloc = zone_malloc; jemalloc_zone.calloc = zone_calloc; @@ -364,8 +345,7 @@ zone_init(void) } static malloc_zone_t * -zone_default_get(void) -{ +zone_default_get(void) { malloc_zone_t **zones = NULL; unsigned int num_zones = 0; @@ -387,16 +367,16 @@ zone_default_get(void) num_zones = 0; } - if (num_zones) + if (num_zones) { return (zones[0]); + } return (malloc_default_zone()); } /* As written, this function can only promote jemalloc_zone. */ static void -zone_promote(void) -{ +zone_promote(void) { malloc_zone_t *zone; do { @@ -433,16 +413,16 @@ zone_promote(void) JEMALLOC_ATTR(constructor) void -zone_register(void) -{ +zone_register(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ default_zone = zone_default_get(); if (!default_zone->zone_name || strcmp(default_zone->zone_name, - "DefaultMallocZone") != 0) + "DefaultMallocZone") != 0) { return; + } /* * The default purgeable zone is created lazily by OSX's libc. It uses |
