aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorChristopher Ferris <cferris@google.com>2018-11-16 12:52:28 -0800
committerChristopher Ferris <cferris@google.com>2018-11-26 12:16:01 -0800
commit08ccc1993359eaeefa82d9f4728d9b81b73b574a (patch)
treecaafaad03b4f2425f11b025ee8409c0ba12e2268 /src
parent75569e30c58d71c4311b90f7605847279df5c5ed (diff)
downloadplatform_external_jemalloc_new-08ccc1993359eaeefa82d9f4728d9b81b73b574a.tar.gz
platform_external_jemalloc_new-08ccc1993359eaeefa82d9f4728d9b81b73b574a.tar.bz2
platform_external_jemalloc_new-08ccc1993359eaeefa82d9f4728d9b81b73b574a.zip
Fix mallinfo counting for large allocations.
The lstats part of the arena structure was not being counted at all, so added that counting for mallinfo. Bug: 119580449 Test: New unit tests pass; without the change, the test fails. Change-Id: I97b231f9189a79f0ce0f55fe6c4cc00266ca75ac
Diffstat (limited to 'src')
-rw-r--r--src/android_je_mallinfo.c15
1 files changed, 14 insertions, 1 deletions
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
index 32661c21..8a7ff232 100644
--- a/src/android_je_mallinfo.c
+++ b/src/android_je_mallinfo.c
@@ -26,15 +26,28 @@ struct mallinfo je_mallinfo() {
arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
if (arena != NULL) {
mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
- mi.uordblks += atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
+ /* Accumulate the small bins. */
for (unsigned j = 0; j < NBINS; j++) {
bin_t* bin = &arena->bins[j];
+ /* NOTE: This includes allocations cached on every thread. */
malloc_mutex_lock(TSDN_NULL, &bin->lock);
mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
malloc_mutex_unlock(TSDN_NULL, &bin->lock);
}
+
+ /* Accumulate the large allocation stats.
+ * Do not include stats.allocated_large, it is only updated by
+ * arena_stats_merge, and would include the data counted below.
+ */
+ for (unsigned j = 0; j < NSIZES - NBINS; j++) {
+ /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
+ uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
+ uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
+ size_t allocs = (size_t)(nmalloc - ndalloc);
+ mi.uordblks += sz_index2size(NBINS + j) * allocs;
+ }
}
}
malloc_mutex_unlock(TSDN_NULL, &arenas_lock);