 src/android_je_mallinfo.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
index 32661c21..8a7ff232 100644
--- a/src/android_je_mallinfo.c
+++ b/src/android_je_mallinfo.c
@@ -26,15 +26,28 @@ struct mallinfo je_mallinfo() {
     arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
     if (arena != NULL) {
       mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
-      mi.uordblks += atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
 
+      /* Accumulate the small bins. */
       for (unsigned j = 0; j < NBINS; j++) {
         bin_t* bin = &arena->bins[j];
 
+        /* NOTE: This includes allocations cached on every thread. */
         malloc_mutex_lock(TSDN_NULL, &bin->lock);
         mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
         malloc_mutex_unlock(TSDN_NULL, &bin->lock);
       }
+
+      /* Accumulate the large allocation stats.
+       * Do not include stats.allocated_large, it is only updated by
+       * arena_stats_merge, and would include the data counted below.
+       */
+      for (unsigned j = 0; j < NSIZES - NBINS; j++) {
+        /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
+        uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
+        uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
+        size_t allocs = (size_t)(nmalloc - ndalloc);
+        mi.uordblks += sz_index2size(NBINS + j) * allocs;
+      }
     }
   }
   malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
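
To make the two accounting strategies concrete, here is a minimal, self-contained sketch of the same pattern. bin_t, large_stats_t, large_size, and in_use_bytes below are hypothetical stand-ins, not jemalloc's real types or API: small bins count live regions under the bin lock (mirroring reg_size * curregs above), while large size classes derive the live count from monotonic nmalloc/ndalloc counters read in the same order as the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NBINS  2  /* hypothetical small size classes */
#define NLARGE 2  /* hypothetical large size classes */

typedef struct {
  pthread_mutex_t lock;
  size_t reg_size;  /* bytes per region in this bin */
  size_t curregs;   /* regions currently allocated */
} bin_t;

typedef struct {
  _Atomic uint64_t nmalloc;  /* allocations ever made, monotonic */
  _Atomic uint64_t ndalloc;  /* deallocations ever made, monotonic */
} large_stats_t;

static bin_t bins[NBINS] = {
  { PTHREAD_MUTEX_INITIALIZER, 32, 0 },
  { PTHREAD_MUTEX_INITIALIZER, 64, 0 },
};
static large_stats_t lstats[NLARGE];
static const size_t large_size[NLARGE] = { 16384, 32768 };

static size_t in_use_bytes(void) {
  size_t total = 0;

  /* Small bins: curregs is only stable while the bin lock is held. */
  for (unsigned j = 0; j < NBINS; j++) {
    pthread_mutex_lock(&bins[j].lock);
    total += bins[j].reg_size * bins[j].curregs;
    pthread_mutex_unlock(&bins[j].lock);
  }

  /* Large classes: live objects = nmalloc - ndalloc. Read ndalloc first
   * so that nmalloc >= ndalloc holds even against concurrent updates. */
  for (unsigned j = 0; j < NLARGE; j++) {
    uint64_t ndalloc = atomic_load_explicit(&lstats[j].ndalloc, memory_order_acquire);
    uint64_t nmalloc = atomic_load_explicit(&lstats[j].nmalloc, memory_order_acquire);
    total += large_size[j] * (size_t)(nmalloc - ndalloc);
  }
  return total;
}

int main(void) {
  bins[0].curregs = 3;                  /* three 32-byte regions live */
  atomic_store(&lstats[1].nmalloc, 5);
  atomic_store(&lstats[1].ndalloc, 3);  /* two 32 KiB objects live */
  printf("%zu bytes in use\n", in_use_bytes());
  return 0;
}

Reading ndalloc before nmalloc is what makes the unlocked subtraction safe: every free is preceded by a matching allocation, so any nmalloc value read afterwards is at least as large as the ndalloc snapshot, and the difference can never wrap. The same reasoning explains why the patch drops stats.allocated_large, which is only refreshed by arena_stats_merge and would double-count the per-class totals computed in the loop.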