/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Only use bin locks since the stats are now all atomic and can be read
 * without taking the stats lock.
 */
/* Android's mallinfo(3) backend, computed from jemalloc's internal arena
 * statistics. Field usage here is Android-specific, not glibc-compatible:
 *   hblkhd   - total bytes mapped across all auto arenas
 *   uordblks - bytes in live allocations (small bins + large size classes)
 *   fordblks - mapped but not allocated (hblkhd - uordblks)
 *   usmblks  - duplicates hblkhd
 */
struct mallinfo je_mallinfo() {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  /* Hold arenas_lock across the walk so the arena set is stable. */
  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  for (unsigned i = 0; i < narenas_auto; i++) {
    arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      /* Bytes of address space mapped by this arena. */
      mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);

      /* Accumulate the small bins. */
      for (unsigned j = 0; j < NBINS; j++) {
        bin_t* bin = &arena->bins[j];

        /* NOTE: This includes allocations cached on every thread. */
        malloc_mutex_lock(TSDN_NULL, &bin->lock);
        /* Live small bytes = regions in use * fixed region size for bin j. */
        mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
        malloc_mutex_unlock(TSDN_NULL, &bin->lock);
      }

      /* Accumulate the large allocation stats.
       * Do not include stats.allocated_large, it is only updated by
       * arena_stats_merge, and would include the data counted below.
       */
      for (unsigned j = 0; j < NSIZES - NBINS; j++) {
        /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
        uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
        uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
        /* Outstanding large allocations of this size class. */
        size_t allocs = (size_t)(nmalloc - ndalloc);
        /* Index into large classes starts right after the small bins. */
        mi.uordblks += sz_index2size(NBINS + j) * allocs;
      }
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  /* Derive free space as mapped minus allocated; mirror mapped in usmblks. */
  mi.fordblks = mi.hblkhd - mi.uordblks;
  mi.usmblks = mi.hblkhd;
  return mi;
}

/* Android extension: number of auto arenas. Callers use this as the upper
 * bound for the aidx argument of __mallinfo_arena_info().
 *
 * Fix: use a proper (void) prototype — an empty parameter list is an
 * obsolescent unprototyped declarator in C and disables argument checking.
 */
size_t __mallinfo_narenas(void) {
  return narenas_auto;
}

/* Android extension: number of small-size-class bins per arena. Callers use
 * this as the upper bound for the bidx argument of __mallinfo_bin_info().
 *
 * Fix: use a proper (void) prototype — an empty parameter list is an
 * obsolescent unprototyped declarator in C and disables argument checking.
 */
size_t __mallinfo_nbins(void) {
  return NBINS;
}

/* Android extension: per-arena statistics for arena index aidx.
 * Returns an all-zero struct if aidx is out of range or the arena slot is
 * unpopulated. Field usage is Android-specific:
 *   hblkhd  - bytes mapped by this arena
 *   ordblks - bytes in large allocations (arena->stats.allocated_large;
 *             NOTE(review): je_mallinfo above deliberately avoids this
 *             counter as stale until arena_stats_merge — same caveat
 *             presumably applies here)
 *   fsmblks - bytes in live small-bin regions (includes per-thread caches)
 */
struct mallinfo __mallinfo_arena_info(size_t aidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  /* arenas_lock keeps the arena set stable while we read. */
  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
      mi.ordblks = atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);

      for (unsigned j = 0; j < NBINS; j++) {
        bin_t* bin = &arena->bins[j];

        /* Bin lock protects curregs; sum live bytes per small bin. */
        malloc_mutex_lock(TSDN_NULL, &bin->lock);
        mi.fsmblks += bin_infos[j].reg_size * bin->stats.curregs;
        malloc_mutex_unlock(TSDN_NULL, &bin->lock);
      }
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}

/* Android extension: statistics for one small bin (bidx) of one arena (aidx).
 * Returns an all-zero struct if either index is out of range or the arena
 * slot is unpopulated. Field usage is Android-specific:
 *   ordblks  - bytes in live regions of this bin
 *   uordblks - cumulative malloc count for this bin (u64 truncated to size_t)
 *   fordblks - cumulative dalloc count for this bin (u64 truncated to size_t)
 */
struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  /* arenas_lock keeps the arena set stable while we read. */
  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto && bidx < NBINS) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      bin_t* bin = &arena->bins[bidx];

      /* Bin lock protects curregs/nmalloc/ndalloc. */
      malloc_mutex_lock(TSDN_NULL, &bin->lock);
      mi.ordblks = bin_infos[bidx].reg_size * bin->stats.curregs;
      mi.uordblks = (size_t) bin->stats.nmalloc;
      mi.fordblks = (size_t) bin->stats.ndalloc;
      malloc_mutex_unlock(TSDN_NULL, &bin->lock);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}