/* src/android_je_mallinfo.c */

/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

static size_t accumulate_large_allocs(arena_t* arena) {
  size_t total_bytes = 0;

  /* Accumulate the large allocation stats.
   * Do not include stats.allocated_large; it is only updated by
   * arena_stats_merge, and would include the data counted below.
   */
  for (unsigned j = 0; j < NSIZES - NBINS; j++) {
    /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
    uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
    uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
    size_t allocs = (size_t)(nmalloc - ndalloc);
    total_bytes += sz_index2size(NBINS + j) * allocs;
  }
  return total_bytes;
}

static size_t accumulate_small_allocs(arena_t* arena) {
  size_t total_bytes = 0;
  for (unsigned j = 0; j < NBINS; j++) {
    bin_t* bin = &arena->bins[j];

    /* NOTE: This includes allocations cached on every thread. */
    malloc_mutex_lock(TSDN_NULL, &bin->lock);
    total_bytes += bin_infos[j].reg_size * bin->stats.curregs;
    malloc_mutex_unlock(TSDN_NULL, &bin->lock);
  }
  return total_bytes;
}


/* Only use bin locks since the stats are now all atomic and can be read
 * without taking the stats lock.
 */
struct mallinfo je_mallinfo() {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  for (unsigned i = 0; i < narenas_auto; i++) {
    arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);

      mi.uordblks += accumulate_small_allocs(arena);
      mi.uordblks += accumulate_large_allocs(arena);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  mi.fordblks = mi.hblkhd - mi.uordblks;
  mi.usmblks = mi.hblkhd;
  return mi;
}
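
/* Example (hypothetical caller, not part of the original file): how the
 * fields filled in by je_mallinfo() above might be interpreted:
 *
 *   struct mallinfo mi = je_mallinfo();
 *   size_t mapped    = mi.hblkhd;    // bytes mapped by all auto arenas
 *   size_t allocated = mi.uordblks;  // bytes in live small + large allocations
 *   size_t unused    = mi.fordblks;  // mapped but not currently allocated
 */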

/* Number of automatically managed arenas. */
size_t je_mallinfo_narenas() {
  return narenas_auto;
}

/* Number of small (bin-backed) size classes per arena. */
size_t je_mallinfo_nbins() {
  return NBINS;
}

/* Stats for a single arena, reusing mallinfo fields:
 * hblkhd = mapped bytes, ordblks = bytes in live large allocations,
 * fsmblks = bytes in live small allocations.
 */
struct mallinfo je_mallinfo_arena_info(size_t aidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
      mi.ordblks = accumulate_large_allocs(arena);
      mi.fsmblks = accumulate_small_allocs(arena);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}

/* Stats for a single bin of one arena, reusing mallinfo fields:
 * ordblks = bytes currently allocated from the bin,
 * uordblks = cumulative allocation count, fordblks = cumulative free count.
 */
struct mallinfo je_mallinfo_bin_info(size_t aidx, size_t bidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto && bidx < NBINS) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      bin_t* bin = &arena->bins[bidx];

      malloc_mutex_lock(TSDN_NULL, &bin->lock);
      mi.ordblks = bin_infos[bidx].reg_size * bin->stats.curregs;
      mi.uordblks = (size_t) bin->stats.nmalloc;
      mi.fordblks = (size_t) bin->stats.ndalloc;
      malloc_mutex_unlock(TSDN_NULL, &bin->lock);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}
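
/* Sketch (hypothetical, not part of the original file) of how a caller might
 * walk the per-arena/per-bin stats exposed above; field meanings follow the
 * assignments in je_mallinfo_arena_info() and je_mallinfo_bin_info():
 *
 *   for (size_t a = 0; a < je_mallinfo_narenas(); a++) {
 *     struct mallinfo ai = je_mallinfo_arena_info(a);
 *     // ai.hblkhd: mapped bytes, ai.ordblks: large bytes, ai.fsmblks: small bytes
 *     for (size_t b = 0; b < je_mallinfo_nbins(); b++) {
 *       struct mallinfo bi = je_mallinfo_bin_info(a, b);
 *       // bi.ordblks: bytes currently in use in this bin
 *       // bi.uordblks / bi.fordblks: cumulative malloc / free counts
 *     }
 *   }
 */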