author     Christopher Ferris <cferris@google.com>   2018-09-10 16:02:28 -0700
committer  Christopher Ferris <cferris@google.com>   2018-09-14 12:14:54 -0700
commit     03b5d1c549d393f0a4c86cde7377e098ebb0e686 (patch)
tree       025d300d4ff64f7929d19c3808f8655b577caa7b /src
parent     7eecc47efac8cef90e806aca2758ef0fca63cf7a (diff)
Add android extensions.

Bug: 62621531
Bug: 110158834
Test: Ran unit tests and benchmarks using libc.
Change-Id: Ie13ab8510c42f96b58496b0ab7e4f8c3a9cd2c6d
Diffstat (limited to 'src')
-rw-r--r--   src/android_je_iterate.c    67
-rw-r--r--   src/android_je_mallinfo.c   97
-rw-r--r--   src/jemalloc.c              21
-rw-r--r--   src/large.c                  8
-rw-r--r--   src/pages.c                 21
5 files changed, 214 insertions, 0 deletions
diff --git a/src/android_je_iterate.c b/src/android_je_iterate.c
new file mode 100644
index 00000000..3705cd63
--- /dev/null
+++ b/src/android_je_iterate.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+static pthread_mutex_t malloc_disabled_lock = PTHREAD_MUTEX_INITIALIZER;
+static bool malloc_disabled_tcache;
+
+int je_iterate(uintptr_t base, size_t size,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
+ // TODO: Figure out how to implement this functionality for jemalloc5.
+ return -1;
+}
+
+static void je_malloc_disable_prefork() {
+ pthread_mutex_lock(&malloc_disabled_lock);
+}
+
+static void je_malloc_disable_postfork_parent() {
+ pthread_mutex_unlock(&malloc_disabled_lock);
+}
+
+static void je_malloc_disable_postfork_child() {
+ pthread_mutex_init(&malloc_disabled_lock, NULL);
+}
+
+void je_malloc_disable_init() {
+ if (pthread_atfork(je_malloc_disable_prefork,
+ je_malloc_disable_postfork_parent, je_malloc_disable_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ }
+}
+
+void je_malloc_disable() {
+ static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+ pthread_once(&once_control, je_malloc_disable_init);
+
+ pthread_mutex_lock(&malloc_disabled_lock);
+ bool new_tcache = false;
+ size_t old_len = sizeof(malloc_disabled_tcache);
+ je_mallctl("thread.tcache.enabled",
+ &malloc_disabled_tcache, &old_len,
+ &new_tcache, sizeof(new_tcache));
+ jemalloc_prefork();
+}
+
+void je_malloc_enable() {
+ jemalloc_postfork_parent();
+ if (malloc_disabled_tcache) {
+ je_mallctl("thread.tcache.enabled", NULL, NULL,
+ &malloc_disabled_tcache, sizeof(malloc_disabled_tcache));
+ }
+ pthread_mutex_unlock(&malloc_disabled_lock);
+}
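
The hooks above are meant to be paired by a caller that needs the heap quiescent while it inspects memory: disable, walk, enable. A minimal sketch of that pattern follows, assuming the extern declarations below; count_allocation and scan_region are hypothetical names, and je_iterate itself is still a stub that returns -1 per the TODO above.

#include <stddef.h>
#include <stdint.h>

extern void je_malloc_disable(void);
extern void je_malloc_enable(void);
extern int je_iterate(uintptr_t base, size_t size,
                      void (*callback)(uintptr_t ptr, size_t size, void* arg),
                      void* arg);

/* Hypothetical callback: accumulate the bytes of every allocation reported. */
static void count_allocation(uintptr_t ptr, size_t size, void* arg) {
  (void)ptr;
  *(size_t*)arg += size;
}

/* Hypothetical caller: quiesce the allocator, walk a region, resume. */
static size_t scan_region(uintptr_t base, size_t size) {
  size_t total = 0;
  je_malloc_disable();                               /* Disables the tcache and takes the allocator locks via jemalloc_prefork(). */
  je_iterate(base, size, count_allocation, &total);  /* Currently returns -1 (unimplemented). */
  je_malloc_enable();                                /* Releases the locks and restores the tcache setting. */
  return total;
}
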
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
new file mode 100644
index 00000000..32661c21
--- /dev/null
+++ b/src/android_je_mallinfo.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Only use bin locks since the stats are now all atomic and can be read
+ * without taking the stats lock.
+ */
+struct mallinfo je_mallinfo() {
+ struct mallinfo mi;
+ memset(&mi, 0, sizeof(mi));
+
+ malloc_mutex_lock(TSDN_NULL, &arenas_lock);
+ for (unsigned i = 0; i < narenas_auto; i++) {
+ arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
+ if (arena != NULL) {
+ mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
+ mi.uordblks += atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
+
+ for (unsigned j = 0; j < NBINS; j++) {
+ bin_t* bin = &arena->bins[j];
+
+ malloc_mutex_lock(TSDN_NULL, &bin->lock);
+ mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
+ malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+ }
+ }
+ }
+ malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
+ mi.fordblks = mi.hblkhd - mi.uordblks;
+ mi.usmblks = mi.hblkhd;
+ return mi;
+}
+
+size_t __mallinfo_narenas() {
+ return narenas_auto;
+}
+
+size_t __mallinfo_nbins() {
+ return NBINS;
+}
+
+struct mallinfo __mallinfo_arena_info(size_t aidx) {
+ struct mallinfo mi;
+ memset(&mi, 0, sizeof(mi));
+
+ malloc_mutex_lock(TSDN_NULL, &arenas_lock);
+ if (aidx < narenas_auto) {
+ arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
+ if (arena != NULL) {
+ mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
+ mi.ordblks = atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
+
+ for (unsigned j = 0; j < NBINS; j++) {
+ bin_t* bin = &arena->bins[j];
+
+ malloc_mutex_lock(TSDN_NULL, &bin->lock);
+ mi.fsmblks += bin_infos[j].reg_size * bin->stats.curregs;
+ malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+ }
+ }
+ }
+ malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
+ return mi;
+}
+
+struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx) {
+ struct mallinfo mi;
+ memset(&mi, 0, sizeof(mi));
+
+ malloc_mutex_lock(TSDN_NULL, &arenas_lock);
+ if (aidx < narenas_auto && bidx < NBINS) {
+ arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
+ if (arena != NULL) {
+ bin_t* bin = &arena->bins[bidx];
+
+ malloc_mutex_lock(TSDN_NULL, &bin->lock);
+ mi.ordblks = bin_infos[bidx].reg_size * bin->stats.curregs;
+ mi.uordblks = (size_t) bin->stats.nmalloc;
+ mi.fordblks = (size_t) bin->stats.ndalloc;
+ malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+ }
+ }
+ malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
+ return mi;
+}
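
A consumer of these hooks (for example, a malloc_info()-style dump) can walk every arena and bin with them. The sketch below is only illustrative: dump_heap_stats is a made-up name, and the field meanings follow the assignments above (per arena, hblkhd = mapped bytes, ordblks = large-allocation bytes, fsmblks = bin-allocation bytes; per bin, ordblks/uordblks/fordblks = allocated bytes/nmalloc/ndalloc).

#include <malloc.h>   /* struct mallinfo */
#include <stddef.h>
#include <stdio.h>

extern size_t __mallinfo_narenas(void);
extern size_t __mallinfo_nbins(void);
extern struct mallinfo __mallinfo_arena_info(size_t aidx);
extern struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx);

/* Hypothetical helper: print per-arena and per-bin usage. */
static void dump_heap_stats(void) {
  size_t narenas = __mallinfo_narenas();
  size_t nbins = __mallinfo_nbins();
  for (size_t a = 0; a < narenas; a++) {
    struct mallinfo ai = __mallinfo_arena_info(a);
    printf("arena %zu: mapped=%zu large=%zu small=%zu\n",
           a, (size_t)ai.hblkhd, (size_t)ai.ordblks, (size_t)ai.fsmblks);
    for (size_t b = 0; b < nbins; b++) {
      struct mallinfo bi = __mallinfo_bin_info(a, b);
      if (bi.ordblks == 0) {
        continue;
      }
      printf("  bin %zu: allocated=%zu nmalloc=%zu ndalloc=%zu\n",
             b, (size_t)bi.ordblks, (size_t)bi.uordblks, (size_t)bi.fordblks);
    }
  }
}
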
diff --git a/src/jemalloc.c b/src/jemalloc.c
index f93c16fa..9f4df5e7 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -875,7 +875,14 @@ malloc_conf_init(void) {
const char *opts, *k, *v;
size_t klen, vlen;
+#if defined(__ANDROID__)
+ /* For Android, do not look at files nor environment variables for
+ * config data.
+ */
+ for (i = 0; i < 2; i++) {
+#else
for (i = 0; i < 4; i++) {
+#endif
/* Get runtime configuration. */
switch (i) {
case 0:
@@ -1346,7 +1353,12 @@ static bool
malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
+#if defined(__ANDROID__) && defined(ANDROID_NUM_ARENAS)
+ /* Hardcode since this value won't be used. */
+ ncpus = 2;
+#else
ncpus = malloc_ncpus();
+#endif
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
@@ -1371,6 +1383,9 @@ malloc_init_hard_recursible(void) {
static unsigned
malloc_narenas_default(void) {
+#if defined(ANDROID_NUM_ARENAS)
+ return ANDROID_NUM_ARENAS;
+#else
assert(ncpus > 0);
/*
* For SMP systems, create more than one arena per CPU by
@@ -1381,6 +1396,7 @@ malloc_narenas_default(void) {
} else {
return 1;
}
+#endif
}
static percpu_arena_mode_t
@@ -3324,3 +3340,8 @@ jemalloc_postfork_child(void) {
}
/******************************************************************************/
+
+#if defined(__ANDROID__) && !defined(JEMALLOC_JET)
+#include "android_je_iterate.c"
+#include "android_je_mallinfo.c"
+#endif
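
ANDROID_NUM_ARENAS is expected to be supplied by the build; the hunks above only test whether it is defined. One way to confirm the effective arena count at runtime is jemalloc's standard mallctl interface. The sketch below assumes the je_-prefixed mallctl symbol used in this tree and the existing "arenas.narenas" name; print_narenas is a hypothetical helper.

#include <stddef.h>
#include <stdio.h>

/* jemalloc's mallctl(), exported with the je_ prefix. */
extern int je_mallctl(const char *name, void *oldp, size_t *oldlenp,
                      void *newp, size_t newlen);

/* Hypothetical check: report how many arenas were actually created. */
static void print_narenas(void) {
  unsigned narenas = 0;
  size_t len = sizeof(narenas);
  if (je_mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
    /* Expected to reflect the hardcoded default from malloc_narenas_default() above. */
    printf("jemalloc arenas: %u\n", narenas);
  }
}
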
diff --git a/src/large.c b/src/large.c
index 27a2c679..cbffd99b 100644
--- a/src/large.c
+++ b/src/large.c
@@ -42,7 +42,15 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
+#if defined(__ANDROID__) && !defined(__LP64__)
+ /* On 32 bit systems, using a per arena cache can exhaust
+ * virtual address space. Force all huge allocations to
+ * always take place in the first arena.
+ */
+ arena = arena_get(tsdn, 0, false);
+#else
arena = arena_choose(tsdn_tsd(tsdn), arena);
+#endif
}
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
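
The problem the 32-bit branch above guards against is growth in mapped address space rather than in allocated bytes, and that growth can be watched through jemalloc's stats mallctls. The sketch below is only a rough probe, assuming a build with stats enabled; it uses the standard "epoch" and "stats.mapped" names via the prefixed je_mallctl.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

extern int je_mallctl(const char *name, void *oldp, size_t *oldlenp,
                      void *newp, size_t newlen);

/* Refresh the cached stats, then read the total bytes currently mapped. */
static size_t mapped_bytes(void) {
  uint64_t epoch = 1;
  size_t sz = sizeof(epoch);
  je_mallctl("epoch", &epoch, &sz, &epoch, sz);
  size_t mapped = 0;
  sz = sizeof(mapped);
  je_mallctl("stats.mapped", &mapped, &sz, NULL, 0);
  return mapped;
}

int main(void) {
  printf("mapped before: %zu\n", mapped_bytes());
  void *p = malloc(4u << 20);   /* A large allocation served by large_palloc(). */
  printf("mapped after:  %zu\n", mapped_bytes());
  free(p);
  return 0;
}
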
diff --git a/src/pages.c b/src/pages.c
index 26002692..132aeeb7 100644
--- a/src/pages.c
+++ b/src/pages.c
@@ -16,6 +16,13 @@
#endif
/******************************************************************************/
+/* Defines/includes needed for special android code. */
+
+#if defined(__ANDROID__)
+#include <sys/prctl.h>
+#endif
+
+/******************************************************************************/
/* Data. */
/* Actual operating system page size, detected during bootstrap, <= PAGE. */
@@ -90,6 +97,13 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
ret = NULL;
}
#endif
+#if defined(__ANDROID__)
+ if (ret != NULL) {
+ /* Name this memory as being used by libc */
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, size,
+ "libc_malloc");
+ }
+#endif
assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
ret == addr));
return ret;
@@ -573,6 +587,11 @@ pages_boot(void) {
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
+#if defined(__ANDROID__)
+ /* Android always supports overcommits. */
+ os_overcommits = true;
+#else /* __ANDROID__ */
+
#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
@@ -586,6 +605,8 @@ pages_boot(void) {
os_overcommits = false;
#endif
+#endif /* __ANDROID__ */
+
init_thp_state();
/* Detect lazy purge runtime support. */
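
The prctl() call added to os_pages_map() makes jemalloc's mappings show up with a name in /proc/<pid>/maps ("[anon:libc_malloc]") on kernels that support PR_SET_VMA_ANON_NAME. A standalone sketch of the same naming trick follows, with a made-up region name, assuming Android's <sys/prctl.h> provides the PR_SET_VMA constants.

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

int main(void) {
  size_t size = 1u << 20;
  void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return 1;
  }
  /* Same call the patch adds, but with a hypothetical name instead of "libc_malloc". */
  if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "example_region") != 0) {
    perror("prctl(PR_SET_VMA_ANON_NAME)");
  }
  /* The region should now appear in /proc/self/maps as "[anon:example_region]". */
  munmap(p, size);
  return 0;
}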