author     Christopher Ferris <cferris@google.com>   2018-09-28 12:50:26 -0700
committer  Christopher Ferris <cferris@google.com>   2018-11-01 14:43:50 -0700
commit     c6954b2064a82404e747d9a763fe8e8d7135e202 (patch)
tree       edfab0d3bede0c1543553156ed93fe4d1f021938 /src
parent     109c8749ed17bb1b000360a8a69852f0eab605fa (diff)
Further updates to jemalloc code.
Add support for svelte. Add je_iterate support. Update some of the
internals so that bad pointers in je_iterate do not crash.

Test: Ran new bionic unit tests, ran libmemunreachable tests, booted system.
Change-Id: I04171cf88df16d8dc2c2ebb60327e58b915b9d83
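
A minimal sketch of how a caller might drive the new je_iterate API together
with the je_malloc_disable/je_malloc_enable wrappers changed below. The
count_allocation callback, the live_bytes helper, and the extern declarations
are illustrative assumptions, not part of this commit:

  #include <stddef.h>
  #include <stdint.h>

  extern int je_iterate(uintptr_t base, size_t size,
                        void (*callback)(uintptr_t ptr, size_t size, void* arg),
                        void* arg);
  extern void je_malloc_disable();
  extern void je_malloc_enable();

  /* Hypothetical callback: tally the bytes of each live allocation. */
  static void count_allocation(uintptr_t ptr, size_t size, void* arg) {
    *(size_t*)arg += size;
  }

  static size_t live_bytes(uintptr_t base, size_t size) {
    size_t total = 0;
    je_malloc_disable();  /* quiesce the heap while walking it */
    je_iterate(base, size, count_allocation, &total);
    je_malloc_enable();
    return total;
  }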
Diffstat (limited to 'src')
-rw-r--r--  src/android_je_iterate.c | 54
-rw-r--r--  src/jemalloc.c           |  6
-rw-r--r--  src/large.c              |  2
-rw-r--r--  src/rtree.c              | 12
-rw-r--r--  src/tcache.c             |  4
5 files changed, 69 insertions(+), 9 deletions(-)
diff --git a/src/android_je_iterate.c b/src/android_je_iterate.c
index 3705cd63..0702f338 100644
--- a/src/android_je_iterate.c
+++ b/src/android_je_iterate.c
@@ -19,8 +19,54 @@ static bool malloc_disabled_tcache;
int je_iterate(uintptr_t base, size_t size,
void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
- // TODO: Figure out how to implement this functionality for jemalloc5.
- return -1;
+ size_t pagesize = getpagesize();
+ tsd_t* tsd = tsd_fetch_min();
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+
+ // Make sure the pointer is aligned to at least 8 bytes.
+ uintptr_t ptr = (base + 7) & ~7;
+ uintptr_t end_ptr = ptr + size;
+ while (ptr < end_ptr) {
+ extent_t* extent = iealloc(tsd_tsdn(tsd), (void*)ptr);
+ if (extent == NULL) {
+ // Skip to the next page, guaranteed no other pointers on this page.
+ ptr += pagesize;
+ continue;
+ }
+
+ szind_t szind;
+ bool slab;
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, ptr, true, &szind, &slab);
+ if (slab) {
+ // Small allocation.
+ szind_t binind = extent_szind_get(extent);
+ const bin_info_t* bin_info = &bin_infos[binind];
+ arena_slab_data_t* slab_data = extent_slab_data_get(extent);
+
+ uintptr_t first_ptr = (uintptr_t)extent_addr_get(extent);
+ size_t bin_size = bin_info->reg_size;
+ // Align the pointer to the bin size.
+ ptr = (ptr + bin_size - 1) & ~(bin_size - 1);
+ for (size_t bit = (ptr - first_ptr) / bin_size; bit < bin_info->bitmap_info.nbits; bit++) {
+ if (bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, bit)) {
+ uintptr_t allocated_ptr = first_ptr + bin_size * bit;
+ if (allocated_ptr >= end_ptr) {
+ break;
+ }
+ callback(allocated_ptr, bin_size, arg);
+ }
+ }
+ } else if (extent_state_get(extent) == extent_state_active) {
+ // Large allocation.
+ uintptr_t base_ptr = (uintptr_t)extent_addr_get(extent);
+ if (ptr <= base_ptr) {
+ // This extent is actually allocated and within the range to check.
+ callback(base_ptr, extent_usize_get(extent), arg);
+ }
+ }
+ ptr = (uintptr_t)extent_past_get(extent);
+ }
+ return 0;
}
static void je_malloc_disable_prefork() {
@@ -51,6 +97,9 @@ void je_malloc_disable() {
pthread_mutex_lock(&malloc_disabled_lock);
bool new_tcache = false;
size_t old_len = sizeof(malloc_disabled_tcache);
+
+ // Disable the tcache (if not already disabled) so that we don't
+ // have to search the tcache for pointers.
je_mallctl("thread.tcache.enabled",
&malloc_disabled_tcache, &old_len,
&new_tcache, sizeof(new_tcache));
@@ -60,6 +109,7 @@ void je_malloc_disable() {
void je_malloc_enable() {
jemalloc_postfork_parent();
if (malloc_disabled_tcache) {
+ // Re-enable the tcache if it was enabled before the disable call.
je_mallctl("thread.tcache.enabled", NULL, NULL,
&malloc_disabled_tcache, sizeof(malloc_disabled_tcache));
}
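
To make the slab branch above concrete: with reg_size bytes per region and a
slab starting at first_ptr, bit b of the allocation bitmap maps to the region
at first_ptr + reg_size * b. A self-contained worked example under those
assumptions (toy bitmap; the real code uses jemalloc's bitmap_get):

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    uintptr_t first_ptr = 0x7000;  /* slab base */
    size_t bin_size = 32;          /* reg_size for this bin */
    unsigned bitmap = 0x9;         /* bits 0 and 3 set: those regions are live */
    for (size_t bit = 0; bit < 8; bit++) {
      if (bitmap & (1u << bit)) {
        /* Prints 0x7000 and 0x7060, the pointers je_iterate would report. */
        printf("live region at %#lx, size %zu\n",
               (unsigned long)(first_ptr + bin_size * bit), bin_size);
      }
    }
    return 0;
  }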
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 9f4df5e7..0584362f 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -875,7 +875,7 @@ malloc_conf_init(void) {
const char *opts, *k, *v;
size_t klen, vlen;
-#if defined(__ANDROID__)
+#if defined(__BIONIC__)
/* For Android, do not look at files nor environment variables for
* config data.
*/
@@ -1353,7 +1353,7 @@ static bool
malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
-#if defined(__ANDROID__) && defined(ANDROID_NUM_ARENAS)
+#if defined(__BIONIC__) && defined(ANDROID_NUM_ARENAS)
/* Hardcode since this value won't be used. */
ncpus = 2;
#else
@@ -3341,7 +3341,7 @@ jemalloc_postfork_child(void) {
/******************************************************************************/
-#if defined(__ANDROID__) && !defined(JEMALLOC_JET)
+#if defined(__BIONIC__) && !defined(JEMALLOC_JET)
#include "android_je_iterate.c"
#include "android_je_mallinfo.c"
#endif
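
The recurring __ANDROID__ -> __BIONIC__ switch narrows what the guard tests:
__ANDROID__ is predefined by the compiler for any Android target, while
__BIONIC__ is defined by bionic's <sys/cdefs.h>, i.e. only when the code is
actually built against bionic libc. A small sketch of the distinction (the
messages are illustrative only):

  #include <stdio.h>  /* on bionic this pulls in <sys/cdefs.h>, defining __BIONIC__ */

  int main(void) {
  #if defined(__BIONIC__)
    puts("built against bionic: Android jemalloc overrides apply");
  #elif defined(__ANDROID__)
    puts("Android target without bionic headers: stock behavior");
  #else
    puts("non-Android build: stock behavior");
  #endif
    return 0;
  }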
diff --git a/src/large.c b/src/large.c
index cbffd99b..4ea7f7a7 100644
--- a/src/large.c
+++ b/src/large.c
@@ -42,7 +42,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
-#if defined(__ANDROID__) && !defined(__LP64__)
+#if defined(__BIONIC__) && !defined(__LP64__) && !defined(JEMALLOC_JET) && !defined(JEMALLOC_INTEGRATION_TEST)
/* On 32 bit systems, using a per arena cache can exhaust
* virtual address space. Force all huge allocations to
* always take place in the first arena.
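
The comment above gives the rationale; the effect is as if every huge
allocation were explicitly pinned to the first arena. A hedged illustration
using jemalloc's public API (assuming the unprefixed jemalloc/jemalloc.h
header; this is not how the internal path is implemented):

  #include <stddef.h>
  #include <jemalloc/jemalloc.h>

  /* MALLOCX_ARENA(0) requests arena 0 explicitly, the placement this change
   * forces internally for huge allocations on 32-bit bionic builds. */
  static void *huge_alloc_first_arena(size_t size) {
    return mallocx(size, MALLOCX_ARENA(0));
  }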
diff --git a/src/rtree.c b/src/rtree.c
index 53702cf7..01b6e80b 100644
--- a/src/rtree.c
+++ b/src/rtree.c
@@ -242,8 +242,11 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
#define RTREE_GET_CHILD(level) { \
assert(level < RTREE_HEIGHT-1); \
- if (level != 0 && !dependent && \
- unlikely(!rtree_node_valid(node))) { \
+ /* ANDROID CHANGE: Bad pointers return NULL */ \
+ /* if (level != 0 && !dependent && */ \
+ /* unlikely(!rtree_node_valid(node))) { */ \
+ if (unlikely(!rtree_node_valid(node))) { \
+ /* ANDROID END CHANGE */ \
return NULL; \
} \
uintptr_t subkey = rtree_subkey(key, level); \
@@ -268,7 +271,10 @@ rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
*/
#define RTREE_GET_LEAF(level) { \
assert(level == RTREE_HEIGHT-1); \
- if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
+ /* ANDROID CHANGE: Bad pointers return NULL */ \
+ /* if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {*/ \
+ if (unlikely(!rtree_leaf_valid(leaf))) { \
+ /* ANDROID END CHANGE */ \
return NULL; \
} \
if (RTREE_CTX_NCACHE_L2 > 1) { \
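
These rtree changes are what keep je_iterate from crashing on bad pointers:
node and leaf validity is now checked on every descent, even for lookups the
caller flags as dependent (known-present), so probing an address jemalloc
never mapped returns NULL instead of dereferencing garbage. A generic,
self-contained sketch of the validate-every-level pattern (toy types, not
the jemalloc rtree):

  #include <stddef.h>
  #include <stdint.h>

  typedef struct node {
    struct node *child[16];
    void *value;  /* set at the leaf level */
  } node_t;

  static void *radix_lookup(node_t *root, uintptr_t key, int levels) {
    node_t *node = root;
    for (int level = 0; level < levels; level++) {
      /* The Android change in spirit: validate unconditionally, so a bad
       * key misses cleanly rather than following an invalid pointer. */
      if (node == NULL) {
        return NULL;
      }
      node = node->child[(key >> (4 * (levels - 1 - level))) & 0xf];
    }
    return node == NULL ? NULL : node->value;
  }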
diff --git a/src/tcache.c b/src/tcache.c
index a769a6b1..b4320e42 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -9,7 +9,11 @@
/******************************************************************************/
/* Data. */
+#if !defined(__BIONIC__) || defined(ANDROID_ENABLE_TCACHE)
bool opt_tcache = true;
+#else
+bool opt_tcache = false;
+#endif
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
cache_bin_info_t *tcache_bin_info;
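
With opt_tcache now defaulting to false on bionic unless ANDROID_ENABLE_TCACHE
is set at build time, per-thread caching can still be inspected or toggled at
runtime through the same mallctl name the je_malloc_disable/je_malloc_enable
wrappers use. A sketch, assuming the je_mallctl declaration is in scope:

  #include <stdbool.h>
  #include <stddef.h>

  extern int je_mallctl(const char *name, void *oldp, size_t *oldlenp,
                        void *newp, size_t newlen);

  /* Enable this thread's tcache and return its previous state. */
  static bool thread_tcache_enable(void) {
    bool old_state = false;
    size_t old_len = sizeof(old_state);
    bool new_state = true;
    je_mallctl("thread.tcache.enabled", &old_state, &old_len,
               &new_state, sizeof(new_state));
    return old_state;
  }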