path: root/linker
author     Vic Yang <victoryang@google.com>  2019-01-29 20:23:16 -0800
committer  Vic Yang <victoryang@google.com>  2019-01-30 22:13:13 -0800
commit     bb7e1236f3676baffd5953061154e9a3552317d7 (patch)
tree       a1ddb820db657e34a56e66dd81a0b04a46922673 /linker
parent     58dd1cbd7f2a0b8a779f2f43c45447cdccc09c3e (diff)
download   android_bionic-bb7e1236f3676baffd5953061154e9a3552317d7.tar.gz
android_bionic-bb7e1236f3676baffd5953061154e9a3552317d7.tar.bz2
android_bionic-bb7e1236f3676baffd5953061154e9a3552317d7.zip
Purge linker block allocators before leaving linker
This is the second attempt to purge linker block allocators. Unlike the
previously reverted change, which purged allocators whenever all objects were
freed, we only purge right before control leaves the linker. This limits the
performance impact to one munmap() call per dlopen(), in most cases.

Bug: 112073665
Test: Boot and check memory usage with 'showmap'.
Test: Run camera cold start performance test.
Change-Id: I02c7c44935f768e065fbe7ff0389a84bd44713f0
Diffstat (limited to 'linker')
-rw-r--r--  linker/linker.cpp                  | 24
-rw-r--r--  linker/linker.h                    |  2
-rw-r--r--  linker/linker_block_allocator.cpp  | 22
-rw-r--r--  linker/linker_block_allocator.h    |  4
-rw-r--r--  linker/linker_main.cpp             |  4
5 files changed, 55 insertions(+), 1 deletion(-)
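Before reading the hunks, it helps to see the shape of the change: temporary allocations are purged only at the two points where control leaves the linker, namely the end of a dlopen() call and the hand-off to the loaded executable. The sketch below is a hypothetical, self-contained illustration of that pattern; ScopeGuard, purge_unused_memory(), and fake_dlopen() here are simplified stand-ins invented for the example, not bionic's actual implementations (the linker uses android::base::make_scope_guard).

#include <cstdio>
#include <functional>
#include <utility>

// Stand-in for the linker's purge hook: releases temporary bookkeeping memory.
static void purge_unused_memory() {
  std::puts("purging temporary allocator pages");
}

// Minimal scope guard: runs the stored callback on every exit path of its scope.
class ScopeGuard {
 public:
  explicit ScopeGuard(std::function<void()> f) : f_(std::move(f)) {}
  ~ScopeGuard() { f_(); }
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;
 private:
  std::function<void()> f_;
};

// Hypothetical dlopen-like entry point: the guard guarantees one purge per call.
static void* fake_dlopen(const char* name) {
  ScopeGuard purge_guard([] { purge_unused_memory(); });
  std::printf("loading %s\n", name);
  return nullptr;  // purge_guard fires here, on success and failure alike
}

int main() {
  fake_dlopen("libexample.so");
  purge_unused_memory();  // analogous to the call before handing control to the executable
  return 0;
}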
diff --git a/linker/linker.cpp b/linker/linker.cpp
index d0c740baf..428dd25de 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -536,6 +536,10 @@ class SizeBasedAllocator {
     allocator_.free(ptr);
   }
 
+  static void purge() {
+    allocator_.purge();
+  }
+
  private:
   static LinkerBlockAllocator allocator_;
 };
@@ -553,6 +557,10 @@ class TypeBasedAllocator {
   static void free(T* ptr) {
     SizeBasedAllocator<sizeof(T)>::free(ptr);
   }
+
+  static void purge() {
+    SizeBasedAllocator<sizeof(T)>::purge();
+  }
 };
 
 class LoadTask {
@@ -2074,6 +2082,8 @@ void* do_dlopen(const char* name, int flags,
          ns == nullptr ? "(null)" : ns->get_name(),
          ns);
 
+  auto purge_guard = android::base::make_scope_guard([&]() { purge_unused_memory(); });
+
   auto failure_guard = android::base::make_scope_guard(
       [&]() { LD_LOG(kLogDlopen, "... dlopen failed: %s", linker_get_error_buffer()); });
@@ -4069,3 +4079,17 @@ android_namespace_t* get_exported_namespace(const char* name) {
   }
   return it->second;
 }
+
+void purge_unused_memory() {
+  // For now, we only purge the memory used by LoadTask because we know those
+  // are temporary objects.
+  //
+  // Purging other LinkerBlockAllocator hardly yields much because they hold
+  // information about namespaces and opened libraries, which are not freed
+  // when the control leaves the linker.
+  //
+  // Purging BionicAllocator may give us a few dirty pages back, but those pages
+  // would be already zeroed out, so they compress easily in ZRAM. Therefore,
+  // it is not worth munmap()'ing those pages.
+  TypeBasedAllocator<LoadTask>::purge();
+}
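One small but deliberate detail in the do_dlopen() hunk above: purge_guard is constructed before the pre-existing failure_guard, and C++ destroys local objects in reverse order of construction, so the failure-logging guard fires first and the purge runs last, just before do_dlopen() returns. The toy program below, with made-up Guard and dlopen_like() names (bionic's actual code uses android::base::make_scope_guard), shows that ordering:

#include <cstdio>

// Tiny stand-in for a scope guard; prints its message on destruction.
struct Guard {
  const char* message;
  ~Guard() { std::puts(message); }
};

static void dlopen_like() {
  Guard purge_guard{"2) purge temporary allocations (runs last)"};
  Guard failure_guard{"1) log the failure, if any (runs first)"};
  std::puts("0) body of the dlopen-like call");
}  // locals are destroyed in reverse order of construction

int main() {
  dlopen_like();
  return 0;
}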
diff --git a/linker/linker.h b/linker/linker.h
index 91d3ddf16..964c26638 100644
--- a/linker/linker.h
+++ b/linker/linker.h
@@ -186,3 +186,5 @@ android_namespace_t* get_exported_namespace(const char* name);
 void increment_dso_handle_reference_counter(void* dso_handle);
 void decrement_dso_handle_reference_counter(void* dso_handle);
+
+void purge_unused_memory();
diff --git a/linker/linker_block_allocator.cpp b/linker/linker_block_allocator.cpp
index d72cad3d6..fdb4c8563 100644
--- a/linker/linker_block_allocator.cpp
+++ b/linker/linker_block_allocator.cpp
@@ -55,7 +55,8 @@ LinkerBlockAllocator::LinkerBlockAllocator(size_t block_size)
   : block_size_(
       round_up(block_size < sizeof(FreeBlockInfo) ? sizeof(FreeBlockInfo) : block_size, 16)),
     page_list_(nullptr),
-    free_block_list_(nullptr)
+    free_block_list_(nullptr),
+    allocated_(0)
 {}
 
 void* LinkerBlockAllocator::alloc() {
@@ -76,6 +77,8 @@ void* LinkerBlockAllocator::alloc() {
   memset(block_info, 0, block_size_);
 
+  ++allocated_;
+
   return block_info;
 }
@@ -104,6 +107,8 @@ void LinkerBlockAllocator::free(void* block) {
   block_info->num_free_blocks = 1;
 
   free_block_list_ = block_info;
+
+  --allocated_;
 }
 
 void LinkerBlockAllocator::protect_all(int prot) {
@@ -154,3 +159,18 @@ LinkerBlockAllocatorPage* LinkerBlockAllocator::find_page(void* block) {
   abort();
 }
+
+void LinkerBlockAllocator::purge() {
+  if (allocated_) {
+    return;
+  }
+
+  LinkerBlockAllocatorPage* page = page_list_;
+  while (page) {
+    LinkerBlockAllocatorPage* next = page->next;
+    munmap(page, kAllocateSize);
+    page = next;
+  }
+  page_list_ = nullptr;
+  free_block_list_ = nullptr;
+}
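The allocator side of the change is plain bookkeeping: every alloc() increments a live-block counter, every free() decrements it, and purge() unmaps all pages only when that counter has returned to zero; otherwise it is a no-op. The sketch below shows the same idea in a toy, self-contained form. PagePool, kPageSize, and the one-page-per-allocation scheme are simplifications invented for the example and do not match LinkerBlockAllocator's real block layout.

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Toy page-pool allocator illustrating purge() gated on a live-allocation count.
// Unlike the real LinkerBlockAllocator, it hands out whole pages; the first
// word of each page is reserved for the 'next' link, so callers must not
// overwrite it (kept deliberately simple for the sketch).
class PagePool {
 public:
  void* alloc_page() {
    void* page = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) return nullptr;
    // Thread the new page onto a singly linked list of all mapped pages.
    *static_cast<void**>(page) = page_list_;
    page_list_ = page;
    ++allocated_;
    return page;
  }

  void free_page(void*) {
    // A real allocator would push the block onto a free list; for the sketch
    // we only maintain the counter that gates purge().
    --allocated_;
  }

  // Unmap every page, but only if no allocation is still live.
  void purge() {
    if (allocated_ != 0) {
      return;
    }
    void* page = page_list_;
    while (page != nullptr) {
      void* next = *static_cast<void**>(page);
      munmap(page, kPageSize);
      page = next;
    }
    page_list_ = nullptr;
  }

 private:
  static constexpr size_t kPageSize = 4096;
  void* page_list_ = nullptr;
  size_t allocated_ = 0;
};

int main() {
  PagePool pool;
  void* p = pool.alloc_page();
  pool.purge();       // no-op: one allocation is still live
  pool.free_page(p);
  pool.purge();       // now every page is munmap()'ed
  std::puts("done");
  return 0;
}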
diff --git a/linker/linker_block_allocator.h b/linker/linker_block_allocator.h
index 0c54b93fa..8ae4094be 100644
--- a/linker/linker_block_allocator.h
+++ b/linker/linker_block_allocator.h
@@ -50,6 +50,9 @@ class LinkerBlockAllocator {
   void free(void* block);
   void protect_all(int prot);
 
+  // Purge all pages if all previously allocated blocks have been freed.
+  void purge();
+
  private:
   void create_new_page();
   LinkerBlockAllocatorPage* find_page(void* block);
@@ -57,6 +60,7 @@ class LinkerBlockAllocator {
   size_t block_size_;
   LinkerBlockAllocatorPage* page_list_;
   void* free_block_list_;
+  size_t allocated_;
 
   DISALLOW_COPY_AND_ASSIGN(LinkerBlockAllocator);
 };
diff --git a/linker/linker_main.cpp b/linker/linker_main.cpp
index b0c27dcd6..7486cd79b 100644
--- a/linker/linker_main.cpp
+++ b/linker/linker_main.cpp
@@ -503,6 +503,10 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
   fflush(stdout);
 #endif
 
+  // We are about to hand control over to the executable loaded. We don't want
+  // to leave dirty pages behind unnecessarily.
+  purge_unused_memory();
+
   ElfW(Addr) entry = exe_info.entry_point;
   TRACE("[ Ready to execute \"%s\" @ %p ]", si->get_realpath(), reinterpret_cast<void*>(entry));
   return entry;