path: root/runtime/mem_map.cc
author     Mathieu Chartier <mathieuc@google.com>  2015-06-04 13:42:14 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2015-06-04 15:16:44 -0700
commit     1c8bef4a71612f10b89b102434f70de5a127cc74 (patch)
tree       59d19d274255777e41cba0034ac8c109261e754c  /runtime/mem_map.cc
parent     c991403cd8b869e4a38c11c6a58223b82b89a1b2 (diff)
Add optimization for LOW_4G allocator
The optimization uses the maps_ field to skip over regions which we know
won't be large enough for the request. Reduces the time to create the heap
from 500-700ms to 100ms on N9.

Bug: 20727525
Change-Id: I4fa92d58c2e135ba31a03ababff46669089bb542
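To make the skip-ahead idea easier to follow outside the diff context, here is a
minimal, self-contained sketch of the same search, assuming a plain std::map keyed
by base address in place of ART's maps_ multimap of MemMap* pointers. The names
FindLowGap, Reservations and k4GB are hypothetical and are not ART APIs; this is an
illustration of the technique, not the actual implementation.

// Standalone sketch of the gap-skipping search over existing reservations.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

namespace {

constexpr uint64_t k4GB = UINT64_C(4) * 1024 * 1024 * 1024;

// Base address -> size of an existing (page-aligned) reservation.
using Reservations = std::map<uintptr_t, uintptr_t>;

// Returns the lowest address >= start with at least byte_count free bytes
// before the next reservation and below 4 GB, or 0 if no such gap exists.
uintptr_t FindLowGap(const Reservations& maps, uintptr_t start, uintptr_t byte_count) {
  uintptr_t ptr = start;
  // First reservation whose base is strictly above ptr.
  auto it = maps.upper_bound(ptr);
  if (it != maps.begin()) {
    // If the previous reservation extends past ptr, resume at its end.
    auto before_it = std::prev(it);
    ptr = std::max(ptr, before_it->first + before_it->second);
  }
  // Hop from reservation to reservation instead of probing every page.
  while (it != maps.end()) {
    uintptr_t gap = it->first - ptr;
    if (gap >= byte_count) {
      return ptr;  // The hole before the next reservation is large enough.
    }
    ptr = it->first + it->second;  // Skip to the end of this reservation.
    ++it;
  }
  // Past the last reservation: the hole runs up to the 4 GB boundary.
  return (ptr < k4GB && k4GB - ptr >= byte_count) ? ptr : 0;
}

}  // namespace

int main() {
  Reservations maps;
  maps[0x10000000] = 0x20000000;  // 512 MiB reserved at 256 MiB.
  maps[0x40000000] = 0x01000000;  // 16 MiB reserved at 1 GiB.
  // Ask for a 1 GiB hole starting no lower than 256 MiB; the search skips
  // both reservations and lands at 0x41000000 without probing the pages
  // in between.
  uintptr_t hit = FindLowGap(maps, 0x10000000, 0x40000000);
  std::printf("gap found at %#llx\n", static_cast<unsigned long long>(hit));
  return hit != 0 ? 0 : 1;
}

As in the patch, the search only ever advances ptr to the end of an existing
reservation, so the number of iterations is bounded by the number of maps rather
than by the number of pages below 4 GB.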
Diffstat (limited to 'runtime/mem_map.cc')
-rw-r--r--  runtime/mem_map.cc  24
1 file changed, 24 insertions, 0 deletions
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index cf4233c3d1..7feac8ab0c 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -314,7 +314,31 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
   if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
+    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+      // Use maps_ as an optimization to skip over large maps.
+      // Find the first map which is address > ptr.
+      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+      if (it != maps_->begin()) {
+        auto before_it = it;
+        --before_it;
+        // Start at the end of the map before the upper bound.
+        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+        CHECK_ALIGNED(ptr, kPageSize);
+      }
+      while (it != maps_->end()) {
+        // How much space do we have until the next map?
+        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+        // If the space may be sufficient, break out of the loop.
+        if (delta >= page_aligned_byte_count) {
+          break;
+        }
+        // Otherwise, skip to the end of the map.
+        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+        CHECK_ALIGNED(ptr, kPageSize);
+        ++it;
+      }
+
       if (4U * GB - ptr < page_aligned_byte_count) {
         // Not enough memory until 4GB.
         if (first_run) {