author     Mathieu Chartier <mathieuc@google.com>    2014-05-30 13:02:46 -0700
committer  Mathieu Chartier <mathieuc@google.com>    2014-05-30 13:19:19 -0700
commit     c355a2a78d6ebdfdb645221275affb9136b4c667 (patch)
tree       33e6ceb178ae6d82c630c5b34e216bc468276295 /runtime/mem_map.cc
parent     b7f02280f7f56ae94fe7f01e161be0b725b6e4a9 (diff)
Fix race condition in MemMap::MapAnonymous.
Previously we were using MAP_FIXED, which introduced a serious race condition when MAP_32BIT was set: mmap could overwrite an existing map at an address we had determined was free with msync. There was a window of time after we had msynced a page in which another thread could map something at that page.

The new method avoids MAP_FIXED and instead unmaps allocations which succeed but aren't in the low 4GB when MAP_32BIT is set.

Bug: 15338094
Bug: 14974497

Change-Id: I57f00baf4143e9fa17fb1d4c2be04b30705a2bfd
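To make the new scheme concrete, below is a minimal self-contained sketch (not the actual ART code) of the pattern the patch adopts. The helper name MapAnonymousLow4GB and the constants kPageSize and kLow4GB are assumptions for illustration; the real MapAnonymous also probes candidate pages with msync and tracks next_mem_pos_ to avoid rescanning. The key points are that mmap receives the candidate address only as a hint, and that the result is validated and unmapped if it falls outside the low 4GB:

#include <sys/mman.h>

#include <cstddef>
#include <cstdint>

// Illustrative constants; ART defines its own kPageSize and GB.
static constexpr uintptr_t kPageSize = 4096;
static constexpr uint64_t kLow4GB = UINT64_C(4) * 1024 * 1024 * 1024;

// Hypothetical helper (64-bit assumed, since MAP_32BIT is x86_64-only):
// map page_aligned_byte_count anonymous bytes below 4GB without MAP_FIXED.
void* MapAnonymousLow4GB(size_t page_aligned_byte_count, int prot) {
  for (uintptr_t ptr = kPageSize; ptr + page_aligned_byte_count <= kLow4GB;
       ptr += kPageSize) {
    // Without MAP_FIXED the address is only a hint: if the range is busy,
    // the kernel picks another address rather than overwriting an existing
    // mapping, which is what closes the race described above.
    void* actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count,
                        prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (actual == MAP_FAILED) {
      continue;
    }
    // The hint may have been ignored; accept the mapping only if its end
    // still falls below 4GB, otherwise release it and keep scanning.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < kLow4GB) {
      return actual;
    }
    munmap(actual, page_aligned_byte_count);
  }
  return MAP_FAILED;
}

Note that the acceptance test mirrors the patch: it checks the end of the mapping (actual + page_aligned_byte_count), not just its start, since a mapping that begins just under the boundary can still spill past 4GB.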
Diffstat (limited to 'runtime/mem_map.cc')
-rw-r--r--  runtime/mem_map.cc | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 49e0b54758..892e7f4acd 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -206,8 +206,6 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
   if (low_4gb && expected == nullptr) {
-    flags |= MAP_FIXED;
-
     bool first_run = true;
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -243,7 +241,14 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
         actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                       0);
         if (actual != MAP_FAILED) {
-          break;
+          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
+          // 4GB. If this is the case, unmap and retry.
+          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
+            break;
+          } else {
+            munmap(actual, page_aligned_byte_count);
+            actual = MAP_FAILED;
+          }
         }
       } else {
         // Skip over last page.