Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arm/context_arm.cc             |   4
-rw-r--r--  runtime/arch/arm64/context_arm64.cc         |   5
-rw-r--r--  runtime/arch/mips/context_mips.cc           |   3
-rw-r--r--  runtime/arch/x86/context_x86.cc             |   4
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc       |   3
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  |   5
-rw-r--r--  runtime/gc/heap.cc                          |   2
-rw-r--r--  runtime/gc/space/image_space.cc             |  43
-rw-r--r--  runtime/gc_root.h                           |   2
-rw-r--r--  runtime/intern_table.cc                     | 169
-rw-r--r--  runtime/intern_table.h                      |  82
-rw-r--r--  runtime/mirror/class.cc                     |   5
-rw-r--r--  runtime/runtime.cc                          |   7
13 files changed, 251 insertions, 83 deletions
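The bulk of this change is in intern_table.{h,cc}: the single intern set is split into a pre-zygote and a post-zygote table, and Heap::PreZygoteFork() (see the heap.cc hunk below) moves everything interned so far into the pre-zygote table so the zygote's pages stay clean and shared across forked children. A minimal standalone sketch of that two-table pattern, using plain std::string keys instead of GcRoot<mirror::String> — an illustration of the idea, not the ART code itself:

  #include <string>
  #include <unordered_set>

  // Sketch of InternTable::Table's two-generation scheme: entries inserted
  // before the zygote forks are moved into a table that is never written
  // again, so its memory pages are not dirtied by later insertions.
  class TwoGenerationTable {
   public:
    void Insert(const std::string& s) {
      // Always insert into the post-zygote table; it becomes the pre-zygote
      // table when the zygote is created (see SwapPostZygoteWithPreZygote).
      post_zygote_table_.insert(s);
    }
    bool Find(const std::string& s) const {
      return pre_zygote_table_.count(s) != 0 || post_zygote_table_.count(s) != 0;
    }
    void SwapPostZygoteWithPreZygote() {
      // Called once, at zygote fork time; afterwards post_zygote_table_ is
      // empty and receives all new insertions.
      pre_zygote_table_.swap(post_zygote_table_);
    }
   private:
    std::unordered_set<std::string> pre_zygote_table_;   // frozen at fork
    std::unordered_set<std::string> post_zygote_table_;  // mutable afterwards
  };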
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index fd9c626228..9e8d282a87 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -17,10 +17,8 @@
 #include "context_arm.h"
 
 #include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
+#include "utils.h"
 
 namespace art {
 namespace arm {
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 6aacda4d96..0a3148016a 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -19,11 +19,8 @@
 #include "context_arm64.h"
 
 #include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
-
+#include "utils.h"
 
 namespace art {
 namespace arm64 {
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 789dbbb6d7..e1f6c06d1b 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -17,9 +17,8 @@
 #include "context_mips.h"
 
 #include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
 
 namespace art {
 namespace mips {
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index a7beaa9196..32eec57b50 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -17,9 +17,9 @@
 #include "context_x86.h"
 
 #include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
+
 
 namespace art {
 namespace x86 {
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 79d0666ddc..6e9b99c2f7 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -17,9 +17,8 @@
 #include "context_x86_64.h"
 
 #include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
 
 namespace art {
 namespace x86_64 {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 670bf2aa8e..f76da8edaa 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -541,8 +541,7 @@ static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
                                                 mirror::Object* this_object,
                                                 mirror::ArtMethod* referrer,
                                                 bool access_check, InvokeType type) {
-  bool is_direct = type == kStatic || type == kDirect;
-  if (UNLIKELY(this_object == NULL && !is_direct)) {
+  if (UNLIKELY(this_object == NULL && type != kStatic)) {
     return NULL;
   }
   mirror::ArtMethod* resolved_method =
@@ -567,7 +566,7 @@ static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
   }
   if (type == kInterface) {  // Most common form of slow path dispatch.
     return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
-  } else if (is_direct) {
+  } else if (type == kStatic || type == kDirect) {
     return resolved_method;
   } else if (type == kSuper) {
     return referrer->GetDeclaringClass()->GetSuperClass()
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6730dfe309..8e080d1cdd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -54,6 +54,7 @@
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "heap-inl.h"
 #include "image.h"
+#include "intern_table.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
@@ -1897,6 +1898,7 @@ void Heap::PreZygoteFork() {
     LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
     return;
   }
+  Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
   VLOG(heap) << "Starting PreZygoteFork";
   // Trim the pages at the end of the non moving space.
   non_moving_space_->Trim();
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6e1639c8e4..f03ea31098 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -17,6 +17,7 @@
 #include "image_space.h"
 
 #include <dirent.h>
+#include <sys/statvfs.h>
 #include <sys/types.h>
 
 #include <random>
@@ -376,6 +377,41 @@ static bool ImageCreationAllowed(bool is_global_cache, std::string* error_msg) {
   return false;
 }
 
+static constexpr uint64_t kLowSpaceValue = 50 * MB;
+static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
+
+// Read the free space of the cache partition and make a decision whether to keep the generated
+// image. This is to try to mitigate situations where the system might run out of space later.
+static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
+  // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
+  struct statvfs buf;
+
+  int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
+  if (res != 0) {
+    // Could not stat. Conservatively tell the system to delete the image.
+    *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
+    return false;
+  }
+
+  uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
+  // Zygote is privileged, but other things are not. Use bavail.
+  uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
+
+  // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
+  // environment. We do not want to fail quickening the boot image there, as it is beneficial
+  // for time-to-UI.
+  if (fs_overall_size > kTmpFsSentinelValue) {
+    if (fs_free_size < kLowSpaceValue) {
+      *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available after image"
+                                " generation, need at least %" PRIu64 ".",
+                                static_cast<double>(fs_free_size) / MB,
+                                kLowSpaceValue / MB);
+      return false;
+    }
+  }
+  return true;
+}
+
 ImageSpace* ImageSpace::Create(const char* image_location,
                                const InstructionSet image_isa,
                                std::string* error_msg) {
@@ -523,6 +559,13 @@ ImageSpace* ImageSpace::Create(const char* image_location,
       PruneDexCache(image_isa);
       return nullptr;
     } else {
+      // Check whether there is enough space left over after we have generated the image.
+      if (!CheckSpace(cache_filename, error_msg)) {
+        // No. Delete the generated image and try to run out of the dex files.
+        PruneDexCache(image_isa);
+        return nullptr;
+      }
+
       // Note that we must not use the file descriptor associated with
       // ScopedFlock::GetFile to Init the image file. We want the file
       // descriptor (and the associated exclusive lock) to be released when
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index b10a55c1a2..a347622112 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -30,7 +30,9 @@ class PACKED(4) GcRoot {
   ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+    DCHECK(!IsNull());
     callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
+    DCHECK(!IsNull());
   }
 
   // This is only used by IrtIterator.
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f6e66616aa..23324a6e75 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -29,39 +29,34 @@ namespace art {
 
 InternTable::InternTable()
-    : log_new_roots_(false), allow_new_interns_(true),
+    : image_added_to_intern_table_(false), log_new_roots_(false),
+      allow_new_interns_(true),
       new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
 }
 
 size_t InternTable::Size() const {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return strong_interns_.size() + weak_interns_.size();
+  return strong_interns_.Size() + weak_interns_.Size();
 }
 
 size_t InternTable::StrongSize() const {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return strong_interns_.size();
+  return strong_interns_.Size();
 }
 
 size_t InternTable::WeakSize() const {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  return weak_interns_.size();
+  return weak_interns_.Size();
 }
 
 void InternTable::DumpForSigQuit(std::ostream& os) const {
-  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  os << "Intern table: " << strong_interns_.size() << " strong; "
-     << weak_interns_.size() << " weak\n";
+  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
 }
 
 void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   if ((flags & kVisitRootFlagAllRoots) != 0) {
-    for (auto& strong_intern : strong_interns_) {
-      const_cast<GcRoot<mirror::String>&>(strong_intern).
-          VisitRoot(callback, arg, 0, kRootInternedString);
-      DCHECK(!strong_intern.IsNull());
-    }
+    strong_interns_.VisitRoots(callback, arg, flags);
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
     for (auto& root : new_strong_intern_roots_) {
       mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
       root.VisitRoot(callback, arg, 0, kRootInternedString);
@@ -71,10 +66,8 @@ void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
         // The GC moved a root in the log. Need to search the strong interns and update the
         // corresponding object. This is slow, but luckily for us, this may only happen with a
         // concurrent moving GC.
-        auto it = strong_interns_.find(GcRoot<mirror::String>(old_ref));
-        DCHECK(it != strong_interns_.end());
-        strong_interns_.erase(it);
-        strong_interns_.insert(GcRoot<mirror::String>(new_ref));
+        strong_interns_.Remove(old_ref);
+        strong_interns_.Insert(new_ref);
       }
     }
   }
@@ -91,21 +84,17 @@ void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
 }
 
 mirror::String* InternTable::LookupStrong(mirror::String* s) {
-  return Lookup(&strong_interns_, s);
+  return strong_interns_.Find(s);
 }
 
 mirror::String* InternTable::LookupWeak(mirror::String* s) {
-  // Weak interns need a read barrier because they are weak roots.
-  return Lookup(&weak_interns_, s);
+  return weak_interns_.Find(s);
 }
 
-mirror::String* InternTable::Lookup(Table* table, mirror::String* s) {
-  Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  auto it = table->find(GcRoot<mirror::String>(s));
-  if (LIKELY(it != table->end())) {
-    return const_cast<GcRoot<mirror::String>&>(*it).Read();
-  }
-  return nullptr;
+void InternTable::SwapPostZygoteWithPreZygote() {
+  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+  weak_interns_.SwapPostZygoteWithPreZygote();
+  strong_interns_.SwapPostZygoteWithPreZygote();
 }
 
 mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -116,7 +105,7 @@ mirror::String* InternTable::InsertStrong(mirror::String* s) {
   if (log_new_roots_) {
     new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
   }
-  strong_interns_.insert(GcRoot<mirror::String>(s));
+  strong_interns_.Insert(s);
   return s;
 }
 
@@ -125,12 +114,12 @@ mirror::String* InternTable::InsertWeak(mirror::String* s) {
   if (runtime->IsActiveTransaction()) {
     runtime->RecordWeakStringInsertion(s);
   }
-  weak_interns_.insert(GcRoot<mirror::String>(s));
+  weak_interns_.Insert(s);
   return s;
 }
 
 void InternTable::RemoveStrong(mirror::String* s) {
-  Remove(&strong_interns_, s);
+  strong_interns_.Remove(s);
 }
 
 void InternTable::RemoveWeak(mirror::String* s) {
@@ -138,13 +127,7 @@
   if (runtime->IsActiveTransaction()) {
     runtime->RecordWeakStringRemoval(s);
   }
-  Remove(&weak_interns_, s);
-}
-
-void InternTable::Remove(Table* table, mirror::String* s) {
-  auto it = table->find(GcRoot<mirror::String>(s));
-  DCHECK(it != table->end());
-  table->erase(it);
+  weak_interns_.Remove(s);
 }
 
 // Insert/remove methods used to undo changes made during an aborted transaction.
@@ -165,11 +148,39 @@ void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
   RemoveWeak(s);
 }
 
-static mirror::String* LookupStringFromImage(mirror::String* s)
+void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
+  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+  if (!image_added_to_intern_table_) {
+    mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+      mirror::DexCache* dex_cache = dex_caches->Get(i);
+      const DexFile* dex_file = dex_cache->GetDexFile();
+      const size_t num_strings = dex_file->NumStringIds();
+      for (size_t j = 0; j < num_strings; ++j) {
+        mirror::String* image_string = dex_cache->GetResolvedString(j);
+        if (image_string != nullptr) {
+          mirror::String* found = LookupStrong(image_string);
+          if (found == nullptr) {
+            InsertStrong(image_string);
+          } else {
+            DCHECK_EQ(found, image_string);
+          }
+        }
+      }
+    }
+    image_added_to_intern_table_ = true;
+  }
+}
+
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (image_added_to_intern_table_) {
+    return nullptr;
+  }
   gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
-  if (image == NULL) {
-    return NULL;  // No image present.
+  if (image == nullptr) {
+    return nullptr;  // No image present.
   }
   mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
   mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
@@ -285,24 +296,12 @@ mirror::String* InternTable::InternWeak(mirror::String* s) {
 
 bool InternTable::ContainsWeak(mirror::String* s) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  const mirror::String* found = LookupWeak(s);
-  return found == s;
+  return LookupWeak(s) == s;
 }
 
 void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
-    // This does not need a read barrier because this is called by GC.
-    GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
-    mirror::Object* object = root.Read<kWithoutReadBarrier>();
-    mirror::Object* new_object = callback(object, arg);
-    if (new_object == nullptr) {
-      it = weak_interns_.erase(it);
-    } else {
-      root = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
-      ++it;
-    }
-  }
+  weak_interns_.SweepWeaks(callback, arg);
 }
 
 std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) {
@@ -321,4 +320,68 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                                const_cast<GcRoot<mirror::String>&>(b).Read());
 }
 
+void InternTable::Table::Remove(mirror::String* s) {
+  auto it = post_zygote_table_.find(GcRoot<mirror::String>(s));
+  if (it != post_zygote_table_.end()) {
+    post_zygote_table_.erase(it);
+  } else {
+    it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
+    DCHECK(it != pre_zygote_table_.end());
+    pre_zygote_table_.erase(it);
+  }
+}
+
+mirror::String* InternTable::Table::Find(mirror::String* s) {
+  Locks::intern_table_lock_->AssertHeld(Thread::Current());
+  auto it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
+  if (LIKELY(it != pre_zygote_table_.end())) {
+    return const_cast<GcRoot<mirror::String>&>(*it).Read();
+  }
+  it = post_zygote_table_.find(GcRoot<mirror::String>(s));
+  if (LIKELY(it != post_zygote_table_.end())) {
+    return const_cast<GcRoot<mirror::String>&>(*it).Read();
+  }
+  return nullptr;
+}
+
+void InternTable::Table::SwapPostZygoteWithPreZygote() {
+  CHECK(pre_zygote_table_.empty());
+  std::swap(pre_zygote_table_, post_zygote_table_);
+}
+
+void InternTable::Table::Insert(mirror::String* s) {
+  // Always insert the post zygote table, this gets swapped when we create the zygote to be the
+  // pre zygote table.
+  post_zygote_table_.insert(GcRoot<mirror::String>(s));
+}
+
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+  for (auto& intern : pre_zygote_table_) {
+    const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+  }
+  for (auto& intern : post_zygote_table_) {
+    const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+  }
+}
+
+void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
+  SweepWeaks(&pre_zygote_table_, callback, arg);
+  SweepWeaks(&post_zygote_table_, callback, arg);
+}
+
+void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
+  for (auto it = set->begin(), end = set->end(); it != end;) {
+    // This does not need a read barrier because this is called by GC.
+    GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
+    mirror::Object* object = root.Read<kWithoutReadBarrier>();
+    mirror::Object* new_object = callback(object, arg);
+    if (new_object == nullptr) {
+      it = set->erase(it);
+    } else {
+      root = GcRoot<mirror::String>(new_object->AsString());
+      ++it;
+    }
+  }
+}
+
 }  // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index e3223c8616..0bff7b9ad2 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -26,6 +26,12 @@
 
 namespace art {
 
+namespace gc {
+namespace space {
+class ImageSpace;
+}  // namespace space
+}  // namespace gc
+
 enum VisitRootFlags : uint8_t;
 
 namespace mirror {
@@ -66,9 +72,12 @@ class InternTable {
   bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t Size() const;
-  size_t StrongSize() const;
-  size_t WeakSize() const;
+  // Total number of interned strings.
+  size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  // Total number of strongly live interned strings.
+  size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  // Total number of weakly live interned strings.
+  size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
 
   void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -78,6 +87,14 @@ class InternTable {
   void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Adds all of the resolved image strings from the image space into the intern table. The
+  // advantage of doing this is preventing expensive DexFile::FindStringId calls.
+  void AddImageStringsToTable(gc::space::ImageSpace* image_space)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+  // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
+  void SwapPostZygoteWithPreZygote()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
  private:
   class StringHashEquals {
    public:
@@ -85,22 +102,60 @@
     bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b)
         NO_THREAD_SAFETY_ANALYSIS;
   };
-  typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
-      TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> Table;
+
+  // Table which holds pre zygote and post zygote interned strings. There is one instance for
+  // weak interns and strong interns.
+  class Table {
+   public:
+    mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void Remove(mirror::String* s)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void SweepWeaks(IsMarkedCallback* callback, void* arg)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) {
+      return pre_zygote_table_.size() + post_zygote_table_.size();
+    }
+
+   private:
+    typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
+        TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
+
+    void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+    // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
+    // caused by modifying the zygote intern table hash table. The pre zygote table are the
+    // interned strings which were interned before we created the zygote space. Post zygote is self
+    // explanatory.
+    UnorderedSet pre_zygote_table_;
+    UnorderedSet post_zygote_table_;
+  };
 
   mirror::String* Insert(mirror::String* s, bool is_strong)
       LOCKS_EXCLUDED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::String* LookupStrong(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* LookupWeak(mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::String* Lookup(Table* table, mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* InsertStrong(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* InsertWeak(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   void RemoveStrong(mirror::String* s)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -108,14 +163,16 @@
   void RemoveWeak(mirror::String* s)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-  void Remove(Table* table, mirror::String* s)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
 
   // Transaction rollback access.
+  mirror::String* LookupStringFromImage(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* InsertStrongFromTransaction(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   mirror::String* InsertWeakFromTransaction(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
   void RemoveStrongFromTransaction(mirror::String* s)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -125,6 +182,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
 
   friend class Transaction;
+  bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
   bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
   bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
   ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 8eafd6f731..828d9861e1 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -489,7 +489,10 @@ ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t
   if (GetDexCache() == dex_cache) {
     for (size_t i = 0; i < NumVirtualMethods(); ++i) {
       ArtMethod* method = GetVirtualMethod(i);
-      if (method->GetDexMethodIndex() == dex_method_idx) {
+      if (method->GetDexMethodIndex() == dex_method_idx &&
+          // A miranda method may have a different DexCache and is always created by linking,
+          // never *declared* in the class.
+          !method->IsMiranda()) {
         return method;
       }
     }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index db7936c96b..de3e976a86 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -412,8 +412,13 @@ bool Runtime::Start() {
 
   started_ = true;
 
+  if (IsZygote()) {
+    ScopedObjectAccess soa(self);
+    Runtime::Current()->GetInternTable()->AddImageStringsToTable(heap_->GetImageSpace());
+  }
+
   if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     StackHandleScope<1> hs(soa.Self());
     auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
     class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
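The CheckSpace() heuristic added to image_space.cc can be exercised outside of ART. A minimal standalone sketch using the same statvfs(3) fields (f_bsize, f_blocks, f_bavail) and the same thresholds; the kMB constant and the main() harness are illustrative additions, not ART code:

  #include <sys/statvfs.h>
  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>

  static constexpr uint64_t kMB = 1024 * 1024;
  // Minimum free space required to keep a freshly generated image.
  static constexpr uint64_t kLowSpaceValue = 50 * kMB;
  // Filesystems at or below this overall size are assumed to be the tmpfs
  // used for the decryption environment and are exempt from the check.
  static constexpr uint64_t kTmpFsSentinelValue = 384 * kMB;

  int main(int argc, char** argv) {
    if (argc != 2) {
      std::fprintf(stderr, "usage: %s <path-on-target-filesystem>\n", argv[0]);
      return 2;
    }
    struct statvfs buf;
    if (statvfs(argv[1], &buf) != 0) {
      std::perror("statvfs");
      return 1;  // Conservative: treat a stat failure as a low-space situation.
    }
    uint64_t overall = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
    // f_bavail counts blocks available to unprivileged processes (unlike
    // f_bfree) -- the zygote is privileged, but other users of the partition
    // are not, hence "use bavail" in the patch above.
    uint64_t avail = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
    bool low = overall > kTmpFsSentinelValue && avail < kLowSpaceValue;
    std::printf("overall=%" PRIu64 " MB avail=%" PRIu64 " MB -> %s\n",
                overall / kMB, avail / kMB, low ? "low space" : "ok");
    return low ? 1 : 0;
  }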