/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_

#include "dex_cache.h"

#include <utility>

#include "art_field.h"
#include "art_method.h"
#include "base/casts.h"
#include "base/enums.h"
#include "class_linker.h"
#include "dex/dex_file.h"
#include "gc_root-inl.h"
#include "mirror/call_site.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "obj_ptr.h"
#include "object-inl.h"
#include "runtime.h"
#include "write_barrier-inl.h"

#include <atomic>

namespace art {
namespace mirror {

template <typename T>
inline DexCachePair<T>::DexCachePair(ObjPtr<T> object, uint32_t index)
    : object(object), index(index) {}

template <typename T>
inline void DexCachePair<T>::Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
  DexCachePair<T> first_elem;
  first_elem.object = GcRoot<T>(nullptr);
  first_elem.index = InvalidIndexForSlot(0);
  dex_cache[0].store(first_elem, std::memory_order_relaxed);
}

template <typename T>
inline T* DexCachePair<T>::GetObjectForIndex(uint32_t idx) {
  if (idx != index) {
    return nullptr;
  }
  DCHECK(!object.IsNull());
  return object.Read();
}

template <typename T>
inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache,
                                              PointerSize pointer_size) {
  NativeDexCachePair<T> first_elem;
  first_elem.object = nullptr;
  first_elem.index = InvalidIndexForSlot(0);
  DexCache::SetNativePairPtrSize(dex_cache, 0, first_elem, pointer_size);
}

inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
  const uint32_t vtable_entries = Object::kVTableLength;
  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}

inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
  DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
  const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize;
  DCHECK_LT(slot_idx, NumStrings());
  return slot_idx;
}
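// Illustrative sketch of the lossy, direct-mapped pair scheme above (editorial
// addition, not upstream code; `strings` and the concrete slot count are
// assumptions, based on kDexCacheStringCacheSize being 1024 in dex_cache.h).
// Each slot stores an (object, index) pair, so a lookup hits only when the
// stored index tag matches the requested index:
//
//   // String ids 5 and 1029 share slot 5, since 1029 % 1024 == 5.
//   StringDexCachePair pair = strings[5].load(std::memory_order_relaxed);
//   pair.GetObjectForIndex(1029);  // cached String* if id 1029 occupies the slot
//   pair.GetObjectForIndex(5);     // nullptr in that case: the tag mismatches
//
// A nullptr result sends the caller to the slow path (ClassLinker resolution),
// which then overwrites the slot via SetResolvedString() below.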
inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
  const uint32_t num_preresolved_strings = NumPreResolvedStrings();
  if (num_preresolved_strings != 0u) {
    GcRoot<mirror::String>* preresolved_strings = GetPreResolvedStrings();
    // num_preresolved_strings can become 0 and preresolved_strings can become null in any order
    // when ClearPreResolvedStrings is called.
    if (preresolved_strings != nullptr) {
      DCHECK_LT(string_idx.index_, num_preresolved_strings);
      DCHECK_EQ(num_preresolved_strings, GetDexFile()->NumStringIds());
      mirror::String* string = preresolved_strings[string_idx.index_].Read();
      if (LIKELY(string != nullptr)) {
        return string;
      }
    }
  }
  return GetStrings()[StringSlotIndex(string_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
}

inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
  DCHECK(resolved != nullptr);
  GetStrings()[StringSlotIndex(string_idx)].store(
      StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
  Runtime* const runtime = Runtime::Current();
  if (UNLIKELY(runtime->IsActiveTransaction())) {
    DCHECK(runtime->IsAotCompiler());
    runtime->RecordResolveString(this, string_idx);
  }
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline void DexCache::SetPreResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
  DCHECK(resolved != nullptr);
  DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
  GetPreResolvedStrings()[string_idx.index_] = GcRoot<mirror::String>(resolved);
  Runtime* const runtime = Runtime::Current();
  CHECK(runtime->IsAotCompiler());
  CHECK(!runtime->IsActiveTransaction());
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline void DexCache::ClearPreResolvedStrings() {
  SetFieldPtr64<false, false, GcRoot<mirror::String>*>(PreResolvedStringsOffset(), nullptr);
  SetField32<false, false, kVerifyNone, false>(NumPreResolvedStringsOffset(), 0);
}

inline void DexCache::ClearString(dex::StringIndex string_idx) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  uint32_t slot_idx = StringSlotIndex(string_idx);
  StringDexCacheType* slot = &GetStrings()[slot_idx];
  // This is racy but should only be called from the transactional interpreter.
  if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
    StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
    slot->store(cleared, std::memory_order_relaxed);
  }
}

inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
  DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds());
  const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize;
  DCHECK_LT(slot_idx, NumResolvedTypes());
  return slot_idx;
}

inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
  // It is theorized that a load acquire is not required since obtaining the resolved class will
  // always have an address dependency or a lock.
  return GetResolvedTypes()[TypeSlotIndex(type_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}

inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
  DCHECK(resolved != nullptr);
  // TODO: Default transaction support.
  // Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
  // class but not necessarily seeing the loaded members like the static fields array.
  // See b/32075261.
  GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
      TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}
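// Editorial sketch of the publish pattern above (not upstream code; `dex_cache`,
// `klass`, and `type_idx` are hypothetical). The release store in
// SetResolvedType() orders the class's fully loaded members before the cache
// entry itself, while readers rely on the address dependency of the relaxed
// load in GetResolvedType():
//
//   // Resolver thread: `klass` is fully loaded before publication.
//   dex_cache->SetResolvedType(type_idx, klass);        // release store
//
//   // Any reader thread afterwards:
//   Class* k = dex_cache->GetResolvedType(type_idx);    // relaxed load
//   if (k != nullptr) { /* k's members (e.g. static fields) are visible */ }
//
// A reader either misses (nullptr, taking the resolution slow path) or
// observes a fully published class.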
inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  uint32_t slot_idx = TypeSlotIndex(type_idx);
  TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx];
  // This is racy but should only be called from the single-threaded ImageWriter and tests.
  if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
    TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
    slot->store(cleared, std::memory_order_relaxed);
  }
}

inline uint32_t DexCache::MethodTypeSlotIndex(dex::ProtoIndex proto_idx) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(proto_idx.index_, GetDexFile()->NumProtoIds());
  const uint32_t slot_idx = proto_idx.index_ % kDexCacheMethodTypeCacheSize;
  DCHECK_LT(slot_idx, NumResolvedMethodTypes());
  return slot_idx;
}

inline MethodType* DexCache::GetResolvedMethodType(dex::ProtoIndex proto_idx) {
  return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
      std::memory_order_relaxed).GetObjectForIndex(proto_idx.index_);
}

inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved) {
  DCHECK(resolved != nullptr);
  GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
      MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
  // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
  WriteBarrier::ForEveryFieldWrite(this);
}

inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());
  GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx];
  Atomic<GcRoot<mirror::CallSite>>& ref =
      reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
  return ref.load(std::memory_order_seq_cst).Read();
}

inline ObjPtr<CallSite> DexCache::SetResolvedCallSite(uint32_t call_site_idx,
                                                      ObjPtr<CallSite> call_site) {
  DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
  DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());

  GcRoot<mirror::CallSite> null_call_site(nullptr);
  GcRoot<mirror::CallSite> candidate(call_site);
  GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx];

  // The first assignment for a given call site wins.
  Atomic<GcRoot<mirror::CallSite>>& ref =
      reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
  if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
    // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
    WriteBarrier::ForEveryFieldWrite(this);
    return call_site;
  } else {
    return target.Read();
  }
}
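// Illustrative note (editorial, not upstream code): SetResolvedCallSite()
// implements "first assignment wins". If two threads race to install different
// CallSite objects (hypothetical `site_a` and `site_b`) for the same index:
//
//   ObjPtr<CallSite> r1 = dex_cache->SetResolvedCallSite(idx, site_a);
//   ObjPtr<CallSite> r2 = dex_cache->SetResolvedCallSite(idx, site_b);
//
// only one CAS succeeds; the loser returns the already-installed winner, so
// r1 == r2 and every caller proceeds with the same resolved call site.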
inline uint32_t DexCache::FieldSlotIndex(uint32_t field_idx) {
  DCHECK_LT(field_idx, GetDexFile()->NumFieldIds());
  const uint32_t slot_idx = field_idx % kDexCacheFieldCacheSize;
  DCHECK_LT(slot_idx, NumResolvedFields());
  return slot_idx;
}

inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  auto pair = GetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), ptr_size);
  return pair.GetObjectForIndex(field_idx);
}

inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  DCHECK(field != nullptr);
  FieldDexCachePair pair(field, field_idx);
  SetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), pair, ptr_size);
}

inline void DexCache::ClearResolvedField(uint32_t field_idx, PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  uint32_t slot_idx = FieldSlotIndex(field_idx);
  auto* resolved_fields = GetResolvedFields();
  // This is racy but should only be called from the single-threaded ImageWriter.
  DCHECK(Runtime::Current()->IsAotCompiler());
  if (GetNativePairPtrSize(resolved_fields, slot_idx, ptr_size).index == field_idx) {
    FieldDexCachePair cleared(nullptr, FieldDexCachePair::InvalidIndexForSlot(slot_idx));
    SetNativePairPtrSize(resolved_fields, slot_idx, cleared, ptr_size);
  }
}

inline uint32_t DexCache::MethodSlotIndex(uint32_t method_idx) {
  DCHECK_LT(method_idx, GetDexFile()->NumMethodIds());
  const uint32_t slot_idx = method_idx % kDexCacheMethodCacheSize;
  DCHECK_LT(slot_idx, NumResolvedMethods());
  return slot_idx;
}

inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  auto pair = GetNativePairPtrSize(GetResolvedMethods(), MethodSlotIndex(method_idx), ptr_size);
  return pair.GetObjectForIndex(method_idx);
}

inline void DexCache::SetResolvedMethod(uint32_t method_idx,
                                        ArtMethod* method,
                                        PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  DCHECK(method != nullptr);
  MethodDexCachePair pair(method, method_idx);
  SetNativePairPtrSize(GetResolvedMethods(), MethodSlotIndex(method_idx), pair, ptr_size);
}

inline void DexCache::ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
  uint32_t slot_idx = MethodSlotIndex(method_idx);
  auto* resolved_methods = GetResolvedMethods();
  // This is racy but should only be called from the single-threaded ImageWriter.
  DCHECK(Runtime::Current()->IsAotCompiler());
  if (GetNativePairPtrSize(resolved_methods, slot_idx, ptr_size).index == method_idx) {
    MethodDexCachePair cleared(nullptr, MethodDexCachePair::InvalidIndexForSlot(slot_idx));
    SetNativePairPtrSize(resolved_methods, slot_idx, cleared, ptr_size);
  }
}
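// Editorial illustration of the native-pair layout handled below (an
// assumption inferred from the ConversionPair64/ConversionPair32 usage; the
// exact definitions live in dex_cache.h). A native pair packs a raw pointer
// and its dex index side by side, and both words are loaded/stored as one
// atomic unit, so a reader can never observe a pointer from one resolution
// paired with the index of another:
//
//   // 64-bit: | T* object (8 bytes) | index (8 bytes) |  -> one 16-byte atomic op
//   // 32-bit: | T* object (4 bytes) | index (4 bytes) |  -> one 8-byte atomic op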
template <typename T>
NativeDexCachePair<T> DexCache::GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                                     size_t idx,
                                                     PointerSize ptr_size) {
  if (ptr_size == PointerSize::k64) {
    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
    ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
    return NativeDexCachePair<T>(reinterpret_cast64<T*>(value.first),
                                 dchecked_integral_cast<size_t>(value.second));
  } else {
    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
    ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
    return NativeDexCachePair<T>(reinterpret_cast32<T*>(value.first), value.second);
  }
}

template <typename T>
void DexCache::SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                    size_t idx,
                                    NativeDexCachePair<T> pair,
                                    PointerSize ptr_size) {
  if (ptr_size == PointerSize::k64) {
    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
    ConversionPair64 v(reinterpret_cast64<uint64_t>(pair.object), pair.index);
    AtomicStoreRelease16B(&array[idx], v);
  } else {
    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
    ConversionPair32 v(reinterpret_cast32<uint32_t>(pair.object),
                       dchecked_integral_cast<uint32_t>(pair.index));
    array[idx].store(v, std::memory_order_release);
  }
}

template <typename T, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void VisitDexCachePairs(std::atomic<DexCachePair<T>>* pairs,
                               size_t num_pairs,
                               const Visitor& visitor)
    REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
  for (size_t i = 0; i < num_pairs; ++i) {
    DexCachePair<T> source = pairs[i].load(std::memory_order_relaxed);
    // NOTE: We need the "template" keyword here to avoid a compilation
    // failure. GcRoot<T> is a template argument-dependent type and we need to
    // tell the compiler to treat "Read" as a template rather than a field or
    // function. Otherwise, on encountering the "<" token, the compiler would
    // treat "Read" as a field.
    T* const before = source.object.template Read<kReadBarrierOption>();
    visitor.VisitRootIfNonNull(source.object.AddressWithoutBarrier());
    if (source.object.template Read<kReadBarrierOption>() != before) {
      pairs[i].store(source, std::memory_order_relaxed);
    }
  }
}

template <bool kVisitNativeRoots,
          VerifyObjectFlags kVerifyFlags,
          ReadBarrierOption kReadBarrierOption,
          typename Visitor>
inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
  // Visit instance fields first.
  VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
  // Visit arrays after.
  if (kVisitNativeRoots) {
    VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
        GetStrings(), NumStrings(), visitor);

    VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
        GetResolvedTypes(), NumResolvedTypes(), visitor);

    VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
        GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);

    GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites();
    size_t num_call_sites = NumResolvedCallSites();
    for (size_t i = 0; i != num_call_sites; ++i) {
      visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
    }

    GcRoot<mirror::String>* const preresolved_strings = GetPreResolvedStrings();
    const size_t num_preresolved_strings = NumPreResolvedStrings();
    for (size_t i = 0; i != num_preresolved_strings; ++i) {
      visitor.VisitRootIfNonNull(preresolved_strings[i].AddressWithoutBarrier());
    }
  }
}
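// Editorial note: the visitors passed to VisitDexCachePairs()/VisitReferences()
// receive the address of each GcRoot so a moving collector can update roots in
// place; the pair is only stored back when the object actually moved. A
// hypothetical visitor shape, for illustration only (the parameter type is an
// assumption based on GcRoot::AddressWithoutBarrier()):
//
//   struct SketchRootVisitor {
//     void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const {
//       // A copying collector would test for null and forward *root to the
//       // object's new location here.
//     }
//   };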
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void DexCache::FixupStrings(StringDexCacheType* dest, const Visitor& visitor) {
  StringDexCacheType* src = GetStrings();
  for (size_t i = 0, count = NumStrings(); i < count; ++i) {
    StringDexCachePair source = src[i].load(std::memory_order_relaxed);
    String* ptr = source.object.Read<kReadBarrierOption>();
    String* new_source = visitor(ptr);
    source.object = GcRoot<String>(new_source);
    dest[i].store(source, std::memory_order_relaxed);
  }
}

template <ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void DexCache::FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) {
  TypeDexCacheType* src = GetResolvedTypes();
  for (size_t i = 0, count = NumResolvedTypes(); i < count; ++i) {
    TypeDexCachePair source = src[i].load(std::memory_order_relaxed);
    Class* ptr = source.object.Read<kReadBarrierOption>();
    Class* new_source = visitor(ptr);
    source.object = GcRoot<Class>(new_source);
    dest[i].store(source, std::memory_order_relaxed);
  }
}

template <ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void DexCache::FixupResolvedMethodTypes(MethodTypeDexCacheType* dest,
                                               const Visitor& visitor) {
  MethodTypeDexCacheType* src = GetResolvedMethodTypes();
  for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) {
    MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed);
    MethodType* ptr = source.object.Read<kReadBarrierOption>();
    MethodType* new_source = visitor(ptr);
    source.object = GcRoot<MethodType>(new_source);
    dest[i].store(source, std::memory_order_relaxed);
  }
}

template <ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void DexCache::FixupResolvedCallSites(GcRoot<mirror::CallSite>* dest,
                                             const Visitor& visitor) {
  GcRoot<mirror::CallSite>* src = GetResolvedCallSites();
  for (size_t i = 0, count = NumResolvedCallSites(); i < count; ++i) {
    mirror::CallSite* source = src[i].Read<kReadBarrierOption>();
    mirror::CallSite* new_source = visitor(source);
    dest[i] = GcRoot<mirror::CallSite>(new_source);
  }
}

inline ObjPtr<String> DexCache::GetLocation() {
  return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_INL_H_