author     Mathieu Chartier <mathieuc@google.com>  2016-10-14 18:43:19 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2016-10-17 11:36:39 -0700
commit     31e88225b2ef68e7f32f11186acf922c74ddabab (patch)
tree       ca50f60634854056bb652e7c375e5ccca2def5cd
parent     602142692643d8cd2c48d9aaeece26470f702e3b (diff)
Move most mirror:: args to ObjPtr
Fixed possible moving GC bugs in the ClinitImageUpdate class.

Bug: 31113334
Test: test-art-host
Change-Id: I0bf6578553d58b944aaa17665f1350bdf5ed15ec
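For readers unfamiliar with the type: ObjPtr<MirrorType> is ART's value wrapper for mirror object pointers (declared in runtime/obj_ptr.h, which several files in this change now include). Unlike a raw mirror::Object*, it can verify in debug builds that the pointer was not held across a thread-suspension point where a moving GC may have relocated the object — the bug class this migration targets. A minimal sketch of the idea, not the real implementation (the actual class also packs a per-thread cookie into unused upper pointer bits for the check):

    #include <cstddef>

    // Sketch only: simplified stand-in for art::ObjPtr<MirrorType>.
    template <typename MirrorType>
    class ObjPtr {
     public:
      // Implicit construction keeps existing call sites source-compatible.
      ObjPtr(MirrorType* ptr = nullptr) : ptr_(ptr) {}

      MirrorType* operator->() const { return AssertValid(); }
      MirrorType* Ptr() const { return AssertValid(); }  // explicit, greppable unwrap
      bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }

     private:
      MirrorType* AssertValid() const {
        // The real implementation DCHECKs here that the value is still valid,
        // i.e. was not carried across a suspend point.
        return ptr_;
      }
      MirrorType* ptr_;
    };

This is why the diff unwraps with ref.Ptr() wherever a legacy raw-pointer API remains, e.g. image_writer_->GetImageAddress(ref.Ptr()).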
 compiler/driver/compiler_driver.cc         | 62
 compiler/image_writer.cc                   | 23
 patchoat/patchoat.cc                       |  7
 patchoat/patchoat.h                        |  4
 runtime/gc/accounting/remembered_set.cc    |  8
 runtime/gc/collector/concurrent_copying.cc | 34
 runtime/gc/collector/concurrent_copying.h  |  3
 runtime/gc/collector/garbage_collector.h   |  3
 runtime/gc/collector/mark_compact.cc       | 12
 runtime/gc/collector/mark_compact.h        |  2
 runtime/gc/collector/mark_sweep.cc         | 10
 runtime/gc/collector/mark_sweep.h          |  2
 runtime/gc/collector/semi_space.cc         |  7
 runtime/gc/collector/semi_space.h          |  2
 runtime/gc/heap.cc                         | 12
 runtime/gc/space/image_space.cc            |  5
 runtime/mirror/array-inl.h                 | 26
 runtime/mirror/array.h                     | 26
 runtime/mirror/class_loader-inl.h          |  3
 runtime/mirror/class_loader.h              |  2
 runtime/mirror/dex_cache-inl.h             |  5
 runtime/mirror/dex_cache.h                 |  5
 runtime/mirror/executable.cc               | 12
 runtime/mirror/field-inl.h                 |  7
 runtime/mirror/field.cc                    |  4
 runtime/mirror/field.h                     |  8
 runtime/mirror/iftable-inl.h               | 10
 runtime/mirror/iftable.h                   |  9
 runtime/mirror/object-inl.h                | 20
 runtime/mirror/object.cc                   | 14
 runtime/mirror/object.h                    |  9
 runtime/mirror/stack_trace_element.cc      | 24
 runtime/mirror/stack_trace_element.h       | 12
 runtime/mirror/string.cc                   | 45
 runtime/mirror/string.h                    |  6
 runtime/mirror/throwable.cc                | 30
 runtime/mirror/throwable.h                 |  8
 37 files changed, 273 insertions(+), 208 deletions(-)
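The ClinitImageUpdate changes below carry the moving-GC fix mentioned in the message: the class walk runs inside an assert-no-thread-suspension region, but MaybeAddToImageClasses may suspend the thread, so classes discovered during the walk are now rooted in the caller's VariableSizedHandleScope and queued in to_insert_, then processed after the walk completes. A rough, self-contained sketch of that deferral pattern (Handle here is a hypothetical stand-in for ART's GC-visible handles):

    #include <vector>

    struct Class {};  // stand-in for mirror::Class

    // Hypothetical stand-in for ART's Handle<T>: a root the GC can see and
    // update, so the referent stays valid even if a moving GC runs.
    template <typename T>
    struct Handle {
      T* obj;
    };

    class ClinitImageUpdateSketch {
     public:
      // Runs during the heap walk, where thread suspension is forbidden:
      // only root the class and defer the suspension-prone work.
      void VisitClass(Class* klass) {
        to_insert_.push_back(Handle<Class>{klass});
      }

      // Runs after the walk, outside the no-suspension region.
      void ProcessDeferred() {
        for (Handle<Class>& h : to_insert_) {
          // MaybeAddToImageClasses(h, ...) would be safe to call here.
          (void)h;
        }
      }

     private:
      std::vector<Handle<Class>> to_insert_;
    };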
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8d64c65b1d..afaec52c7b 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1134,6 +1134,7 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c,
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
+ // May cause thread suspension.
MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
@@ -1153,15 +1154,14 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c,
// Note: we can use object pointers because we suspend all threads.
class ClinitImageUpdate {
public:
- static ClinitImageUpdate* Create(std::unordered_set<std::string>* image_class_descriptors,
- Thread* self, ClassLinker* linker, std::string* error_msg) {
- std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(image_class_descriptors, self,
+ static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) {
+ std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
+ image_class_descriptors,
+ self,
linker));
- if (res->dex_cache_class_ == nullptr) {
- *error_msg = "Could not find DexCache class.";
- return nullptr;
- }
-
return res.release();
}
@@ -1171,7 +1171,9 @@ class ClinitImageUpdate {
}
// Visitor for VisitReferences.
- void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
+ void operator()(ObjPtr<mirror::Object> object,
+ MemberOffset field_offset,
+ bool /* is_static */) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
@@ -1180,8 +1182,8 @@ class ClinitImageUpdate {
}
// java.lang.Reference visitor for VisitReferences.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED)
- const {}
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}
// Ignore class native roots.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
@@ -1193,6 +1195,9 @@ class ClinitImageUpdate {
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
}
+ for (Handle<mirror::Class> h_klass : to_insert_) {
+ MaybeAddToImageClasses(h_klass, image_class_descriptors_);
+ }
}
private:
@@ -1219,20 +1224,19 @@ class ClinitImageUpdate {
ClinitImageUpdate* const data_;
};
- ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
- ClassLinker* linker)
- REQUIRES_SHARED(Locks::mutator_lock_) :
- image_class_descriptors_(image_class_descriptors), self_(self) {
+ ClinitImageUpdate(VariableSizedHandleScope& hs,
+ std::unordered_set<std::string>* image_class_descriptors,
+ Thread* self,
+ ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
+ : hs_(hs),
+ image_class_descriptors_(image_class_descriptors),
+ self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
// Make sure nobody interferes with us.
old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
- // Find the interesting classes.
- dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;",
- ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr);
-
// Find all the already-marked classes.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
FindImageClassesVisitor visitor(this);
@@ -1251,25 +1255,25 @@ class ClinitImageUpdate {
marked_objects_.insert(object);
if (object->IsClass()) {
- // If it is a class, add it.
- StackHandleScope<1> hs(self_);
- MaybeAddToImageClasses(hs.NewHandle(object->AsClass()), image_class_descriptors_);
+ // Add to the TODO list since MaybeAddToImageClasses may cause thread suspension. Thread
+ // suspension is not safe to do in VisitObjects or VisitReferences.
+ to_insert_.push_back(hs_.NewHandle(object->AsClass()));
} else {
// Else visit the object's class.
VisitClinitClassesObject(object->GetClass());
}
// If it is not a DexCache, visit all references.
- mirror::Class* klass = object->GetClass();
- if (klass != dex_cache_class_) {
+ if (!object->IsDexCache()) {
object->VisitReferences(*this, *this);
}
}
+ VariableSizedHandleScope& hs_;
+ mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
std::unordered_set<std::string>* const image_class_descriptors_;
std::vector<mirror::Class*> image_classes_;
- const mirror::Class* dex_cache_class_;
Thread* const self_;
const char* old_cause_;
@@ -1285,12 +1289,12 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
// Suspend all threads.
ScopedSuspendAll ssa(__FUNCTION__);
+ VariableSizedHandleScope hs(Thread::Current());
std::string error_msg;
- std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(image_classes_.get(),
+ std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
+ image_classes_.get(),
Thread::Current(),
- runtime->GetClassLinker(),
- &error_msg));
- CHECK(update.get() != nullptr) << error_msg; // TODO: Soft failure?
+ runtime->GetClassLinker()));
// Do the marking.
update->Walk();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 13c73dcf42..412225c3be 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1323,7 +1323,7 @@ class ImageWriter::VisitReferencesVisitor {
root->Assign(VisitReference(root->AsMirrorPtr()));
}
- ALWAYS_INLINE void operator() (mirror::Object* obj,
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1332,8 +1332,8 @@ class ImageWriter::VisitReferencesVisitor {
obj->SetFieldObject</*kTransactionActive*/false>(offset, VisitReference(ref));
}
- ALWAYS_INLINE void operator() (mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const
+ ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
ref->SetReferent</*kTransactionActive*/false>(
VisitReference(ref->GetReferent<kWithoutReadBarrier>()));
@@ -1941,18 +1941,19 @@ class FixupVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
offset,
- image_writer_->GetImageAddress(ref));
+ image_writer_->GetImageAddress(ref.Ptr()));
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
@@ -1969,14 +1970,14 @@ class FixupClassVisitor FINAL : public FixupVisitor {
FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(obj->IsClass());
FixupVisitor::operator()(obj, offset, /*is_static*/false);
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
- mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
@@ -2045,7 +2046,7 @@ class NativeLocationVisitor {
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
FixupClassVisitor visitor(this, copy);
- static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor);
+ ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
// Remove the clinitThreadId. This is required for image determinism.
copy->SetClinitThreadId(static_cast<pid_t>(0));
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index f3eb663cb8..d6ea18d648 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -715,15 +715,16 @@ bool PatchOat::PatchImage(bool primary_image) {
}
-void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
+ MemberOffset off,
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
- mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(referent == nullptr ||
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index a97b051206..e7a3e91d81 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -198,10 +198,10 @@ class PatchOat {
public:
PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
~PatchVisitor() {}
- void operator() (mirror::Object* obj, MemberOffset off, bool b) const
+ void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// For reference classes.
- void operator() (mirror::Class* cls, mirror::Reference* ref) const
+ void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// TODO: Consider using these for updating native class roots?
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 7229f76ed8..29bab01934 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -66,7 +66,9 @@ class RememberedSetReferenceVisitor {
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
@@ -77,7 +79,7 @@ class RememberedSetReferenceVisitor {
}
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
@@ -115,7 +117,7 @@ class RememberedSetObjectVisitor {
: collector_(collector), target_space_(target_space),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
+ void operator()(ObjPtr<mirror::Object> obj) const REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dabb6da116..3dee974bed 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -354,14 +354,14 @@ class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
obj, offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
@@ -386,13 +386,15 @@ class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
private:
ConcurrentCopying* const collector_;
- void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
+ void CheckReference(ObjPtr<mirror::Object> ref,
+ ObjPtr<mirror::Object> holder,
+ MemberOffset offset) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr) {
- if (!collector_->immune_spaces_.ContainsObject(ref)) {
+ if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
// Not immune, must be a zygote large object.
CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
- Thread::Current(), ref))
+ Thread::Current(), ref.Ptr()))
<< "Non gray object references non immune, non zygote large object "<< ref << " "
<< PrettyTypeOf(ref) << " in holder " << holder << " " << PrettyTypeOf(holder)
<< " offset=" << offset.Uint32Value();
@@ -969,14 +971,17 @@ class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
@@ -1091,14 +1096,16 @@ class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
@@ -1780,13 +1787,13 @@ class ConcurrentCopying::RefFieldsVisitor {
explicit RefFieldsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
+ void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
- collector_->Process(obj, offset);
+ collector_->Process(obj.Ptr(), offset);
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
@@ -2377,7 +2384,8 @@ mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
return Mark(from_ref);
}
-void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 81ffbc5f2c..5b8a557375 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -169,7 +169,8 @@ class ConcurrentCopying : public GarbageCollector {
void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 4ffa254bb0..5b513991d1 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -196,7 +196,8 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
protected:
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6d2f009868..e0bf744f4c 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -418,7 +418,7 @@ class MarkCompact::UpdateReferenceVisitor {
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> /*klass*/, mirror::Reference* ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
@@ -543,7 +543,8 @@ void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -551,13 +552,16 @@ class MarkCompact::MarkObjectVisitor {
public:
explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool /*is_static*/) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index a61646c75c..564f85b3f8 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -122,7 +122,7 @@ class MarkCompact : public GarbageCollector {
OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b89d99c661..c05719d2c1 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -272,7 +272,7 @@ class MarkSweep::ScanObjectVisitor {
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj) const
+ void operator()(ObjPtr<mirror::Object> obj) const
ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -280,7 +280,7 @@ class MarkSweep::ScanObjectVisitor {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->ScanObject(obj);
+ mark_sweep_->ScanObject(obj.Ptr());
}
private:
@@ -616,7 +616,7 @@ class MarkSweep::DelayReferenceReferentVisitor {
public:
explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
@@ -1297,9 +1297,9 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
}
}
-// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
+void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bbac9da2b1..19c2e9aaba 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -225,7 +225,7 @@ class MarkSweep : public GarbageCollector {
}
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 76a478ef1b..2cb17671e5 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -679,7 +679,8 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+void SemiSpace::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) {
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
@@ -687,13 +688,13 @@ class SemiSpace::MarkObjectVisitor {
public:
explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
+ void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
- void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4b63d9b862..4cebcc3044 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -156,7 +156,7 @@ class SemiSpace : public GarbageCollector {
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bf5af8ee7e..45bd87b87c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2894,19 +2894,21 @@ class VerifyReferenceVisitor : public SingleRootVisitor {
return fail_count_->LoadSequentiallyConsistent();
}
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (verify_referent_) {
- VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
+ VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
- void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
+ VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
}
- bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a40e408bc8..81850067e6 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -912,7 +912,7 @@ class ImageSpaceLoader {
ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
const {}
- ALWAYS_INLINE void operator()(mirror::Object* obj,
+ ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
NO_THREAD_SAFETY_ANALYSIS {
@@ -949,7 +949,8 @@ class ImageSpaceLoader {
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+ void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index d18781a617..3789081c59 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -150,8 +150,11 @@ class SetLengthToUsableSizeVisitor {
};
template <bool kIsInstrumented, bool kFillUsable>
-inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type) {
+inline Array* Array::Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type) {
DCHECK(allocator_type != gc::kAllocatorTypeLOS);
DCHECK(array_class != nullptr);
DCHECK(array_class->IsArrayClass());
@@ -204,7 +207,9 @@ inline void PrimitiveArray<T>::VisitRoots(RootVisitor* visitor) {
template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- Array* raw_array = Array::Alloc<true>(self, GetArrayClass(), length,
+ Array* raw_array = Array::Alloc<true>(self,
+ GetArrayClass(),
+ length,
ComponentSizeShiftWidth(sizeof(T)),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
return down_cast<PrimitiveArray<T>*>(raw_array);
@@ -275,7 +280,9 @@ static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
}
template<class T>
-inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memmove(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -335,7 +342,9 @@ inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src,
}
template<class T>
-inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos,
+ ObjPtr<PrimitiveArray<T>> src,
+ int32_t src_pos,
int32_t count) {
if (UNLIKELY(count == 0)) {
return;
@@ -415,6 +424,13 @@ inline void PointerArray::Fixup(mirror::PointerArray* dest,
}
}
+template<typename T>
+inline void PrimitiveArray<T>::SetArrayClass(ObjPtr<Class> array_class) {
+ CHECK(array_class_.IsNull());
+ CHECK(array_class != nullptr);
+ array_class_ = GcRoot<Class>(array_class);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 04d02f774f..994e9b2616 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -39,13 +39,19 @@ class MANAGED Array : public Object {
// least component_count size, however, if there's usable space at the end of the allocation the
// array will fill it.
template <bool kIsInstrumented, bool kFillUsable = false>
- ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
- size_t component_size_shift, gc::AllocatorType allocator_type)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ ALWAYS_INLINE static Array* Alloc(Thread* self,
+ ObjPtr<Class> array_class,
+ int32_t component_count,
+ size_t component_size_shift,
+ gc::AllocatorType allocator_type)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
- static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
+ static Array* CreateMultiArray(Thread* self,
+ Handle<Class> element_class,
Handle<IntArray> dimensions)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -147,7 +153,7 @@ class MANAGED PrimitiveArray : public Array {
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memmove(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
@@ -155,14 +161,10 @@ class MANAGED PrimitiveArray : public Array {
* smaller than element size copies). Arguments are assumed to be within the bounds of the array
* and the arrays non-null.
*/
- void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
+ void Memcpy(int32_t dst_pos, ObjPtr<PrimitiveArray<T>> src, int32_t src_pos, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* array_class) {
- CHECK(array_class_.IsNull());
- CHECK(array_class != nullptr);
- array_class_ = GcRoot<Class>(array_class);
- }
+ static void SetArrayClass(ObjPtr<Class> array_class);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index cc910b035a..f5ecdaea26 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -21,6 +21,7 @@
#include "base/mutex-inl.h"
#include "class_table-inl.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
@@ -29,7 +30,7 @@ template <bool kVisitClasses,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void ClassLoader::VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
if (kVisitClasses) {
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 407678a52b..a62a460d81 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -67,7 +67,7 @@ class MANAGED ClassLoader : public Object {
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index b388f65d82..13d0108b2f 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -27,6 +27,7 @@
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "runtime.h"
+#include "obj_ptr.h"
#include <atomic>
@@ -72,7 +73,7 @@ inline Class* DexCache::GetResolvedType(uint32_t type_idx) {
return GetResolvedTypes()[type_idx].Read();
}
-inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+inline void DexCache::SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved) {
DCHECK_LT(type_idx, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
// TODO default transaction support.
GetResolvedTypes()[type_idx] = GcRoot<Class>(resolved);
@@ -162,7 +163,7 @@ template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void DexCache::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
// Visit instance fields first.
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 2fcabb5076..bbe98c2d53 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -220,7 +220,8 @@ class MANAGED DexCache FINAL : public Object {
Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetResolvedType(uint32_t type_idx, ObjPtr<Class> resolved)
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -337,7 +338,7 @@ class MANAGED DexCache FINAL : public Object {
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
diff --git a/runtime/mirror/executable.cc b/runtime/mirror/executable.cc
index 33ebd817d1..17c16a2c0b 100644
--- a/runtime/mirror/executable.cc
+++ b/runtime/mirror/executable.cc
@@ -32,14 +32,10 @@ bool Executable::CreateFromArtMethod(ArtMethod* method) {
return true;
}
-template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(
- ArtMethod* method);
-template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(
- ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(ArtMethod* method);
+template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(ArtMethod* method);
ArtMethod* Executable::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index adc5107570..c03f20a991 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -79,10 +79,15 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, b
}
template<bool kTransactionActive>
-void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
+inline void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
+template<bool kTransactionActive>
+inline void Field::SetType(ObjPtr<mirror::Class> type) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 65f6b1634d..f6b64897fa 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -27,7 +27,7 @@ namespace mirror {
GcRoot<Class> Field::static_class_;
GcRoot<Class> Field::array_class_;
-void Field::SetClass(Class* klass) {
+void Field::SetClass(ObjPtr<Class> klass) {
CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
CHECK(klass != nullptr);
static_class_ = GcRoot<Class>(klass);
@@ -38,7 +38,7 @@ void Field::ResetClass() {
static_class_ = GcRoot<Class>(nullptr);
}
-void Field::SetArrayClass(Class* klass) {
+void Field::SetArrayClass(ObjPtr<Class> klass) {
CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
CHECK(klass != nullptr);
array_class_ = GcRoot<Class>(klass);
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index c5357c9d39..222d709cef 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -83,10 +83,10 @@ class MANAGED Field : public AccessibleObject {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetArrayClass(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -113,9 +113,7 @@ class MANAGED Field : public AccessibleObject {
void SetDeclaringClass(ObjPtr<mirror::Class> c) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
- }
+ void SetType(ObjPtr<mirror::Class> type) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index b465d070a6..d6191c2cea 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -18,11 +18,12 @@
#define ART_RUNTIME_MIRROR_IFTABLE_INL_H_
#include "iftable.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
-inline void IfTable::SetInterface(int32_t i, Class* interface) {
+inline void IfTable::SetInterface(int32_t i, ObjPtr<Class> interface) {
DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
@@ -30,6 +31,13 @@ inline void IfTable::SetInterface(int32_t i, Class* interface) {
SetWithoutChecks<false>(idx, interface);
}
+inline void IfTable::SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) {
+ DCHECK(arr != nullptr);
+ auto idx = i * kMax + kMethodArray;
+ DCHECK(Get(idx) == nullptr);
+ Set<false>(idx, arr);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index a1a2f984f4..296c163ef7 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -31,7 +31,7 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
return interface;
}
- ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
+ ALWAYS_INLINE void SetInterface(int32_t i, ObjPtr<Class> interface)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -51,12 +51,7 @@ class MANAGED IfTable FINAL : public ObjectArray<Object> {
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(arr != nullptr);
- auto idx = i * kMax + kMethodArray;
- DCHECK(Get(idx) == nullptr);
- Set<false>(idx, arr);
- }
+ void SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLength() / kMax;
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 3e7bca789c..f555c80f50 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -392,8 +392,8 @@ inline ShortArray* Object::AsShortSizedArray() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsIntArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
@@ -406,8 +406,8 @@ inline IntArray* Object::AsIntArray() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsLongArray() {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- mirror::Class* component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
}
@@ -1117,7 +1117,7 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
// There is no reference offset bitmap. In the non-static case, walk up the class
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
- for (mirror::Class* klass = kIsStatic
+ for (ObjPtr<Class> klass = kIsStatic
? AsClass<kVerifyFlags, kReadBarrierOption>()
: GetClass<kVerifyFlags, kReadBarrierOption>();
klass != nullptr;
@@ -1146,13 +1146,13 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitInstanceFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
VisitFieldsReferences<false, kVerifyFlags, kReadBarrierOption>(
klass->GetReferenceInstanceOffsets<kVerifyFlags>(), visitor);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+inline void Object::VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
DCHECK(!klass->IsTemp());
klass->VisitFieldsReferences<true, kVerifyFlags, kReadBarrierOption>(0, visitor);
}
@@ -1186,7 +1186,7 @@ template <bool kVisitNativeRoots,
typename JavaLangRefVisitor>
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
- mirror::Class* klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
visitor(this, ClassOffset(), false);
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
if (LIKELY(class_flags == kClassFlagNormal)) {
@@ -1201,7 +1201,7 @@ inline void Object::VisitReferences(const Visitor& visitor,
DCHECK(!klass->IsStringClass());
if (class_flags == kClassFlagClass) {
DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- mirror::Class* as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+ ObjPtr<Class> as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
visitor);
} else if (class_flags == kClassFlagObjectArray) {
@@ -1228,7 +1228,7 @@ inline void Object::VisitReferences(const Visitor& visitor,
// actual string instances.
if (!klass->IsStringClass()) {
size_t total_reference_instance_fields = 0;
- mirror::Class* super_class = klass;
+ ObjPtr<Class> super_class = klass;
do {
total_reference_instance_fields += super_class->NumReferenceInstanceFields();
super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index daee727769..7e92c53a09 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -49,7 +49,7 @@ class CopyReferenceFieldsWithReadBarrierVisitor {
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
- Object* ref = obj->GetFieldObject<Object>(offset);
+ ObjPtr<Object> ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
// coverage. Instead, cards will be marked separately.
dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
@@ -118,7 +118,7 @@ Object* Object::CopyObject(ObjPtr<mirror::Object> dest,
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// Perform write barriers on copied object references.
- Class* c = src->GetClass();
+ ObjPtr<Class> c = src->GetClass();
if (c->IsArrayClass()) {
if (!c->GetComponentType()->IsPrimitive()) {
ObjectArray<Object>* array = dest->AsObjectArray<Object>();
@@ -182,8 +182,8 @@ void Object::SetHashCodeSeed(uint32_t new_seed) {
hash_code_seed.StoreRelaxed(new_seed);
}
-int32_t Object::IdentityHashCode() const {
- mirror::Object* current_this = const_cast<mirror::Object*>(this);
+int32_t Object::IdentityHashCode() {
+ ObjPtr<Object> current_this = this; // The this pointer may get invalidated by thread suspension.
while (true) {
LockWord lw = current_this->GetLockWord(false);
switch (lw.GetState()) {
@@ -192,7 +192,7 @@ int32_t Object::IdentityHashCode() const {
// loop iteration.
LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
- if (const_cast<Object*>(this)->CasLockWordWeakRelaxed(lw, hash_word)) {
+ if (current_this->CasLockWordWeakRelaxed(lw, hash_word)) {
return hash_word.GetHashCode();
}
break;
@@ -227,13 +227,13 @@ int32_t Object::IdentityHashCode() const {
}
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
- Class* c = GetClass();
+ ObjPtr<Class> c = GetClass();
Runtime* runtime = Runtime::Current();
if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
+ for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
for (ArtField& field : cur->GetIFields()) {
StackHandleScope<1> hs(Thread::Current());
Handle<Object> h_object(hs.NewHandle(new_value));
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 84aa96cbd7..13f402829c 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -130,9 +130,10 @@ class MANAGED LOCKABLE Object {
Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- int32_t IdentityHashCode() const
+ int32_t IdentityHashCode()
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
+ REQUIRES(!Locks::thread_list_lock_,
+ !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
@@ -578,12 +579,12 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitInstanceFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
+ void VisitStaticFieldsReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor) HOT_ATTR
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index e2050cc144..c00cf916a8 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -28,7 +28,7 @@ namespace mirror {
GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
-void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
+void StackTraceElement::SetClass(ObjPtr<Class> java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
@@ -39,30 +39,34 @@ void StackTraceElement::ResetClass() {
java_lang_StackTraceElement_ = GcRoot<Class>(nullptr);
}
-StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+StackTraceElement* StackTraceElement::Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number) {
ObjPtr<StackTraceElement> trace =
ObjPtr<StackTraceElement>::DownCast(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
- trace->Init<true>(declaring_class, method_name, file_name, line_number);
+ trace->Init<true>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
} else {
- trace->Init<false>(declaring_class, method_name, file_name, line_number);
+ trace->Init<false>(declaring_class.Get(), method_name.Get(), file_name.Get(), line_number);
}
}
return trace.Ptr();
}
template<bool kTransactionActive>
-void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> method_name,
- Handle<String> file_name, int32_t line_number) {
+void StackTraceElement::Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
+ int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.Get());
+ declaring_class);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.Get());
+ method_name);
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.Get());
+ file_name);
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 4b3d9d069e..d32d8dca26 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -47,12 +47,14 @@ class MANAGED StackTraceElement FINAL : public Object {
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
- static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
- Handle<String> method_name, Handle<String> file_name,
+ static StackTraceElement* Alloc(Thread* self,
+ Handle<String> declaring_class,
+ Handle<String> method_name,
+ Handle<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static void SetClass(Class* java_lang_StackTraceElement);
+ static void SetClass(ObjPtr<Class> java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -69,7 +71,9 @@ class MANAGED StackTraceElement FINAL : public Object {
int32_t line_number_;
template<bool kTransactionActive>
- void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
+ void Init(ObjPtr<String> declaring_class,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
int32_t line_number)
REQUIRES_SHARED(Locks::mutator_lock_);
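
The private Init declaration keeps its template<bool kTransactionActive> parameter, the same pattern used by the SetFieldObject/SetField32 calls in the .cc hunk: whether a write is recorded for transaction rollback is decided at compile time, so the non-transactional instantiation carries no logging code. A standalone sketch of the pattern (the undo log here is hypothetical, purely for illustration):

#include <cassert>
#include <vector>

static std::vector<int> rollback_log;   // Hypothetical undo log of previous values.

template <bool kTransactionActive>
void SetField32(int* field, int new_value) {
  if (kTransactionActive) {             // Compile-time constant: dead code in the
    rollback_log.push_back(*field);     // non-transactional instantiation.
  }
  *field = new_value;
}

int main() {
  int line_number = 0;
  SetField32<false>(&line_number, 10);  // Fast path: the logging code is compiled out.
  SetField32<true>(&line_number, 20);   // Transactional path records the old value.
  assert(line_number == 20);
  assert(rollback_log.size() == 1 && rollback_log[0] == 10);
  return 0;
}
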
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 46caa4d73f..ed1103f46d 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -48,7 +48,7 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
}
}
-void String::SetClass(Class* java_lang_String) {
+void String::SetClass(ObjPtr<Class> java_lang_String) {
CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != nullptr);
CHECK(java_lang_String->IsStringClass());
@@ -93,12 +93,13 @@ String* String::AllocFromStrings(Thread* self, Handle<String> string, Handle<Str
int32_t length = string->GetLength();
int32_t length2 = string2->GetLength();
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- const bool compressible = kUseStringCompression && (string->IsCompressed() && string2->IsCompressed());
- const int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(length + length2)
- : (length + length2);
+ const bool compressible = kUseStringCompression &&
+ (string->IsCompressed() && string2->IsCompressed());
+ const int32_t length_with_flag = compressible ? String::GetFlaggedCount(length + length2)
+ : (length + length2);
SetStringCountVisitor visitor(length_with_flag);
- String* new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(new_string == nullptr)) {
return nullptr;
}
@@ -123,7 +124,7 @@ String* String::AllocFromStrings(Thread* self, Handle<String> string, Handle<Str
memcpy(new_value + length, string2->GetValue(), length2 * sizeof(uint16_t));
}
}
- return new_string;
+ return new_string.Ptr();
}
String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) {
@@ -134,7 +135,7 @@ String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_
int32_t length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(length_with_flag);
- String* string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -146,7 +147,7 @@ String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_
uint16_t* array = string->GetValue();
memcpy(array, utf16_data_in, utf16_length * sizeof(uint16_t));
}
- return string;
+ return string.Ptr();
}
String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
@@ -156,18 +157,22 @@ String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
return AllocFromModifiedUtf8(self, char_count, utf, byte_count);
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in) {
return AllocFromModifiedUtf8(self, utf16_length, utf8_data_in, strlen(utf8_data_in));
}
-String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
- const char* utf8_data_in, int32_t utf8_length) {
+String* String::AllocFromModifiedUtf8(Thread* self,
+ int32_t utf16_length,
+ const char* utf8_data_in,
+ int32_t utf8_length) {
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
const bool compressible = kUseStringCompression && (utf16_length == utf8_length);
const int32_t utf16_length_with_flag = (compressible) ? String::GetFlaggedCount(utf16_length)
: utf16_length;
SetStringCountVisitor visitor(utf16_length_with_flag);
- String* string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
+ ObjPtr<String> string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
@@ -177,10 +182,10 @@ String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
uint16_t* utf16_data_out = string->GetValue();
ConvertModifiedUtf8ToUtf16(utf16_data_out, utf16_length, utf8_data_in, utf8_length);
}
- return string;
+ return string.Ptr();
}
-bool String::Equals(String* that) {
+bool String::Equals(ObjPtr<String> that) {
if (this == that) {
// Quick reference equality test
return true;
@@ -281,9 +286,9 @@ std::string String::ToModifiedUtf8() {
return result;
}
-int32_t String::CompareTo(String* rhs) {
+int32_t String::CompareTo(ObjPtr<String> rhs) {
// Quick test for comparison of a string with itself.
- String* lhs = this;
+ ObjPtr<String> lhs = this;
if (lhs == rhs) {
return 0;
}
@@ -298,7 +303,9 @@ int32_t String::CompareTo(String* rhs) {
int32_t countDiff = lhsCount - rhsCount;
int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount;
if (lhs->IsCompressed() && rhs->IsCompressed()) {
- int32_t comparison = memcmp(lhs->GetValueCompressed(), rhs->GetValueCompressed(), minCount * sizeof(uint8_t));
+ int32_t comparison = memcmp(lhs->GetValueCompressed(),
+ rhs->GetValueCompressed(),
+ minCount * sizeof(uint8_t));
if (comparison != 0) {
return comparison;
}
@@ -326,7 +333,7 @@ void String::VisitRoots(RootVisitor* visitor) {
CharArray* String::ToCharArray(Thread* self) {
StackHandleScope<1> hs(self);
Handle<String> string(hs.NewHandle(this));
- CharArray* result = CharArray::Alloc(self, GetLength());
+ ObjPtr<CharArray> result = CharArray::Alloc(self, GetLength());
if (result != nullptr) {
if (string->IsCompressed()) {
int32_t length = string->GetLength();
@@ -339,7 +346,7 @@ CharArray* String::ToCharArray(Thread* self) {
} else {
self->AssertPendingOOMException();
}
- return result;
+ return result.Ptr();
}
void String::GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) {
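
Several hunks above thread string compression through the allocators: a result may be allocated compressed only when every input is compressed, and the stored count is then tagged via String::GetFlaggedCount. The sketch below shows the general flag-in-the-count scheme; the bit chosen and the helper signatures are assumptions for illustration, since ART's actual encoding is version-specific.

#include <cassert>
#include <cstdint>

// Assumed layout, for illustration only: one high bit marks "compressed".
constexpr int32_t kCompressionFlag = INT32_C(1) << 30;

constexpr int32_t GetFlaggedCount(int32_t length) { return length | kCompressionFlag; }
constexpr bool IsCompressed(int32_t count) { return (count & kCompressionFlag) != 0; }
constexpr int32_t GetLength(int32_t count) { return count & ~kCompressionFlag; }

int main() {
  const int32_t length = 5;
  const int32_t length2 = 7;
  // Mirrors the AllocFromStrings hunk: compress only if both inputs are compressed.
  const bool compressible = true /* string->IsCompressed() */ &&
                            true /* string2->IsCompressed() */;
  const int32_t count = compressible ? GetFlaggedCount(length + length2)
                                     : (length + length2);
  assert(IsCompressed(count));
  assert(GetLength(count) == length + length2);
  return 0;
}
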
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index a18692f95e..cfb1153bb3 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -146,7 +146,7 @@ class MANAGED String FINAL : public Object {
bool Equals(const StringPiece& modified_utf8)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool Equals(ObjPtr<String> that) REQUIRES_SHARED(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -165,7 +165,7 @@ class MANAGED String FINAL : public Object {
int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
+ int32_t CompareTo(ObjPtr<String> other) REQUIRES_SHARED(Locks::mutator_lock_);
CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -200,7 +200,7 @@ class MANAGED String FINAL : public Object {
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClass(ObjPtr<Class> java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
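
CompareTo, reworked above to take ObjPtr<String>, compares two compressed strings by memcmp over the first min(lhsCount, rhsCount) bytes and falls back to the length difference on a tie, i.e. plain lexicographic order. A standalone rendering of that control flow:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

// Same control flow as the compressed branch of String::CompareTo above,
// with std::string standing in for the 8-bit value array.
int32_t CompareCompressed(const std::string& lhs, const std::string& rhs) {
  const int32_t lhsCount = static_cast<int32_t>(lhs.size());
  const int32_t rhsCount = static_cast<int32_t>(rhs.size());
  const int32_t countDiff = lhsCount - rhsCount;
  const int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount;
  const int32_t comparison =
      std::memcmp(lhs.data(), rhs.data(), minCount * sizeof(uint8_t));
  return (comparison != 0) ? comparison : countDiff;
}

int main() {
  assert(CompareCompressed("abc", "abd") < 0);   // Differs inside the common prefix.
  assert(CompareCompressed("ab", "abc") < 0);    // Prefix ties; shorter string sorts first.
  assert(CompareCompressed("abc", "abc") == 0);
  return 0;
}
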
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 8f3ed84e11..7aff3deaa8 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -33,7 +33,7 @@ namespace mirror {
GcRoot<Class> Throwable::java_lang_Throwable_;
-void Throwable::SetDetailMessage(String* new_detail_message) {
+void Throwable::SetDetailMessage(ObjPtr<String> new_detail_message) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message);
} else {
@@ -42,7 +42,7 @@ void Throwable::SetDetailMessage(String* new_detail_message) {
}
}
-void Throwable::SetCause(Throwable* cause) {
+void Throwable::SetCause(ObjPtr<Throwable> cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
@@ -54,7 +54,7 @@ void Throwable::SetCause(Throwable* cause) {
}
}
-void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
+void Throwable::SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
@@ -71,11 +71,11 @@ bool Throwable::IsCheckedException() {
}
int32_t Throwable::GetStackDepth() {
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
if (stack_state == nullptr || !stack_state->IsObjectArray()) {
return -1;
}
- mirror::ObjectArray<mirror::Object>* const trace = stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<mirror::ObjectArray<Object>> const trace = stack_state->AsObjectArray<Object>();
const int32_t array_len = trace->GetLength();
DCHECK_GT(array_len, 0);
// See method BuildInternalStackTraceVisitor::Init for the format.
@@ -85,22 +85,21 @@ int32_t Throwable::GetStackDepth() {
std::string Throwable::Dump() {
std::string result(PrettyTypeOf(this));
result += ": ";
- String* msg = GetDetailMessage();
+ ObjPtr<String> msg = GetDetailMessage();
if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
- Object* stack_state = GetStackState();
+ ObjPtr<Object> stack_state = GetStackState();
// check stack state isn't missing or corrupt
if (stack_state != nullptr && stack_state->IsObjectArray()) {
- mirror::ObjectArray<mirror::Object>* object_array =
- stack_state->AsObjectArray<mirror::Object>();
+ ObjPtr<ObjectArray<Object>> object_array = stack_state->AsObjectArray<Object>();
// Decode the internal stack trace into the depth and method trace
// See method BuildInternalStackTraceVisitor::Init for the format.
DCHECK_GT(object_array->GetLength(), 0);
- mirror::Object* methods_and_dex_pcs = object_array->Get(0);
+ ObjPtr<Object> methods_and_dex_pcs = object_array->Get(0);
DCHECK(methods_and_dex_pcs->IsIntArray() || methods_and_dex_pcs->IsLongArray());
- mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(methods_and_dex_pcs);
+ ObjPtr<PointerArray> method_trace = ObjPtr<PointerArray>::DownCast(methods_and_dex_pcs);
const int32_t array_len = method_trace->GetLength();
CHECK_EQ(array_len % 2, 0);
const auto depth = array_len / 2;
@@ -118,11 +117,12 @@ std::string Throwable::Dump() {
}
}
} else {
- Object* stack_trace = GetStackTrace();
+ ObjPtr<Object> stack_trace = GetStackTrace();
if (stack_trace != nullptr && stack_trace->IsObjectArray()) {
CHECK_EQ(stack_trace->GetClass()->GetComponentType(),
StackTraceElement::GetStackTraceElement());
- auto* ste_array = down_cast<ObjectArray<StackTraceElement>*>(stack_trace);
+ ObjPtr<ObjectArray<StackTraceElement>> ste_array =
+ ObjPtr<ObjectArray<StackTraceElement>>::DownCast(stack_trace);
if (ste_array->GetLength() == 0) {
result += "(Throwable with empty stack trace)";
} else {
@@ -142,7 +142,7 @@ std::string Throwable::Dump() {
result += "(Throwable with no stack trace)";
}
}
- Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
+ ObjPtr<Throwable> cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
if (cause != nullptr && cause != this) { // Constructor makes cause == this by default.
result += "Caused by: ";
result += cause->Dump();
@@ -150,7 +150,7 @@ std::string Throwable::Dump() {
return result;
}
-void Throwable::SetClass(Class* java_lang_Throwable) {
+void Throwable::SetClass(ObjPtr<Class> java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
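
Dump above decodes ART's internal backtrace format: element 0 of the stack-state array is a pointer-sized array whose length is 2 * depth, which is what CHECK_EQ(array_len % 2, 0) guards. A standalone sketch of just that arithmetic; the half-methods/half-dex-pcs split follows the BuildInternalStackTraceVisitor::Init comment cited in the code, and the container is a stand-in:

#include <cassert>
#include <cstdint>
#include <vector>

int32_t GetStackDepth(const std::vector<uintptr_t>& methods_and_dex_pcs) {
  const int32_t array_len = static_cast<int32_t>(methods_and_dex_pcs.size());
  assert(array_len % 2 == 0);   // Mirrors CHECK_EQ(array_len % 2, 0) in Dump.
  return array_len / 2;         // Assumed split: depth methods, then depth dex pcs.
}

int main() {
  const std::vector<uintptr_t> trace = {0x1000, 0x2000, 0x3000,  // "ArtMethod*" entries
                                        10, 20, 30};             // matching dex pcs
  assert(GetStackDepth(trace) == 3);
  return 0;
}
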
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 76824cb513..0a4ab6fe5c 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,7 +31,7 @@ namespace mirror {
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDetailMessage(ObjPtr<String> new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
@@ -42,8 +42,8 @@ class MANAGED Throwable : public Object {
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetCause(ObjPtr<Throwable> cause) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -53,7 +53,7 @@ class MANAGED Throwable : public Object {
int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetClass(Class* java_lang_Throwable);
+ static void SetClass(ObjPtr<Class> java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
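
Finally, the Dump hunks replace down_cast<T*> on raw pointers with ObjPtr<T>::DownCast, keeping the debug-build type check while staying inside the checked-pointer wrapper. A standalone sketch of such a checked downcast, using dynamic_cast as a stand-in for ART's assignability DCHECK:

#include <cassert>

struct Object { virtual ~Object() = default; };
struct PointerArray : Object {};

template <typename To>
To* DownCast(Object* ref) {
  // Debug-only sanity check, analogous to the DCHECK inside ART's DownCast;
  // dynamic_cast stands in for the runtime IsAssignableFrom test.
  assert(ref == nullptr || dynamic_cast<To*>(ref) != nullptr);
  return static_cast<To*>(ref);
}

int main() {
  PointerArray array;
  Object* as_object = &array;                             // Upcast, as stored in the trace.
  PointerArray* back = DownCast<PointerArray>(as_object); // Checked recovery of the subtype.
  assert(back == &array);
  return 0;
}
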