author    Hiroshi Yamauchi <yamauchi@google.com>  2015-01-09 14:03:35 -0800
committer Hiroshi Yamauchi <yamauchi@google.com>  2015-01-23 14:07:32 -0800
commit    2cd334ae2d4287216523882f0d298cf3901b7ab1 (patch)
tree      b72d3d07e5a04151caca96cae345075b6e4452b0 /runtime/mirror
parent    604e2828896fbb8663897d1e75112da7305ead4c (diff)
More of the concurrent copying collector.
Bug: 12687968
Change-Id: I62f70274d47df6d6cab714df95c518b750ce3105
Diffstat (limited to 'runtime/mirror')
-rw-r--r--  runtime/mirror/class-inl.h           9
-rw-r--r--  runtime/mirror/object-inl.h         27
-rw-r--r--  runtime/mirror/object.cc             2
-rw-r--r--  runtime/mirror/object.h             12
-rw-r--r--  runtime/mirror/object_array-inl.h    4
5 files changed, 43 insertions(+), 11 deletions(-)
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3dc9e08a1..495f753c7 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -650,7 +650,14 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
template <bool kVisitClass, typename Visitor>
inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
- if (!IsTemp() && IsResolved()) {
+  // Right after a class is allocated, but not yet loaded
+  // (kStatusNotReady, see ClassLinker::LoadClass()), GC may find it
+  // and scan it. IsTemp() may call Class::GetAccessFlags() but may
+  // fail in the DCHECK in Class::GetAccessFlags() because the class
+  // status is kStatusNotReady. To avoid it, rely on IsResolved()
+  // only. This is fine because a temp class never goes into the
+  // kStatusResolved state.
+ if (IsResolved()) {
// Temp classes don't ever populate imt/vtable or static fields and they are not even
// allocated with the right size for those. Also, unresolved classes don't have fields
// linked yet.
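
The guard above encodes a subtle ordering argument: a concurrent GC can see a freshly allocated class before it is loaded, and a temp class never reaches the resolved state, so checking IsResolved() alone is both safe and sufficient. A minimal, self-contained sketch of that reasoning, using simplified stand-ins (Status, FakeClass) rather than the real mirror::Class API:

#include <cassert>
#include <cstdio>

enum class Status { kStatusNotReady, kStatusResolving, kStatusResolved };

struct FakeClass {
  Status status = Status::kStatusNotReady;
  bool is_temp = false;

  bool IsResolved() const { return status == Status::kStatusResolved; }

  // Models Class::IsTemp(): reading access flags is only valid once the
  // class has been loaded, which is what made the old guard unsafe.
  bool IsTemp() const {
    assert(status != Status::kStatusNotReady);  // the DCHECK that could fire
    return is_temp;
  }
};

// Models the visitor guard: a temp class never reaches kStatusResolved, so
// IsResolved() alone both excludes temp classes and avoids touching the
// access flags of a freshly allocated, not-yet-loaded class.
void VisitStaticReferences(const FakeClass& klass) {
  if (klass.IsResolved()) {
    std::printf("scanning static fields and vtable\n");
  }
}

int main() {
  FakeClass fresh;               // just allocated; GC may see it like this
  VisitStaticReferences(fresh);  // safe: no IsTemp() call, no assert
  FakeClass resolved;
  resolved.status = Status::kStatusResolved;
  VisitStaticReferences(resolved);
}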
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 121947dde..d6901634b 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -154,7 +154,6 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
}
} while (!atomic_rb_ptr->CompareExchangeWeakSequentiallyConsistent(expected_ref.reference_,
new_ref.reference_));
- DCHECK_EQ(new_ref.reference_, atomic_rb_ptr->LoadRelaxed());
return true;
#else
UNUSED(expected_rb_ptr, rb_ptr);
@@ -826,6 +825,17 @@ inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset f
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
Object* old_value, Object* new_value) {
+ bool success = CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
+ if (success) {
+ Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+ }
+ return success;
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -848,7 +858,14 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_
bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
new_ref.reference_);
+ return success;
+}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
+ Object* old_value, Object* new_value) {
+ bool success = CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<
+ kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
if (success) {
Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
}
@@ -856,8 +873,8 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
- Object* old_value, Object* new_value) {
+inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(
+ MemberOffset field_offset, Object* old_value, Object* new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -880,10 +897,6 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset fiel
bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
new_ref.reference_);
-
- if (success) {
- Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
- }
return success;
}
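
The object-inl.h changes all follow one pattern: the CAS core is factored into a *WithoutWriteBarrier helper, and the original entry points become thin wrappers that run the write barrier only when the exchange succeeds. A minimal sketch of that pattern built on std::atomic, where Obj and WriteBarrierField are simplified stand-ins for the real ART types, not the actual API:

#include <atomic>

struct Obj;

void WriteBarrierField(Obj* holder, Obj* new_value);  // stand-in barrier

struct Obj {
  std::atomic<Obj*> field{nullptr};

  // Core CAS with no barrier: usable for GC-internal updates that must not
  // (or need not) dirty cards.
  bool CasFieldWeakWithoutWriteBarrier(Obj* old_value, Obj* new_value) {
    return field.compare_exchange_weak(old_value, new_value,
                                       std::memory_order_seq_cst);
  }

  // Mutator-facing wrapper: the same CAS, plus the write barrier on success.
  bool CasFieldWeak(Obj* old_value, Obj* new_value) {
    bool success = CasFieldWeakWithoutWriteBarrier(old_value, new_value);
    if (success) {
      WriteBarrierField(this, new_value);
    }
    return success;
  }
};

void WriteBarrierField(Obj* holder, Obj* new_value) {
  (void)holder;
  (void)new_value;  // card-table marking would go here
}

int main() {
  Obj holder, a, b;
  holder.field.store(&a);
  while (!holder.CasFieldWeak(&a, &b)) {}  // weak CAS may fail spuriously; retry
}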
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 6914f383d..9262a3e0b 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -75,7 +75,7 @@ Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* s
uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
size_t offset = sizeof(Object);
memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
- if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseReadBarrier) {
// We need a RB here. After the memcpy that covers the whole
// object above, copy references fields one by one again with a
// RB. TODO: Optimize this later?
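
The kUseReadBarrier hunk above guards the fix-up pass in CopyObject: a raw memcpy duplicates possibly stale (from-space) references, so each reference field is re-read through the read barrier, which yields the up-to-date (to-space) pointer. A toy sketch of why that matters, with a hypothetical forwarding-pointer ReadBarrier() standing in for the real barrier:

#include <cassert>
#include <cstring>

struct Node {
  Node* forward = nullptr;  // set once the object has been copied
  Node* ref = nullptr;      // a reference field
};

Node* ReadBarrier(Node* p) {
  // If the referent has already been copied, return its new location.
  return (p != nullptr && p->forward != nullptr) ? p->forward : p;
}

void CopyWithBarrier(Node* dest, Node* src) {
  std::memcpy(dest, src, sizeof(Node));  // bulk copy, stale references included
  dest->ref = ReadBarrier(src->ref);     // re-copy each reference field with a RB
}

int main() {
  Node old_target, new_target;
  old_target.forward = &new_target;  // old_target has moved
  Node src;
  src.ref = &old_target;
  Node dest;
  CopyWithBarrier(&dest, &src);
  assert(dest.ref == &new_target);   // points at the to-space copy
}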
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 07d15b573..780c5aef4 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -240,12 +240,24 @@ class MANAGED LOCKABLE Object {
bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* old_value,
+ Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index fbc4f4a67..96d426b59 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -131,7 +131,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* s
CHECK_EQ(sizeof(HeapReference<T>), sizeof(uint32_t));
IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
- if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseReadBarrier) {
// TODO: Optimize this later?
const bool copy_forward = (src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count);
if (copy_forward) {
@@ -174,7 +174,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* sr
CHECK_EQ(sizeof(HeapReference<T>), sizeof(uint32_t));
IntArray* dstAsIntArray = reinterpret_cast<IntArray*>(this);
IntArray* srcAsIntArray = reinterpret_cast<IntArray*>(src);
- if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseReadBarrier) {
// TODO: Optimize this later?
for (int i = 0; i < count; ++i) {
// We need a RB here. ObjectArray::GetWithoutChecks() contains a RB.
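
Both hunks in this file wrap the same per-element copy loops. Once the read barrier forces element-wise copying in AssignableMemmove, overlapping same-array ranges must be walked so that no element is overwritten before it is read; the copy_forward condition above picks the direction. A small sketch of that logic, assuming a single array of plain pointers with no HeapReference or barrier:

#include <cassert>
#include <vector>

template <typename T>
void OverlapSafeCopy(std::vector<T*>& arr, int dst_pos, int src_pos, int count) {
  // Mirrors the condition in the hunk above, specialized to one array:
  // copy forward if dst starts at or before src, or the ranges are disjoint.
  const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
  if (copy_forward) {
    for (int i = 0; i < count; ++i) {
      arr[dst_pos + i] = arr[src_pos + i];  // a read barrier would wrap this read
    }
  } else {
    for (int i = count - 1; i >= 0; --i) {  // backward: dst overlaps src's tail
      arr[dst_pos + i] = arr[src_pos + i];
    }
  }
}

int main() {
  int a = 1, b = 2, c = 3;
  std::vector<int*> arr = {&a, &b, &c};
  OverlapSafeCopy(arr, 1, 0, 2);  // overlapping shift right: must copy backward
  assert(arr[1] == &a && arr[2] == &b);
}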