author     Mathieu Chartier <mathieuc@google.com>  2014-05-06 10:57:27 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2014-05-07 15:28:35 -0700
commit     78f7b4c04ab6e8b5581921bc95b67a9beee1c246 (patch)
tree       78b93c72007478b5bfc3b88ab413fa3d772da723
parent     052a647973b590c9d5007a2e16f313f4e32a70bd (diff)
Add concurrent reference processing.
Concurrent reference processing currently works by going into native code from
java.lang.ref.Reference.get(). From there, we have a fast path if the references
aren't being processed, which returns the referent without needing to take any
locks. In the slow path we block until reference processing is complete.

It may be possible to improve the slow path if the referent is blackened.

TODO: Investigate doing the fast path in Java code by using racy reads of a
static volatile boolean. This will work as long as there are no suspend points
in between the boolean read and the referent read.

Bug: 14381653
Change-Id: I1546b55be4691fe4ff4aa6d857b234cce7187d87
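For context before the file-by-file diff, here is a minimal Java sketch of the fast/slow
path split that the new ReferenceProcessor::GetReferent (runtime/gc/reference_processor.cc
below) implements in C++. All names in the sketch are hypothetical illustration, not ART
code, and it omits the is-marked callback that lets the real slow path hand back
already-marked referents without waiting.

    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantLock;

    // Illustrative stand-in for the referent lookup done on Reference.get().
    final class ReferentAccess<T> {
        // Set only by the collector with mutators suspended (see EnableSlowPath below),
        // cleared when reference processing finishes (DisableSlowPath).
        private volatile boolean slowPathEnabled = false;
        private final ReentrantLock lock = new ReentrantLock();
        private final Condition processingDone = lock.newCondition();
        private volatile T referent;

        T get() throws InterruptedException {
            T r = referent;
            if (!slowPathEnabled) {
                return r;                  // fast path: no lock while the GC is idle
            }
            if (r == null) {
                return null;               // a cleared referent never becomes non-null again
            }
            lock.lock();
            try {
                while (slowPathEnabled) {  // block until reference processing completes
                    processingDone.await();
                }
                return referent;
            } finally {
                lock.unlock();
            }
        }

        // Mirrors EnableSlowPath(): called with mutators suspended, so the unsynchronized
        // read on the fast path cannot race with the flag being flipped mid-read.
        void enableSlowPath() { slowPathEnabled = true; }

        // Mirrors DisableSlowPath(): wakes any readers blocked in the slow path.
        void disableSlowPath() {
            lock.lock();
            try {
                slowPathEnabled = false;
                processingDone.signalAll();
            } finally {
                lock.unlock();
            }
        }
    }

The key design point mirrored here is that the flag is only toggled on with mutators
suspended, which is what makes the lock-free fast path safe.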
-rw-r--r--  runtime/Android.mk                            2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc           29
-rw-r--r--  runtime/gc/collector/mark_sweep.h             9
-rw-r--r--  runtime/gc/collector/semi_space.cc            9
-rw-r--r--  runtime/gc/heap.cc                          144
-rw-r--r--  runtime/gc/heap.h                            40
-rw-r--r--  runtime/gc/reference_processor.cc           222
-rw-r--r--  runtime/gc/reference_processor.h            105
-rw-r--r--  runtime/gc/reference_queue.cc                 8
-rw-r--r--  runtime/gc/reference_queue.h                  7
-rw-r--r--  runtime/native/java_lang_ref_Reference.cc    42
-rw-r--r--  runtime/runtime.cc                            1
12 files changed, 432 insertions, 186 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index bc971a9d8d..72f1774ba7 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -61,6 +61,7 @@ LIBART_COMMON_SRC_FILES := \
gc/collector/sticky_mark_sweep.cc \
gc/gc_cause.cc \
gc/heap.cc \
+ gc/reference_processor.cc \
gc/reference_queue.cc \
gc/space/bump_pointer_space.cc \
gc/space/dlmalloc_space.cc \
@@ -114,6 +115,7 @@ LIBART_COMMON_SRC_FILES := \
native/java_lang_Thread.cc \
native/java_lang_Throwable.cc \
native/java_lang_VMClassLoader.cc \
+ native/java_lang_ref_Reference.cc \
native/java_lang_reflect_Array.cc \
native/java_lang_reflect_Constructor.cc \
native/java_lang_reflect_Field.cc \
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b8051c9b38..ff2eda0e30 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -31,6 +31,7 @@
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
+#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -166,18 +167,9 @@ void MarkSweep::RunPhases() {
void MarkSweep::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
- &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
-}
-
-void MarkSweep::PreProcessReferences() {
- if (IsConcurrent()) {
- // No reason to do this for non-concurrent GC since pre processing soft references only helps
- // pauses.
- timings_.NewSplit("PreProcessReferences");
- GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
- &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
- }
+ GetHeap()->GetReferenceProcessor()->ProcessReferences(
+ true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
+ &ProcessMarkStackCallback, this);
}
void MarkSweep::PausePhase() {
@@ -192,7 +184,6 @@ void MarkSweep::PausePhase() {
// Scan dirty objects, this is only required if we are not doing concurrent GC.
RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
- ProcessReferences(self);
{
TimingLogger::ScopedSplit split("SwapStacks", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -210,6 +201,9 @@ void MarkSweep::PausePhase() {
// incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
// reference to a string that is about to be swept.
Runtime::Current()->DisallowNewSystemWeaks();
+ // Enable the reference processing slow path, needs to be done with mutators paused since there
+ // is no lock in the GetReferent fast path.
+ GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}
void MarkSweep::PreCleanCards() {
@@ -265,7 +259,6 @@ void MarkSweep::MarkingPhase() {
MarkReachableObjects();
// Pre-clean dirtied cards to reduce pauses.
PreCleanCards();
- PreProcessReferences();
}
void MarkSweep::UpdateAndMarkModUnion() {
@@ -290,6 +283,8 @@ void MarkSweep::MarkReachableObjects() {
void MarkSweep::ReclaimPhase() {
TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
Thread* self = Thread::Current();
+ // Process the references concurrently.
+ ProcessReferences(self);
SweepSystemWeaks(self);
Runtime::Current()->AllowNewSystemWeaks();
{
@@ -1168,7 +1163,7 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference*
if (kCountJavaLangRefs) {
++reference_count_;
}
- heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}
class MarkObjectVisitor {
@@ -1198,8 +1193,8 @@ void MarkSweep::ScanObject(Object* obj) {
ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
-void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
+void MarkSweep::ProcessMarkStackCallback(void* arg) {
+ reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bfc70d187d..3ebc0af178 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -123,10 +123,6 @@ class MarkSweep : public GarbageCollector {
void ProcessReferences(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PreProcessReferences()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Update and mark references from immune spaces.
void UpdateAndMarkModUnion()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -191,8 +187,9 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ProcessMarkStackPausedCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ static void ProcessMarkStackCallback(void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
RootType root_type)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f5d6299273..cfe0489e10 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -30,6 +30,7 @@
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
+#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
@@ -162,8 +163,9 @@ void SemiSpace::InitializePhase() {
void SemiSpace::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
- &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ GetHeap()->GetReferenceProcessor()->ProcessReferences(
+ false, &timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
+ &MarkObjectCallback, &ProcessMarkStackCallback, this);
}
void SemiSpace::MarkingPhase() {
@@ -698,7 +700,8 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
+ MarkedForwardingAddressCallback, this);
}
class SemiSpaceMarkObjectVisitor {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d38b02f4a..bb656e59e9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -39,6 +39,7 @@
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
+#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
@@ -771,102 +772,6 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
return FindDiscontinuousSpaceFromObject(obj, true);
}
-struct SoftReferenceArgs {
- IsMarkedCallback* is_marked_callback_;
- MarkObjectCallback* mark_callback_;
- void* arg_;
-};
-
-mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
- SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
- // TODO: Not preserve all soft references.
- return args->mark_callback_(obj, args->arg_);
-}
-
-void Heap::ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
- IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
- // Unless required to clear soft references with white references, preserve some white referents.
- if (!clear_soft) {
- // Don't clear for sticky GC.
- SoftReferenceArgs soft_reference_args;
- soft_reference_args.is_marked_callback_ = is_marked_callback;
- soft_reference_args.mark_callback_ = mark_object_callback;
- soft_reference_args.arg_ = arg;
- // References with a marked referent are removed from the list.
- soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
- &soft_reference_args);
- process_mark_stack_callback(arg);
- }
-}
-
-// Process reference class instances and schedule finalizations.
-void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
- IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
- timings.StartSplit("(Paused)ProcessReferences");
- ProcessSoftReferences(timings, clear_soft, is_marked_callback, mark_object_callback,
- process_mark_stack_callback, arg);
- // Clear all remaining soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- timings.EndSplit();
- // Preserve all white objects with finalize methods and schedule them for finalization.
- timings.StartSplit("(Paused)EnqueueFinalizerReferences");
- finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
- mark_object_callback, arg);
- process_mark_stack_callback(arg);
- timings.EndSplit();
- timings.StartSplit("(Paused)ProcessReferences");
- // Clear all f-reachable soft and weak references with white referents.
- soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- // Clear all phantom references with white referents.
- phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
- // At this point all reference queues other than the cleared references should be empty.
- DCHECK(soft_reference_queue_.IsEmpty());
- DCHECK(weak_reference_queue_.IsEmpty());
- DCHECK(finalizer_reference_queue_.IsEmpty());
- DCHECK(phantom_reference_queue_.IsEmpty());
- timings.EndSplit();
-}
-
-// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
-// marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsMarkedCallback is_marked_callback, void* arg) {
- // klass can be the class of the old object if the visitor already updated the class of ref.
- DCHECK(klass->IsReferenceClass());
- mirror::Object* referent = ref->GetReferent();
- if (referent != nullptr) {
- mirror::Object* forward_address = is_marked_callback(referent, arg);
- // Null means that the object is not currently marked.
- if (forward_address == nullptr) {
- Thread* self = Thread::Current();
- // TODO: Remove these locks, and use atomic stacks for storing references?
- // We need to check that the references haven't already been enqueued since we can end up
- // scanning the same reference multiple times due to dirty cards.
- if (klass->IsSoftReferenceClass()) {
- soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsWeakReferenceClass()) {
- weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsFinalizerReferenceClass()) {
- finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else if (klass->IsPhantomReferenceClass()) {
- phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
- } else {
- LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
- << klass->GetAccessFlags();
- }
- } else if (referent != forward_address) {
- // Referent is already marked and we need to update it.
- ref->SetReferent<false>(forward_address);
- }
- }
-}
-
space::ImageSpace* Heap::GetImageSpace() const {
for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
@@ -1477,7 +1382,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
ChangeCollector(collector_type);
tl->ResumeAll();
// Can't call into java code with all threads suspended.
- EnqueueClearedReferences();
+ reference_processor_.EnqueueClearedReferences();
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
@@ -1881,7 +1786,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
- EnqueueClearedReferences();
+ reference_processor_.EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector);
const size_t duration = collector->GetDurationNs();
@@ -1952,9 +1857,9 @@ class ScanVisitor {
// Verify a reference from an object.
class VerifyReferenceVisitor {
public:
- explicit VerifyReferenceVisitor(Heap* heap)
+ explicit VerifyReferenceVisitor(Heap* heap, bool verify_referent)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
- : heap_(heap), failed_(false) {}
+ : heap_(heap), failed_(false), verify_referent_(verify_referent) {}
bool Failed() const {
return failed_;
@@ -1962,7 +1867,9 @@ class VerifyReferenceVisitor {
void operator()(mirror::Class* klass, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+ if (verify_referent_) {
+ this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+ }
}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
@@ -2079,18 +1986,21 @@ class VerifyReferenceVisitor {
private:
Heap* const heap_;
mutable bool failed_;
+ bool verify_referent_;
};
// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
public:
- explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
+ explicit VerifyObjectVisitor(Heap* heap, bool verify_referent)
+ : heap_(heap), failed_(false), verify_referent_(verify_referent) {
+ }
void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
- VerifyReferenceVisitor visitor(heap_);
+ VerifyReferenceVisitor visitor(heap_, verify_referent_);
// The class doesn't count as a reference but we should verify it anyways.
obj->VisitReferences<true>(visitor, visitor);
failed_ = failed_ || visitor.Failed();
@@ -2109,10 +2019,11 @@ class VerifyObjectVisitor {
private:
Heap* const heap_;
mutable bool failed_;
+ const bool verify_referent_;
};
// Must do this with mutators suspended since we are directly accessing the allocation stacks.
-bool Heap::VerifyHeapReferences() {
+bool Heap::VerifyHeapReferences(bool verify_referents) {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
// Lets sort our allocation stacks so that we can efficiently binary search them.
@@ -2121,7 +2032,7 @@ bool Heap::VerifyHeapReferences() {
// Since we sorted the allocation stack content, need to revoke all
// thread-local allocation stacks.
RevokeAllThreadLocalAllocationStacks(self);
- VerifyObjectVisitor visitor(this);
+ VerifyObjectVisitor visitor(this, verify_referents);
// Verify objects in the allocation stack since these will be objects which were:
// 1. Allocated prior to the GC (pre GC verification).
// 2. Allocated during the GC (pre sweep GC verification).
@@ -2399,7 +2310,9 @@ void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
// Swapping bound bitmaps does nothing.
gc->SwapBitmaps();
SwapSemiSpaces();
- if (!VerifyHeapReferences()) {
+ // Pass in false since concurrent reference processing can mean that the reference referents
+ // may point to dead objects at the point at which PreSweepingGcVerification is called.
+ if (!VerifyHeapReferences(false)) {
LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
}
SwapSemiSpaces();
@@ -2622,27 +2535,10 @@ void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
*object = soa.Decode<mirror::Object*>(arg.get());
}
-void Heap::EnqueueClearedReferences() {
- Thread* self = Thread::Current();
- Locks::mutator_lock_->AssertNotHeld(self);
- if (!cleared_references_.IsEmpty()) {
- // When a runtime isn't started there are no reference queues to care about so ignore.
- if (LIKELY(Runtime::Current()->IsStarted())) {
- ScopedObjectAccess soa(self);
- ScopedLocalRef<jobject> arg(self->GetJniEnv(),
- soa.AddLocalReference<jobject>(cleared_references_.GetList()));
- jvalue args[1];
- args[0].l = arg.get();
- InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
- }
- cleared_references_.Clear();
- }
-}
-
void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
- if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
+ if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
self->IsHandlingStackOverflow()) {
return;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7a9ef1e3b1..f71de1ac89 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -35,7 +35,7 @@
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "reference_queue.h"
+#include "reference_processor.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "verify_object.h"
@@ -54,6 +54,9 @@ namespace mirror {
} // namespace mirror
namespace gc {
+
+class ReferenceProcessor;
+
namespace accounting {
class HeapBitmap;
class ModUnionTable;
@@ -215,7 +218,7 @@ class Heap {
// Check sanity of all live references.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- bool VerifyHeapReferences()
+ bool VerifyHeapReferences(bool verify_referents = true)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
bool VerifyMissingCardMarks()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -314,21 +317,6 @@ class Heap {
return discontinuous_spaces_;
}
- static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
- void ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
- IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void ProcessReferences(TimingLogger& timings, bool clear_soft,
- IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
// Enable verification of object references when the runtime is sufficiently initialized.
void EnableObjectValidation() {
verify_object_mode_ = kVerifyObjectSupport;
@@ -565,6 +553,10 @@ class Heap {
}
bool HasImageSpace() const;
+ ReferenceProcessor* GetReferenceProcessor() {
+ return &reference_processor_;
+ }
+
private:
void Compact(space::ContinuousMemMapAllocSpace* target_space,
space::ContinuousMemMapAllocSpace* source_space)
@@ -631,12 +623,6 @@ class Heap {
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueueClearedReferences();
- // Returns true if the reference object has not yet been enqueued.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
- IsMarkedCallback is_marked_callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Run the finalizers.
void RunFinalization(JNIEnv* env);
@@ -797,12 +783,8 @@ class Heap {
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
- // Reference queues.
- ReferenceQueue soft_reference_queue_;
- ReferenceQueue weak_reference_queue_;
- ReferenceQueue finalizer_reference_queue_;
- ReferenceQueue phantom_reference_queue_;
- ReferenceQueue cleared_references_;
+ // Reference processor.
+ ReferenceProcessor reference_processor_;
// True while the garbage collector is running.
volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
new file mode 100644
index 0000000000..ef9e1d4ee9
--- /dev/null
+++ b/runtime/gc/reference_processor.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference_processor.h"
+
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "reflection.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace gc {
+
+ReferenceProcessor::ReferenceProcessor()
+ : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
+ preserving_references_(false), lock_("reference processor lock"),
+ condition_("reference processor condition", lock_) {
+}
+
+void ReferenceProcessor::EnableSlowPath() {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ slow_path_enabled_ = true;
+}
+
+void ReferenceProcessor::DisableSlowPath(Thread* self) {
+ slow_path_enabled_ = false;
+ // Set to null so that GetReferent knows to not attempt to use the callback for seeing if
+ // referents are marked.
+ process_references_args_.is_marked_callback_ = nullptr;
+ condition_.Broadcast(self);
+}
+
+mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ mirror::Object* const referent = reference->GetReferent();
+ if (LIKELY(!slow_path_enabled_)) {
+ return referent;
+ }
+ // Another fast path, the referent is cleared, we can just return null since there is no scenario
+ // where it becomes non-null.
+ if (referent == nullptr) {
+ return nullptr;
+ }
+ MutexLock mu(self, lock_);
+ while (slow_path_enabled_) {
+ // Try to see if the referent is already marked by using the is_marked_callback. We can return
+ // it to the mutator as long as the GC is not preserving references. If the GC is
+ // preserving references, the mutator could take a white field and move it somewhere else
+ // in the heap causing corruption since this field would get swept.
+ IsMarkedCallback* const is_marked_callback = process_references_args_.is_marked_callback_;
+ if (!preserving_references_ && is_marked_callback != nullptr) {
+ mirror::Object* const referent = reference->GetReferent();
+ mirror::Object* const obj = is_marked_callback(referent, process_references_args_.arg_);
+ // If it's null it means not marked, but it could become marked if the referent is reachable
+ // by finalizer referents. So we can not return in this case and must block.
+ if (obj != nullptr) {
+ return obj;
+ }
+ }
+ condition_.Wait(self);
+ }
+ return reference->GetReferent();
+}
+
+mirror::Object* ReferenceProcessor::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+ auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
+ // TODO: Not preserve all soft references.
+ return args->mark_callback_(obj, args->arg_);
+}
+
+void ReferenceProcessor::StartPreservingReferences(Thread* self) {
+ MutexLock mu(self, lock_);
+ preserving_references_ = true;
+}
+
+void ReferenceProcessor::StopPreservingReferences(Thread* self) {
+ MutexLock mu(self, lock_);
+ preserving_references_ = false;
+ // We are done preserving references, some people who are blocked may see a marked referent.
+ condition_.Broadcast(self);
+}
+
+// Process reference class instances and schedule finalizations.
+void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+ bool clear_soft_references,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback,
+ void* arg) {
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, lock_);
+ process_references_args_.is_marked_callback_ = is_marked_callback;
+ process_references_args_.mark_callback_ = mark_object_callback;
+ process_references_args_.arg_ = arg;
+ }
+ if (concurrent) {
+ MutexLock mu(self, lock_);
+ CHECK(slow_path_enabled_) << "Slow path must be enabled for concurrent reference processing";
+ timings->StartSplit("ProcessReferences");
+ } else {
+ timings->StartSplit("(Paused)ProcessReferences");
+ }
+ // Unless required to clear soft references with white references, preserve some white referents.
+ if (!clear_soft_references) {
+ TimingLogger::ScopedSplit split(concurrent ? "PreserveSomeSoftReferences" :
+ "(Paused)PreserveSomeSoftReferences", timings);
+ if (concurrent) {
+ StartPreservingReferences(self);
+ }
+ // References with a marked referent are removed from the list.
+ soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
+ &process_references_args_);
+
+ process_mark_stack_callback(arg);
+ if (concurrent) {
+ StopPreservingReferences(self);
+ }
+ }
+ // Clear all remaining soft and weak references with white referents.
+ soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ {
+ TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
+ "(Paused)EnqueueFinalizerReferences", timings);
+ if (concurrent) {
+ StartPreservingReferences(self);
+ }
+ // Preserve all white objects with finalize methods and schedule them for finalization.
+ finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+ mark_object_callback, arg);
+ process_mark_stack_callback(arg);
+ if (concurrent) {
+ StopPreservingReferences(self);
+ }
+ }
+ // Clear all finalizer referent reachable soft and weak references with white referents.
+ soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ // Clear all phantom references with white referents.
+ phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+ // At this point all reference queues other than the cleared references should be empty.
+ DCHECK(soft_reference_queue_.IsEmpty());
+ DCHECK(weak_reference_queue_.IsEmpty());
+ DCHECK(finalizer_reference_queue_.IsEmpty());
+ DCHECK(phantom_reference_queue_.IsEmpty());
+ if (concurrent) {
+ MutexLock mu(self, lock_);
+ // Done processing, disable the slow path and broadcast to the waiters.
+ DisableSlowPath(self);
+ }
+ timings->EndSplit();
+}
+
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ IsMarkedCallback is_marked_callback, void* arg) {
+ // klass can be the class of the old object if the visitor already updated the class of ref.
+ DCHECK(klass->IsReferenceClass());
+ mirror::Object* referent = ref->GetReferent();
+ if (referent != nullptr) {
+ mirror::Object* forward_address = is_marked_callback(referent, arg);
+ // Null means that the object is not currently marked.
+ if (forward_address == nullptr) {
+ Thread* self = Thread::Current();
+ // TODO: Remove these locks, and use atomic stacks for storing references?
+ // We need to check that the references haven't already been enqueued since we can end up
+ // scanning the same reference multiple times due to dirty cards.
+ if (klass->IsSoftReferenceClass()) {
+ soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsWeakReferenceClass()) {
+ weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsFinalizerReferenceClass()) {
+ finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else if (klass->IsPhantomReferenceClass()) {
+ phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+ } else {
+ LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+ << klass->GetAccessFlags();
+ }
+ } else if (referent != forward_address) {
+ // Referent is already marked and we need to update it.
+ ref->SetReferent<false>(forward_address);
+ }
+ }
+}
+
+void ReferenceProcessor::EnqueueClearedReferences() {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertNotHeld(self);
+ if (!cleared_references_.IsEmpty()) {
+ // When a runtime isn't started there are no reference queues to care about so ignore.
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ ScopedObjectAccess soa(self);
+ ScopedLocalRef<jobject> arg(self->GetJniEnv(),
+ soa.AddLocalReference<jobject>(cleared_references_.GetList()));
+ jvalue args[1];
+ args[0].l = arg.get();
+ InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
+ }
+ cleared_references_.Clear();
+ }
+}
+
+} // namespace gc
+} // namespace art
+
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
new file mode 100644
index 0000000000..f082a9ec66
--- /dev/null
+++ b/runtime/gc/reference_processor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
+#define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
+
+#include "base/mutex.h"
+#include "globals.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "reference_queue.h"
+
+namespace art {
+
+class TimingLogger;
+
+namespace mirror {
+class Object;
+class Reference;
+} // namespace mirror
+
+namespace gc {
+
+class Heap;
+
+// Used to process java.lang.References concurrently or paused.
+class ReferenceProcessor {
+ public:
+ explicit ReferenceProcessor();
+ static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
+ void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ LOCKS_EXCLUDED(lock_);
+ // Only allow setting this with mutators suspended so that we can avoid using a lock in the
+ // GetReferent fast path as an optimization.
+ void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Decode the referent, may block if references are being processed.
+ mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+ void EnqueueClearedReferences() LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ IsMarkedCallback is_marked_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ class ProcessReferencesArgs {
+ public:
+ ProcessReferencesArgs(IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_callback, void* arg)
+ : is_marked_callback_(is_marked_callback), mark_callback_(mark_callback), arg_(arg) {
+ }
+
+ // The is marked callback is null when the args aren't set up.
+ IsMarkedCallback* is_marked_callback_;
+ MarkObjectCallback* mark_callback_;
+ void* arg_;
+ };
+ // Called by ProcessReferences.
+ void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // If we are preserving references it means that some dead objects may become live, we use start
+ // and stop preserving to block mutators using GetReferent from getting access to these
+ // referents.
+ void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+ void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+ // Process args, used by the GetReferent to return referents which are already marked.
+ ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+ // Boolean for whether or not we need to go slow path in GetReferent.
+ volatile bool slow_path_enabled_;
+ // Boolean for whether or not we are preserving references (either soft references or finalizers).
+ // If this is true, then we cannot return a referent (see comment in GetReferent).
+ bool preserving_references_ GUARDED_BY(lock_);
+ // Lock that guards the reference processing.
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Condition that people wait on if they attempt to get the referent of a reference while
+ // processing is in progress.
+ ConditionVariable condition_ GUARDED_BY(lock_);
+ // Reference queues used by the GC.
+ ReferenceQueue soft_reference_queue_;
+ ReferenceQueue weak_reference_queue_;
+ ReferenceQueue finalizer_reference_queue_;
+ ReferenceQueue phantom_reference_queue_;
+ ReferenceQueue cleared_references_;
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index aee7891d2f..d2bd9a4797 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -131,8 +131,8 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references,
}
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback is_marked_callback,
- MarkObjectCallback recursive_mark_callback,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
void* arg) {
while (!IsEmpty()) {
mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
@@ -141,7 +141,7 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_referenc
mirror::Object* forward_address = is_marked_callback(referent, arg);
// If the referent isn't marked, mark it and update the
if (forward_address == nullptr) {
- forward_address = recursive_mark_callback(referent, arg);
+ forward_address = mark_object_callback(referent, arg);
// If the referent is non-null the reference must be queuable.
DCHECK(ref->IsEnqueuable());
// Move the updated referent to the zombie field.
@@ -160,7 +160,7 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_referenc
}
}
-void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
+void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback* preserve_callback, void* arg) {
ReferenceQueue cleared;
while (!IsEmpty()) {
mirror::Reference* ref = DequeuePendingReference();
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 8d392babf6..4f223e22e3 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -59,8 +59,8 @@ class ReferenceQueue {
// Enqueues finalizer references with white referents. White referents are blackened, moved to the
// zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
- IsMarkedCallback is_marked_callback,
- MarkObjectCallback recursive_mark_callback, void* arg)
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
@@ -69,7 +69,8 @@ class ReferenceQueue {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Unlink the reference list clearing references objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
- void ClearWhiteReferences(ReferenceQueue& cleared_references, IsMarkedCallback is_marked_callback,
+ void ClearWhiteReferences(ReferenceQueue& cleared_references,
+ IsMarkedCallback* is_marked_callback,
void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) const
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
new file mode 100644
index 0000000000..f221ac60f5
--- /dev/null
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "scoped_fast_native_object_access.h"
+
+namespace art {
+
+static jobject Reference_get(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Reference* const ref = soa.Decode<mirror::Reference*>(javaThis);
+ mirror::Object* const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
+ return soa.AddLocalReference<jobject>(referent);
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(Reference, get, "!()Ljava/lang/Object;"),
+};
+
+void register_java_lang_ref_Reference(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/ref/Reference");
+}
+
+} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cbd51d4965..6d9dfa6950 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -744,6 +744,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
REGISTER(register_java_lang_System);
REGISTER(register_java_lang_Thread);
REGISTER(register_java_lang_VMClassLoader);
+ REGISTER(register_java_lang_ref_Reference);
REGISTER(register_java_lang_reflect_Array);
REGISTER(register_java_lang_reflect_Constructor);
REGISTER(register_java_lang_reflect_Field);