author     Mathieu Chartier <mathieuc@google.com>  2014-02-18 11:38:45 -0800
committer  Mathieu Chartier <mathieuc@google.com>  2014-02-20 10:00:24 -0800
commit     3bb57c7b41bf5419fe895e7aa664d8d430205ba8
tree       55b28f12c3e5b9174499fe49acec195410880566
parent     a3537fb03e092a82f08ceb670a2eafa703203465
Change ProcessReferences to not use RecursiveMarkObject.
Calling ProcessMarkStack in RecursiveMarkObject caused a lot of overhead due to
timing logger splits. Changed the logic to match what it was before the
reference queue refactoring: ProcessMarkStack is now called once after
preserving soft references and once after enqueueing finalizer references.

FinalizingGC longest pause is reduced by around 1/2, down to ~300ms.
Benchmark score: ~400000 -> ~600000.

Also changed the timing logger splits in the GC to include "(Paused)" when the
split is a paused part of the GC.

Bug: 12129382
Change-Id: I7476d4f23670b19d70738e2fd48e37ec2f57e9f4
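
To make the restructuring concrete, here is a minimal C++ sketch of the two
callback shapes. This is not the ART code: Object, Collector, and the
ProcessReferences driver below are illustrative stand-ins, but the shape
mirrors the call sites changed in this patch.

#include <vector>

struct Object {};

struct Collector {
  std::vector<Object*> mark_stack_;

  void Mark(Object* obj) { mark_stack_.push_back(obj); }

  void ProcessMarkStack() {
    // A timing-logger split would open here, so this should run once per
    // phase rather than once per marked reference.
    while (!mark_stack_.empty()) {
      mark_stack_.pop_back();  // scan the object, mark its referents, ...
    }
  }

  // Old shape: the callback handed to reference processing drained the
  // entire mark stack for every single reference it marked.
  static Object* RecursiveMarkObjectCallback(Object* obj, void* arg) {
    Collector* c = static_cast<Collector*>(arg);
    c->Mark(obj);
    c->ProcessMarkStack();  // per-reference overhead
    return obj;
  }

  // New shape: marking only pushes; draining is a separate callback.
  static Object* MarkObjectCallback(Object* obj, void* arg) {
    static_cast<Collector*>(arg)->Mark(obj);
    return obj;
  }
  static void ProcessMarkStackCallback(void* arg) {
    static_cast<Collector*>(arg)->ProcessMarkStack();
  }
};

// Reference processing then drains the stack once per batch, mirroring the
// two process_mark_stack_callback(arg) call sites added to heap.cc below.
void ProcessReferences(Collector* c, const std::vector<Object*>& soft_refs) {
  for (Object* ref : soft_refs) {
    Collector::MarkObjectCallback(ref, c);  // preserve soft references
  }
  Collector::ProcessMarkStackCallback(c);   // drain once
  // ... enqueue finalizer references, then drain once more ...
  Collector::ProcessMarkStackCallback(c);
}
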
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  23
-rw-r--r--  runtime/gc/collector/mark_sweep.h    7
-rw-r--r--  runtime/gc/collector/semi_space.cc  26
-rw-r--r--  runtime/gc/collector/semi_space.h   14
-rw-r--r--  runtime/gc/heap.cc                  19
-rw-r--r--  runtime/gc/heap.h                    4
-rw-r--r--  runtime/object_callbacks.h           1

7 files changed, 44 insertions(+), 50 deletions(-)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 006c271261..7b9d67578b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -179,11 +179,11 @@ void MarkSweep::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
- &RecursiveMarkObjectCallback, this);
+ &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}
bool MarkSweep::HandleDirtyObjectsPhase() {
- TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
+ TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
@@ -400,10 +400,9 @@ inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
}
}
-mirror::Object* MarkSweep::RecursiveMarkObjectCallback(mirror::Object* obj, void* arg) {
+mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
mark_sweep->MarkObject(obj);
- mark_sweep->ProcessMarkStack(true);
return obj;
}
@@ -546,13 +545,6 @@ void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}
-mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* object, void* arg) {
- DCHECK(object != nullptr);
- DCHECK(arg != nullptr);
- reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(object);
- return object;
-}
-
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
const StackVisitor* visitor) {
reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
@@ -957,7 +949,7 @@ void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
}
void MarkSweep::ReMarkRoots() {
- timings_.StartSplit("ReMarkRoots");
+ timings_.StartSplit("(Paused)ReMarkRoots");
Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
timings_.EndSplit();
}
@@ -1208,6 +1200,11 @@ void MarkSweep::ScanObject(Object* obj) {
ScanObjectVisit(obj, visitor);
}
+void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
+ DCHECK(arg != nullptr);
+ reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
+}
+
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Thread* self = Thread::Current();
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
@@ -1231,7 +1228,7 @@ void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
- timings_.StartSplit("ProcessMarkStack");
+ timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
size_t thread_count = GetThreadCount(paused);
if (kParallelProcessMarkStack && thread_count > 1 &&
mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 6a48cf7e99..963b9ea76f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -176,7 +176,7 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
- static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* obj, void* arg)
+ static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -185,9 +185,8 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ static void ProcessMarkStackPausedCallback(void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
RootType root_type)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d64ec61e0f..882867b5d3 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -163,7 +163,7 @@ void SemiSpace::ProcessReferences(Thread* self) {
TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
- &RecursiveMarkObjectCallback, this);
+ &MarkObjectCallback, &ProcessMarkStackCallback, this);
}
void SemiSpace::MarkingPhase() {
@@ -310,7 +310,7 @@ void SemiSpace::MarkReachableObjects() {
}
// Recursively process the mark stack.
- ProcessMarkStack(true);
+ ProcessMarkStack();
}
void SemiSpace::ReclaimPhase() {
@@ -571,13 +571,15 @@ Object* SemiSpace::MarkObject(Object* obj) {
return forward_address;
}
-mirror::Object* SemiSpace::RecursiveMarkObjectCallback(mirror::Object* root, void* arg) {
+void SemiSpace::ProcessMarkStackCallback(void* arg) {
+ DCHECK(arg != nullptr);
+ reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
+}
+
+mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
DCHECK(root != nullptr);
DCHECK(arg != nullptr);
- SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
- mirror::Object* ret = semi_space->MarkObject(root);
- semi_space->ProcessMarkStack(true);
- return ret;
+ return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}
void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
@@ -587,12 +589,6 @@ void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
*root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
}
-Object* SemiSpace::MarkObjectCallback(Object* object, void* arg) {
- DCHECK(object != nullptr);
- DCHECK(arg != nullptr);
- return reinterpret_cast<SemiSpace*>(arg)->MarkObject(object);
-}
-
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
timings_.StartSplit("MarkRoots");
@@ -680,7 +676,7 @@ void SemiSpace::ScanObject(Object* obj) {
}
// Scan anything that's on the mark stack.
-void SemiSpace::ProcessMarkStack(bool paused) {
+void SemiSpace::ProcessMarkStack() {
space::MallocSpace* promo_dest_space = NULL;
accounting::SpaceBitmap* live_bitmap = NULL;
if (generational_ && !whole_heap_collection_) {
@@ -694,7 +690,7 @@ void SemiSpace::ProcessMarkStack(bool paused) {
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
- timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
+ timings_.StartSplit("ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 89fe326150..ba973765b7 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -146,12 +146,12 @@ class SemiSpace : public GarbageCollector {
RootType /*root_type*/)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* objecgt, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
+ static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ static void ProcessMarkStackCallback(void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -174,10 +174,6 @@ class SemiSpace : public GarbageCollector {
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
- // Returns how many threads we should use for the current GC phase based on if we are paused,
- // whether or not we care about pauses.
- size_t GetThreadCount(bool paused) const;
-
// Returns true if an object is inside of the immune region (assumed to be marked).
bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
return obj >= immune_begin_ && obj < immune_end_;
@@ -237,7 +233,7 @@ class SemiSpace : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
- void ProcessMarkStack(bool paused)
+ void ProcessMarkStack()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
void EnqueueFinalizerReferences(mirror::Object** ref)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9e5e3abf89..8c89cdc251 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -610,41 +610,44 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
struct SoftReferenceArgs {
IsMarkedCallback* is_marked_callback_;
- MarkObjectCallback* recursive_mark_callback_;
+ MarkObjectCallback* mark_callback_;
void* arg_;
};
mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
// TODO: Not preserve all soft references.
- return args->recursive_mark_callback_(obj, args->arg_);
+ return args->mark_callback_(obj, args->arg_);
}
// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* recursive_mark_object_callback, void* arg) {
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
// Unless we are in the zygote or required to clear soft references with white references,
// preserve some white referents.
if (!clear_soft && !Runtime::Current()->IsZygote()) {
SoftReferenceArgs soft_reference_args;
soft_reference_args.is_marked_callback_ = is_marked_callback;
- soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
+ soft_reference_args.mark_callback_ = mark_object_callback;
soft_reference_args.arg_ = arg;
soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
&soft_reference_args);
+ process_mark_stack_callback(arg);
}
- timings.StartSplit("ProcessReferences");
+ timings.StartSplit("(Paused)ProcessReferences");
// Clear all remaining soft and weak references with white referents.
soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
timings.EndSplit();
// Preserve all white objects with finalize methods and schedule them for finalization.
- timings.StartSplit("EnqueueFinalizerReferences");
+ timings.StartSplit("(Paused)EnqueueFinalizerReferences");
finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
- recursive_mark_object_callback, arg);
+ mark_object_callback, arg);
+ process_mark_stack_callback(arg);
timings.EndSplit();
- timings.StartSplit("ProcessReferences");
+ timings.StartSplit("(Paused)ProcessReferences");
// Clear all f-reachable soft and weak references with white referents.
soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 80a5a1aa19..9b763cbbd6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -339,7 +339,9 @@ class Heap {
static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
void ProcessReferences(TimingLogger& timings, bool clear_soft,
IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* recursive_mark_object_callback, void* arg)
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback,
+ void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 6af338bd57..468ba08a43 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -60,6 +60,7 @@ typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
__attribute__((warn_unused_result));
+typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art
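
The new typedef is consumed as a plain function pointer by
Heap::ProcessReferences. A minimal, self-contained sketch of that contract
follows; CollectorLike, DrainStack, and RunPhase are hypothetical names, not
ART code.

#include <cstdio>

typedef void (ProcessMarkStackCallback)(void* arg);  // as declared above

struct CollectorLike {  // hypothetical stand-in for MarkSweep / SemiSpace
  static void DrainStackCallback(void* arg) {
    static_cast<CollectorLike*>(arg)->DrainStack();
  }
  void DrainStack() { std::puts("draining mark stack"); }
};

// Heap::ProcessReferences-style consumer: takes the callback plus its arg.
void RunPhase(ProcessMarkStackCallback* cb, void* arg) {
  cb(arg);  // e.g. after enqueueing finalizer references
}

int main() {
  CollectorLike c;
  RunPhase(&CollectorLike::DrainStackCallback, &c);
}
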