diff options (diffstat)

 runtime/gc/heap-inl.h                     |  2
 runtime/gc/heap.cc                        | 30
 runtime/gc/heap.h                         |  6
 runtime/native/dalvik_system_VMRuntime.cc |  4
 4 files changed, 26 insertions(+), 16 deletions(-)
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3e56205444..ac79ac28ef 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -419,7 +419,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
 inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                     mirror::Object** obj) {
   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
-    RequestConcurrentGCAndSaveObject(self, obj);
+    RequestConcurrentGCAndSaveObject(self, false, obj);
   }
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b80c4b681c..cbbc76ccd1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3325,20 +3325,24 @@ void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
   *object = soa.Decode<mirror::Object*>(arg.get());
 }
 
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
   StackHandleScope<1> hs(self);
   HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
-  RequestConcurrentGC(self);
+  RequestConcurrentGC(self, force_full);
 }
 
 class Heap::ConcurrentGCTask : public HeapTask {
  public:
-  explicit ConcurrentGCTask(uint64_t target_time) : HeapTask(target_time) { }
+  explicit ConcurrentGCTask(uint64_t target_time, bool force_full)
+      : HeapTask(target_time), force_full_(force_full) { }
   virtual void Run(Thread* self) OVERRIDE {
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    heap->ConcurrentGC(self);
+    heap->ConcurrentGC(self, force_full_);
     heap->ClearConcurrentGCRequest();
   }
+
+ private:
+  const bool force_full_;  // If true, force full (or partial) collection.
 };
 
 static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
@@ -3351,24 +3355,30 @@ void Heap::ClearConcurrentGCRequest() {
   concurrent_gc_pending_.StoreRelaxed(false);
 }
 
-void Heap::RequestConcurrentGC(Thread* self) {
+void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
   if (CanAddHeapTask(self) &&
       concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
-    task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime()));  // Start straight away.
+    task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
+                                                        force_full));
   }
 }
 
-void Heap::ConcurrentGC(Thread* self) {
+void Heap::ConcurrentGC(Thread* self, bool force_full) {
   if (!Runtime::Current()->IsShuttingDown(self)) {
     // Wait for any GCs currently running to finish.
    if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
      // If the we can't run the GC type we wanted to run, find the next appropriate one and try that
      // instead. E.g. can't do partial, so do full instead.
-      if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
+      collector::GcType next_gc_type = next_gc_type_;
+      // If forcing full and next gc type is sticky, override with a non-sticky type.
+      if (force_full && next_gc_type == collector::kGcTypeSticky) {
+        next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+      }
+      if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
          collector::kGcTypeNone) {
        for (collector::GcType gc_type : gc_plan_) {
          // Attempt to run the collector, if we succeed, we are done.
-          if (gc_type > next_gc_type_ &&
+          if (gc_type > next_gc_type &&
              CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
              collector::kGcTypeNone) {
            break;
@@ -3553,7 +3563,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
     UpdateMaxNativeFootprint();
   } else if (!IsGCRequestPending()) {
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(self);
+      RequestConcurrentGC(self, true);  // Request non-sticky type.
     } else {
       CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
     }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 565687c5b5..a4353f6ce9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -288,7 +288,7 @@ class Heap {
 
   // Does a concurrent GC, should only be called by the GC daemon thread
   // through runtime.
-  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+  void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
 
   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
@@ -664,7 +664,7 @@ class Heap {
   void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
 
   // Request asynchronous GC.
-  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+  void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
 
   // Whether or not we may use a garbage collector, used so that we only create collectors we need.
   bool MayUseCollector(CollectorType type) const;
@@ -786,7 +786,7 @@ class Heap {
   void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
       LOCKS_EXCLUDED(pending_task_lock_);
 
-  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsGCRequestPending() const;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 53bb129609..9736e1507f 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -223,7 +223,7 @@ static void VMRuntime_trimHeap(JNIEnv* env, jobject) {
 }
 
 static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
-  Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env));
+  Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env), true);
 }
 
 static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
@@ -231,7 +231,7 @@ static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
 }
 
 static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
-  Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env));
+  Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env), true);
 }
 
 static void VMRuntime_startHeapTaskProcessor(JNIEnv* env, jobject) {