Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap-inl.h                  | 223
-rw-r--r--  runtime/gc/heap.cc                     | 303
-rw-r--r--  runtime/gc/heap.h                      | 137
-rw-r--r--  runtime/gc/space/bump_pointer_space.h  |   6
4 files changed, 213 insertions(+), 456 deletions(-)
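
For orientation before the diff itself: this change collapses the four hand-specialized entry points (movable/non-movable crossed with instrumented/uninstrumented) into a single AllocObjectWithAllocator template parameterized on an AllocatorType. A minimal, self-contained sketch of that public shape, with placeholder Object/Class/Thread types standing in for ART's mirror and thread classes rather than the real heap.h declarations, looks roughly like this:

#include <cstddef>
using std::size_t;

// Stand-in for the AllocatorType enum this change adds to runtime/gc/heap.h.
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // moving bump-pointer space
  kAllocatorTypeFreeList,     // ROSAlloc / dlmalloc non-moving space
  kAllocatorTypeLOS,          // large object space
};

struct Object;  // placeholder for mirror::Object
struct Class;   // placeholder for mirror::Class
struct Thread;  // placeholder for art::Thread

class Heap {
 public:
  // Both public wrappers funnel into one templated, allocator-aware path; the
  // only difference between them is which allocator they select.
  template <bool kInstrumented>
  Object* AllocObject(Thread* self, Class* klass, size_t num_bytes) {
    return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes, current_allocator_);
  }
  template <bool kInstrumented>
  Object* AllocNonMovableObject(Thread* self, Class* klass, size_t num_bytes) {
    return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes,
                                                   current_non_moving_allocator_);
  }

 private:
  template <bool kInstrumented>
  Object* AllocObjectWithAllocator(Thread* self, Class* klass, size_t num_bytes,
                                   AllocatorType allocator);  // defined in heap-inl.h

  const AllocatorType current_allocator_ = kAllocatorTypeBumpPointer;
  const AllocatorType current_non_moving_allocator_ = kAllocatorTypeFreeList;
};
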
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e6829e2804..fcc07a0224 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -32,152 +32,126 @@
namespace art {
namespace gc {
-inline mirror::Object* Heap::AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* c,
- size_t byte_count) {
+template <const bool kInstrumented>
+inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* c,
+ size_t byte_count, AllocatorType allocator) {
DebugCheckPreconditionsForAllocObject(c, byte_count);
+ // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
+ // done in the runnable state where suspension is expected.
+ DCHECK_EQ(self->GetState(), kRunnable);
+ self->AssertThreadSuspensionIsAllowable();
mirror::Object* obj;
size_t bytes_allocated;
AllocationTimer alloc_timer(this, &obj);
- bool large_object_allocation = TryAllocLargeObjectUninstrumented(self, c, byte_count,
- &obj, &bytes_allocated);
- if (LIKELY(!large_object_allocation)) {
- // Non-large object allocation.
- if (!kUseRosAlloc) {
- DCHECK(non_moving_space_->IsDlMallocSpace());
- obj = AllocateUninstrumented(self, reinterpret_cast<space::DlMallocSpace*>(non_moving_space_),
- byte_count, &bytes_allocated);
+ if (UNLIKELY(ShouldAllocLargeObject(c, byte_count))) {
+ obj = TryToAllocate<kInstrumented>(self, kAllocatorTypeLOS, byte_count, false,
+ &bytes_allocated);
+ allocator = kAllocatorTypeLOS;
+ } else {
+ obj = TryToAllocate<kInstrumented>(self, allocator, byte_count, false, &bytes_allocated);
+ }
+
+ if (UNLIKELY(obj == nullptr)) {
+ SirtRef<mirror::Class> sirt_c(self, c);
+ obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated);
+ if (obj == nullptr) {
+ return nullptr;
} else {
- DCHECK(non_moving_space_->IsRosAllocSpace());
- obj = AllocateUninstrumented(self, reinterpret_cast<space::RosAllocSpace*>(non_moving_space_),
- byte_count, &bytes_allocated);
+ c = sirt_c.get();
}
- // Ensure that we did not allocate into a zygote space.
- DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
}
- if (LIKELY(obj != NULL)) {
- obj->SetClass(c);
- // Record allocation after since we want to use the atomic add for the atomic fence to guard
- // the SetClass since we do not want the class to appear NULL in another thread.
- size_t new_num_bytes_allocated = RecordAllocationUninstrumented(bytes_allocated, obj);
- DCHECK(!Dbg::IsAllocTrackingEnabled());
- CheckConcurrentGC(self, new_num_bytes_allocated, obj);
- if (kDesiredHeapVerification > kNoHeapVerification) {
- VerifyObject(obj);
+ obj->SetClass(c);
+ // TODO: Set array length here.
+ DCHECK_GT(bytes_allocated, 0u);
+ const size_t new_num_bytes_allocated =
+ static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
+ // TODO: Deprecate.
+ if (kInstrumented) {
+ if (Runtime::Current()->HasStatsEnabled()) {
+ RuntimeStats* thread_stats = self->GetStats();
+ ++thread_stats->allocated_objects;
+ thread_stats->allocated_bytes += bytes_allocated;
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ ++global_stats->allocated_objects;
+ global_stats->allocated_bytes += bytes_allocated;
}
} else {
- ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
+ DCHECK(!Runtime::Current()->HasStatsEnabled());
}
- if (kIsDebugBuild) {
- self->VerifyStack();
- }
- return obj;
-}
-
-inline mirror::Object* Heap::AllocMovableObjectUninstrumented(Thread* self, mirror::Class* c,
- size_t byte_count) {
- DebugCheckPreconditionsForAllocObject(c, byte_count);
- mirror::Object* obj;
- AllocationTimer alloc_timer(this, &obj);
- byte_count = (byte_count + 7) & ~7;
- if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
- if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+ if (AllocatorHasAllocationStack(allocator)) {
+ // This is safe to do since the GC will never free objects which are neither in the allocation
+ // stack nor the live bitmap.
+ while (!allocation_stack_->AtomicPushBack(obj)) {
+ CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
}
}
- obj = bump_pointer_space_->AllocNonvirtual(byte_count);
- if (LIKELY(obj != NULL)) {
- obj->SetClass(c);
- DCHECK(!obj->IsClass());
- // Record allocation after since we want to use the atomic add for the atomic fence to guard
- // the SetClass since we do not want the class to appear NULL in another thread.
- num_bytes_allocated_.fetch_add(byte_count);
- DCHECK(!Dbg::IsAllocTrackingEnabled());
- if (kDesiredHeapVerification > kNoHeapVerification) {
- VerifyObject(obj);
+ if (kInstrumented) {
+ if (Dbg::IsAllocTrackingEnabled()) {
+ Dbg::RecordAllocation(c, bytes_allocated);
}
} else {
- ThrowOutOfMemoryError(self, byte_count, false);
+ DCHECK(!Dbg::IsAllocTrackingEnabled());
+ }
+ if (AllocatorHasConcurrentGC(allocator)) {
+ CheckConcurrentGC(self, new_num_bytes_allocated, obj);
}
if (kIsDebugBuild) {
+ if (kDesiredHeapVerification > kNoHeapVerification) {
+ VerifyObject(obj);
+ }
self->VerifyStack();
}
return obj;
}
-inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
- DCHECK(obj != NULL);
- DCHECK_GT(size, 0u);
- size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
-
- DCHECK(!Runtime::Current()->HasStatsEnabled());
-
- // This is safe to do since the GC will never free objects which are neither in the allocation
- // stack or the live bitmap.
- while (!allocation_stack_->AtomicPushBack(obj)) {
- CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- }
-
- return old_num_bytes_allocated + size;
-}
-
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
+template <const bool kInstrumented>
+inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
+ size_t alloc_size, bool grow,
+ size_t* bytes_allocated) {
if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
+ return nullptr;
}
- DCHECK(!running_on_valgrind_);
- return space->Alloc(self, alloc_size, bytes_allocated);
-}
-
-// DlMallocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
- if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
- }
- DCHECK(!running_on_valgrind_);
- return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-}
-
-// RosAllocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateUninstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
- if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
- }
- DCHECK(!running_on_valgrind_);
- return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
-}
-
-template <class T>
-inline mirror::Object* Heap::AllocateUninstrumented(Thread* self, T* space, size_t alloc_size,
- size_t* bytes_allocated) {
- // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
- // done in the runnable state where suspension is expected.
- DCHECK_EQ(self->GetState(), kRunnable);
- self->AssertThreadSuspensionIsAllowable();
-
- mirror::Object* ptr = TryToAllocateUninstrumented(self, space, alloc_size, false, bytes_allocated);
- if (LIKELY(ptr != NULL)) {
- return ptr;
+ if (kInstrumented) {
+ if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
+ return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
+ }
}
- return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
-}
-
-inline bool Heap::TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
- mirror::Object** obj_ptr, size_t* bytes_allocated) {
- bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
- if (UNLIKELY(large_object_allocation)) {
- mirror::Object* obj = AllocateUninstrumented(self, large_object_space_, byte_count, bytes_allocated);
- // Make sure that our large object didn't get placed anywhere within the space interval or else
- // it breaks the immune range.
- DCHECK(obj == NULL ||
- reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
- reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
- *obj_ptr = obj;
+ mirror::Object* ret;
+ switch (allocator_type) {
+ case kAllocatorTypeBumpPointer: {
+ DCHECK(bump_pointer_space_ != nullptr);
+ alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
+ ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
+ if (LIKELY(ret != nullptr)) {
+ *bytes_allocated = alloc_size;
+ }
+ break;
+ }
+ case kAllocatorTypeFreeList: {
+ if (kUseRosAlloc) {
+ ret = reinterpret_cast<space::RosAllocSpace*>(non_moving_space_)->AllocNonvirtual(
+ self, alloc_size, bytes_allocated);
+ } else {
+ ret = reinterpret_cast<space::DlMallocSpace*>(non_moving_space_)->AllocNonvirtual(
+ self, alloc_size, bytes_allocated);
+ }
+ break;
+ }
+ case kAllocatorTypeLOS: {
+ ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
+ // Make sure that our large object didn't get placed anywhere within the space interval or
+ // else it breaks the immune range.
+ DCHECK(ret == nullptr ||
+ reinterpret_cast<byte*>(ret) < continuous_spaces_.front()->Begin() ||
+ reinterpret_cast<byte*>(ret) >= continuous_spaces_.back()->End());
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Invalid allocator type";
+ ret = nullptr;
+ }
}
- return large_object_allocation;
+ return ret;
}
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
@@ -198,14 +172,14 @@ inline Heap::AllocationTimer::~AllocationTimer() {
if (kMeasureAllocationTime) {
mirror::Object* allocated_obj = *allocated_obj_ptr_;
// Only if the allocation succeeded, record the time.
- if (allocated_obj != NULL) {
+ if (allocated_obj != nullptr) {
uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
}
}
};
-inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) {
+inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
// We need to have a zygote space or else our newly allocated large object can end up in the
// Zygote resulting in it being prematurely freed.
// We can only do this for primitive objects since large objects will not be within the card table
@@ -230,7 +204,8 @@ inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
return false;
}
-inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj) {
+inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
+ mirror::Object* obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
// The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
SirtRef<mirror::Object> ref(self, obj);
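
The heap-inl.h hunks above replace the per-space TryToAllocateUninstrumented overloads with one ALWAYS_INLINE TryToAllocate<kInstrumented> that switches on the allocator type, so the switch can be folded away wherever the allocator is a compile-time constant. The following standalone model keeps only that dispatch; the out-of-memory check, the valgrind fallback, and the real space classes are omitted, and SpaceModel/RoundUp here are simplified stand-ins:

#include <cstddef>
#include <new>
using std::size_t;

enum AllocatorType { kAllocatorTypeBumpPointer, kAllocatorTypeFreeList, kAllocatorTypeLOS };

// Trivial placeholder space so the sketch stays self-contained; the real code
// calls BumpPointerSpace, RosAllocSpace/DlMallocSpace and the LargeObjectSpace.
struct SpaceModel {
  void* Alloc(size_t size, size_t* bytes_allocated) {
    *bytes_allocated = size;
    return ::operator new(size);
  }
};

class HeapModel {
 public:
  template <bool kInstrumented>
  void* TryToAllocate(AllocatorType allocator_type, size_t alloc_size, size_t* bytes_allocated) {
    switch (allocator_type) {
      case kAllocatorTypeBumpPointer:
        // Bump-pointer allocation only hands out aligned chunks, so round up first.
        alloc_size = RoundUp(alloc_size, kAlignment);
        return bump_pointer_space_.Alloc(alloc_size, bytes_allocated);
      case kAllocatorTypeFreeList:
        return non_moving_space_.Alloc(alloc_size, bytes_allocated);
      case kAllocatorTypeLOS:
        return large_object_space_.Alloc(alloc_size, bytes_allocated);
    }
    return nullptr;  // unreachable for valid allocator types
  }

 private:
  static constexpr size_t kAlignment = 8;
  static size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }
  SpaceModel bump_pointer_space_, non_moving_space_, large_object_space_;
};
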
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 763bfe9cbd..c31e3e982b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -74,7 +74,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
bool ignore_max_footprint)
- : non_moving_space_(NULL),
+ : non_moving_space_(nullptr),
concurrent_gc_(!kMovingCollector && concurrent_gc),
parallel_gc_threads_(parallel_gc_threads),
conc_gc_threads_(conc_gc_threads),
@@ -128,6 +128,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
*/
max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
: (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+ current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer : kAllocatorTypeFreeList),
+ current_non_moving_allocator_(kAllocatorTypeFreeList),
bump_pointer_space_(nullptr),
temp_space_(nullptr),
reference_referent_offset_(0),
@@ -256,9 +258,13 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
}
+ gc_plan_.push_back(collector::kGcTypeSticky);
+ gc_plan_.push_back(collector::kGcTypePartial);
+ gc_plan_.push_back(collector::kGcTypeFull);
} else {
semi_space_collector_ = new collector::SemiSpace(this);
garbage_collectors_.push_back(semi_space_collector_);
+ gc_plan_.push_back(collector::kGcTypeFull);
}
if (running_on_valgrind_) {
@@ -779,106 +785,6 @@ void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_obj
self->ThrowOutOfMemoryError(oss.str().c_str());
}
-inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
- mirror::Object** obj_ptr, size_t* bytes_allocated) {
- bool large_object_allocation = ShouldAllocLargeObject(c, byte_count);
- if (UNLIKELY(large_object_allocation)) {
- mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated);
- // Make sure that our large object didn't get placed anywhere within the space interval or else
- // it breaks the immune range.
- DCHECK(obj == nullptr ||
- reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
- reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
- *obj_ptr = obj;
- }
- return large_object_allocation;
-}
-
-mirror::Object* Heap::AllocMovableObjectInstrumented(Thread* self, mirror::Class* c,
- size_t byte_count) {
- DebugCheckPreconditionsForAllocObject(c, byte_count);
- mirror::Object* obj;
- AllocationTimer alloc_timer(this, &obj);
- byte_count = RoundUp(byte_count, 8);
- if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
- if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
- }
- }
- obj = bump_pointer_space_->AllocNonvirtual(byte_count);
- if (LIKELY(obj != NULL)) {
- obj->SetClass(c);
- DCHECK(!obj->IsClass());
- // Record allocation after since we want to use the atomic add for the atomic fence to guard
- // the SetClass since we do not want the class to appear NULL in another thread.
- num_bytes_allocated_.fetch_add(byte_count);
- if (Runtime::Current()->HasStatsEnabled()) {
- RuntimeStats* thread_stats = Thread::Current()->GetStats();
- ++thread_stats->allocated_objects;
- thread_stats->allocated_bytes += byte_count;
- RuntimeStats* global_stats = Runtime::Current()->GetStats();
- ++global_stats->allocated_objects;
- global_stats->allocated_bytes += byte_count;
- }
- if (Dbg::IsAllocTrackingEnabled()) {
- Dbg::RecordAllocation(c, byte_count);
- }
- if (kDesiredHeapVerification > kNoHeapVerification) {
- VerifyObject(obj);
- }
- } else {
- ThrowOutOfMemoryError(self, byte_count, false);
- }
- if (kIsDebugBuild) {
- self->VerifyStack();
- }
- return obj;
-}
-
-mirror::Object* Heap::AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* c,
- size_t byte_count) {
- DebugCheckPreconditionsForAllocObject(c, byte_count);
- mirror::Object* obj;
- size_t bytes_allocated;
- AllocationTimer alloc_timer(this, &obj);
- bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count, &obj,
- &bytes_allocated);
- if (LIKELY(!large_object_allocation)) {
- // Non-large object allocation.
- if (!kUseRosAlloc) {
- DCHECK(non_moving_space_->IsDlMallocSpace());
- obj = AllocateInstrumented(self, reinterpret_cast<space::DlMallocSpace*>(non_moving_space_),
- byte_count, &bytes_allocated);
- } else {
- DCHECK(non_moving_space_->IsRosAllocSpace());
- obj = AllocateInstrumented(self, reinterpret_cast<space::RosAllocSpace*>(non_moving_space_),
- byte_count, &bytes_allocated);
- }
- // Ensure that we did not allocate into a zygote space.
- DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
- }
- if (LIKELY(obj != NULL)) {
- obj->SetClass(c);
- // Record allocation after since we want to use the atomic add for the atomic fence to guard
- // the SetClass since we do not want the class to appear NULL in another thread.
- size_t new_num_bytes_allocated = RecordAllocationInstrumented(bytes_allocated, obj);
- if (Dbg::IsAllocTrackingEnabled()) {
- Dbg::RecordAllocation(c, byte_count);
- }
- CheckConcurrentGC(self, new_num_bytes_allocated, obj);
- if (kDesiredHeapVerification > kNoHeapVerification) {
- VerifyObject(obj);
- }
- } else {
- ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
- }
- if (kIsDebugBuild) {
- self->VerifyStack();
- }
- return obj;
-}
-
void Heap::Trim() {
uint64_t start_ns = NanoTime();
// Trim the managed spaces.
@@ -1059,31 +965,6 @@ void Heap::VerifyHeap() {
GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}
-inline size_t Heap::RecordAllocationInstrumented(size_t size, mirror::Object* obj) {
- DCHECK(obj != NULL);
- DCHECK_GT(size, 0u);
- size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size));
-
- if (Runtime::Current()->HasStatsEnabled()) {
- RuntimeStats* thread_stats = Thread::Current()->GetStats();
- ++thread_stats->allocated_objects;
- thread_stats->allocated_bytes += size;
-
- // TODO: Update these atomically.
- RuntimeStats* global_stats = Runtime::Current()->GetStats();
- ++global_stats->allocated_objects;
- global_stats->allocated_bytes += size;
- }
-
- // This is safe to do since the GC will never free objects which are neither in the allocation
- // stack or the live bitmap.
- while (!allocation_stack_->AtomicPushBack(obj)) {
- CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- }
-
- return old_num_bytes_allocated + size;
-}
-
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
num_bytes_allocated_.fetch_sub(freed_bytes);
@@ -1100,125 +981,50 @@ void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
}
}
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
- if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
- }
- return space->Alloc(self, alloc_size, bytes_allocated);
-}
-
-// DlMallocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
- if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return nullptr;
- }
- if (LIKELY(!running_on_valgrind_)) {
- return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
- } else {
- return space->Alloc(self, alloc_size, bytes_allocated);
- }
-}
-
-// RosAllocSpace-specific version.
-inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated) {
- if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
- return NULL;
- }
- if (LIKELY(!running_on_valgrind_)) {
- return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
- } else {
- return space->Alloc(self, alloc_size, bytes_allocated);
- }
-}
-
-template <class T>
-inline mirror::Object* Heap::AllocateInstrumented(Thread* self, T* space, size_t alloc_size,
- size_t* bytes_allocated) {
- // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
- // done in the runnable state where suspension is expected.
- DCHECK_EQ(self->GetState(), kRunnable);
- self->AssertThreadSuspensionIsAllowable();
-
- mirror::Object* ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
- if (LIKELY(ptr != NULL)) {
- return ptr;
- }
- return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
-}
-
-mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space,
+mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
size_t alloc_size, size_t* bytes_allocated) {
- mirror::Object* ptr;
-
+ mirror::Object* ptr = nullptr;
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(self);
if (last_gc != collector::kGcTypeNone) {
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
- ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
- if (ptr != NULL) {
- return ptr;
- }
+ ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
- for (size_t i = static_cast<size_t>(last_gc) + 1;
- i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
- bool run_gc = false;
- collector::GcType gc_type = static_cast<collector::GcType>(i);
- switch (gc_type) {
- case collector::kGcTypeSticky: {
- const size_t alloc_space_size = non_moving_space_->Size();
- run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
- non_moving_space_->Capacity() - alloc_space_size >=
- min_remaining_space_for_sticky_gc_;
- break;
- }
- case collector::kGcTypePartial:
- run_gc = have_zygote_space_;
- break;
- case collector::kGcTypeFull:
- run_gc = true;
- break;
- default:
- LOG(FATAL) << "Invalid GC type";
- }
-
- if (run_gc) {
- // If we actually ran a different type of Gc than requested, we can skip the index forwards.
- collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
- DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
- i = static_cast<size_t>(gc_type_ran);
-
+ for (collector::GcType gc_type : gc_plan_) {
+ if (ptr != nullptr) {
+ break;
+ }
+ // Attempt to run the collector, if we succeed, re-try the allocation.
+ if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
// Did we free sufficient memory for the allocation to succeed?
- ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
- if (ptr != NULL) {
- return ptr;
- }
+ ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
}
}
-
// Allocations have failed after GCs; this is an exceptional state.
- // Try harder, growing the heap if necessary.
- ptr = TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
- if (ptr != NULL) {
- return ptr;
+ if (ptr == nullptr) {
+ // Try harder, growing the heap if necessary.
+ ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
}
-
- // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
- // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
- // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.
-
- // TODO: Run finalization, but this can cause more allocations to occur.
- VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
- << " allocation";
-
- // We don't need a WaitForGcToComplete here either.
- CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
- return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
+ if (ptr == nullptr) {
+ // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+ // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+ // VM spec requires that all SoftReferences have been collected and cleared before throwing
+ // OOME.
+ VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+ << " allocation";
+ // TODO: Run finalization, but this may cause more allocations to occur.
+ // We don't need a WaitForGcToComplete here either.
+ DCHECK(!gc_plan_.empty());
+ CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
+ if (ptr == nullptr) {
+ ThrowOutOfMemoryError(self, alloc_size, false);
+ }
+ }
+ return ptr;
}
void Heap::SetTargetHeapUtilization(float target) {
@@ -1493,6 +1299,27 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
bool clear_soft_references) {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
+ // If the heap can't run the GC, silently fail and return that no GC was run.
+ switch (gc_type) {
+ case collector::kGcTypeSticky: {
+ const size_t alloc_space_size = non_moving_space_->Size();
+ if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
+ non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
+ return collector::kGcTypeNone;
+ }
+ break;
+ }
+ case collector::kGcTypePartial: {
+ if (!have_zygote_space_) {
+ return collector::kGcTypeNone;
+ }
+ break;
+ }
+ default: {
+ // Other GC types don't have any special cases which make them not runnable. The main case
+ // here is full GC.
+ }
+ }
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
if (self->IsHandlingStackOverflow()) {
@@ -1512,12 +1339,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
}
is_gc_running_ = true;
}
-
if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
}
-
uint64_t gc_start_time_ns = NanoTime();
uint64_t gc_start_size = GetBytesAllocated();
// Approximate allocation rate in bytes / second.
@@ -1528,11 +1353,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
}
- if (gc_type == collector::kGcTypeSticky &&
- non_moving_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
- gc_type = collector::kGcTypePartial;
- }
-
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -2347,6 +2167,9 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
// Total number of native bytes allocated.
native_bytes_allocated_.fetch_add(bytes);
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+ collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
+ collector::kGcTypeFull;
+
// The second watermark is higher than the gc watermark. If you hit this it means you are
// allocating native objects faster than the GC can keep up with.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
@@ -2357,7 +2180,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+ CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
RunFinalization(env);
native_need_to_run_finalization_ = false;
CHECK(!env->ExceptionCheck());
@@ -2369,7 +2192,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
if (concurrent_gc_) {
RequestConcurrentGC(self);
} else {
- CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+ CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
}
}
}
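
The heap.cc hunks above also rewrite the slow path: instead of hard-coding the sticky/partial/full escalation inside AllocateInternalWithGc, the loop now walks the gc_plan_ vector and relies on CollectGarbageInternal to return kGcTypeNone when a collection cannot run. A compact model of that retry strategy, with trivial stubs in place of the real collector and allocator calls, is sketched below:

#include <cstddef>
#include <new>
#include <vector>
using std::size_t;

enum GcType { kGcTypeNone, kGcTypeSticky, kGcTypePartial, kGcTypeFull };

// Trivial stubs so the sketch compiles on its own; the real heap calls
// CollectGarbageInternal and TryToAllocate<true>.
static GcType CollectGarbage(GcType type, bool /*clear_soft_references*/) { return type; }
static void* TryToAllocate(size_t alloc_size, bool /*grow*/, size_t* bytes_allocated) {
  *bytes_allocated = alloc_size;
  return ::operator new(alloc_size);
}

static void* AllocateWithGc(const std::vector<GcType>& gc_plan, size_t alloc_size,
                            size_t* bytes_allocated) {
  void* ptr = nullptr;
  // Escalate through the plan (e.g. sticky -> partial -> full); a collector that
  // cannot run reports kGcTypeNone and the loop simply moves on to the next one.
  for (GcType gc_type : gc_plan) {
    if (ptr != nullptr) {
      break;
    }
    if (CollectGarbage(gc_type, /*clear_soft_references=*/false) != kGcTypeNone) {
      ptr = TryToAllocate(alloc_size, /*grow=*/false, bytes_allocated);
    }
  }
  if (ptr == nullptr) {
    // Try harder: allow the footprint to grow.
    ptr = TryToAllocate(alloc_size, /*grow=*/true, bytes_allocated);
  }
  if (ptr == nullptr) {
    // Last resort: run the most thorough GC in the plan and clear SoftReferences.
    CollectGarbage(gc_plan.back(), /*clear_soft_references=*/true);
    ptr = TryToAllocate(alloc_size, /*grow=*/true, bytes_allocated);
  }
  return ptr;  // the caller throws OutOfMemoryError if this is still null
}
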
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3da3943aa7..5a0372a295 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -87,6 +87,13 @@ class AgeCardVisitor {
}
};
+// Different types of allocators.
+enum AllocatorType {
+ kAllocatorTypeBumpPointer,
+ kAllocatorTypeFreeList, // ROSAlloc / dlmalloc
+ kAllocatorTypeLOS, // Large object space.
+};
+
// What caused the GC?
enum GcCause {
// GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
@@ -143,41 +150,30 @@ class Heap {
~Heap();
// Allocates and initializes storage for an object instance.
- mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+ template <const bool kInstrumented>
+ inline mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingClasses);
- return AllocObjectInstrumented(self, klass, num_bytes);
+ return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes, GetCurrentAllocator());
}
- // Allocates and initializes storage for an object instance.
- mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+ template <const bool kInstrumented>
+ inline mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass,
+ size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingClasses);
- return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+ return AllocObjectWithAllocator<kInstrumented>(self, klass, num_bytes,
+ GetCurrentNonMovingAllocator());
}
- mirror::Object* AllocObjectInstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingClasses);
- if (kMovingCollector) {
- return AllocMovableObjectInstrumented(self, klass, num_bytes);
- } else {
- return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
- }
+ template <bool kInstrumented>
+ ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
+ size_t num_bytes, AllocatorType allocator)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ AllocatorType GetCurrentAllocator() const {
+ return current_allocator_;
}
- mirror::Object* AllocObjectUninstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingClasses);
- if (kMovingCollector) {
- return AllocMovableObjectUninstrumented(self, klass, num_bytes);
- } else {
- return AllocNonMovableObjectUninstrumented(self, klass, num_bytes);
- }
+
+ AllocatorType GetCurrentNonMovingAllocator() const {
+ return current_non_moving_allocator_;
}
- mirror::Object* AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* klass,
- size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
- size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Visit all of the live objects in the heap.
void VisitObjects(ObjectVisitorCallback callback, void* arg)
@@ -488,13 +484,6 @@ class Heap {
accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
- mirror::Object* AllocMovableObjectInstrumented(Thread* self, mirror::Class* klass,
- size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* AllocMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
- size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
bool IsCompilingBoot() const;
bool HasImageSpace() const;
@@ -502,30 +491,19 @@ class Heap {
void Compact(space::ContinuousMemMapAllocSpace* target_space,
space::ContinuousMemMapAllocSpace* source_space);
- bool TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
- mirror::Object** obj_ptr, size_t* bytes_allocated)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool TryAllocLargeObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count,
- mirror::Object** obj_ptr, size_t* bytes_allocated)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count);
- void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, mirror::Object* obj);
-
- // Allocates uninitialized storage. Passing in a null space tries to place the object in the
- // large object space.
- template <class T> mirror::Object* AllocateInstrumented(Thread* self, T* space, size_t num_bytes,
- size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template <class T> mirror::Object* AllocateUninstrumented(Thread* self, T* space, size_t num_bytes,
- size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
+ return allocator_type == kAllocatorTypeFreeList;
+ }
+ static bool AllocatorHasConcurrentGC(AllocatorType allocator_type) {
+ return allocator_type == kAllocatorTypeFreeList;
+ }
+ bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const;
+ ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
+ mirror::Object* obj);
// Handles Allocate()'s slow allocation path with GC involved after
// an initial allocation attempt failed.
- mirror::Object* AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t num_bytes,
+ mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
size_t* bytes_allocated)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -535,37 +513,12 @@ class Heap {
size_t bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Try to allocate a number of bytes, this function never does any GCs.
- mirror::Object* TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Try to allocate a number of bytes, this function never does any GCs. DlMallocSpace-specialized version.
- mirror::Object* TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // Try to allocate a number of bytes, this function never does any GCs. RosAllocSpace-specialized version.
- mirror::Object* TryToAllocateInstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::Object* TryToAllocateUninstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::Object* TryToAllocateUninstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::Object* TryToAllocateUninstrumented(Thread* self, space::RosAllocSpace* space, size_t alloc_size,
- bool grow, size_t* bytes_allocated)
- LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+ // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
+ // that the switch statement is constant-optimized in the entrypoints.
+ template <const bool kInstrumented>
+ ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
+ size_t alloc_size, bool grow,
+ size_t* bytes_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
@@ -816,12 +769,18 @@ class Heap {
// Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
// to use the live bitmap as the old mark bitmap.
const size_t max_allocation_stack_size_;
- bool is_allocation_stack_sorted_;
UniquePtr<accounting::ObjectStack> allocation_stack_;
// Second allocation stack so that we can process allocation with the heap unlocked.
UniquePtr<accounting::ObjectStack> live_stack_;
+ // Allocator type.
+ const AllocatorType current_allocator_;
+ const AllocatorType current_non_moving_allocator_;
+
+ // Which GCs we run, in order, when an allocation fails.
+ std::vector<collector::GcType> gc_plan_;
+
// Bump pointer spaces.
space::BumpPointerSpace* bump_pointer_space_;
// Temp space is the space which the semispace collector copies to.
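
Note how heap.h trades the TryToAllocate(Instrumented|Uninstrumented) overload families for two small predicates, AllocatorHasAllocationStack and AllocatorHasConcurrentGC, which the shared fast path uses to decide which bookkeeping applies to a given allocator. A condensed model of that gating, with hypothetical PushOntoAllocationStack and MaybeRequestConcurrentGC hooks in place of the real allocation stack and concurrent-GC request, is:

#include <cstddef>
using std::size_t;

enum AllocatorType { kAllocatorTypeBumpPointer, kAllocatorTypeFreeList, kAllocatorTypeLOS };

// In this change only free-list (non-moving) allocations feed the sticky-GC
// allocation stack and the concurrent-GC trigger.
static bool AllocatorHasAllocationStack(AllocatorType type) {
  return type == kAllocatorTypeFreeList;
}
static bool AllocatorHasConcurrentGC(AllocatorType type) {
  return type == kAllocatorTypeFreeList;
}

// Hypothetical hooks standing in for allocation_stack_->AtomicPushBack and
// Heap::CheckConcurrentGC.
static void PushOntoAllocationStack(void* /*obj*/) {}
static void MaybeRequestConcurrentGC(size_t /*new_num_bytes_allocated*/) {}

static void PostAllocBookkeeping(AllocatorType allocator, void* obj,
                                 size_t new_num_bytes_allocated) {
  if (AllocatorHasAllocationStack(allocator)) {
    // Objects the sticky GC must scan are pushed onto the allocation stack.
    PushOntoAllocationStack(obj);
  }
  if (AllocatorHasConcurrentGC(allocator)) {
    // Possibly kick off a background GC once enough bytes have been allocated.
    MaybeRequestConcurrentGC(new_num_bytes_allocated);
  }
}
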
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 0faac0ce46..9b0b6aae3c 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -120,6 +120,9 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace {
static mirror::Object* GetNextObject(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Alignment.
+ static constexpr size_t kAlignment = 8;
+
protected:
BumpPointerSpace(const std::string& name, MemMap* mem_map);
@@ -132,9 +135,6 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace {
AtomicInteger total_bytes_allocated_;
AtomicInteger total_objects_allocated_;
- // Alignment.
- static constexpr size_t kAlignment = 8;
-
byte* growth_end_;
private:
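
The final hunk simply moves kAlignment into the public section of BumpPointerSpace, since the heap (rather than the space) now rounds requests up before calling AllocNonvirtual. A two-line illustration of that contract, using a local RoundUp helper for the arithmetic, is:

#include <cstddef>
using std::size_t;

// space::BumpPointerSpace::kAlignment is now public, so the heap can pre-align
// requests exactly as TryToAllocate does before calling AllocNonvirtual.
static constexpr size_t kAlignment = 8;
static constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

static_assert(RoundUp(13, kAlignment) == 16, "bump-pointer requests are rounded to 8 bytes");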