Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h   | 24
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc          | 10
-rw-r--r--  runtime/gc/allocator/dlmalloc.h           | 17
-rw-r--r--  runtime/gc/allocator/rosalloc.cc          | 28
-rw-r--r--  runtime/gc/allocator/rosalloc.h           |  4
-rw-r--r--  runtime/gc/collector/mark_compact.cc      |  6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc        | 26
-rw-r--r--  runtime/gc/collector/semi_space.cc        |  4
-rw-r--r--  runtime/gc/heap.cc                        | 30
-rw-r--r--  runtime/gc/heap_test.cc                   |  4
-rw-r--r--  runtime/gc/reference_processor.cc         |  2
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc    |  6
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc        | 47
-rw-r--r--  runtime/gc/space/rosalloc_space.cc        | 23
-rw-r--r--  runtime/gc/space/space.cc                 |  4
15 files changed, 134 insertions, 101 deletions
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index c67542f484..34c15c7f8b 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,9 +40,9 @@ inline bool HeapBitmap::Test(const mirror::Object* obj) {
   if (LIKELY(bitmap != nullptr)) {
     return bitmap->Test(obj);
   }
-  for (const auto& bitmap : large_object_bitmaps_) {
-    if (LIKELY(bitmap->HasAddress(obj))) {
-      return bitmap->Test(obj);
+  for (const auto& lo_bitmap : large_object_bitmaps_) {
+    if (LIKELY(lo_bitmap->HasAddress(obj))) {
+      return lo_bitmap->Test(obj);
     }
   }
   LOG(FATAL) << "Invalid object " << obj;
@@ -55,9 +55,9 @@ inline void HeapBitmap::Clear(const mirror::Object* obj) {
     bitmap->Clear(obj);
     return;
   }
-  for (const auto& bitmap : large_object_bitmaps_) {
-    if (LIKELY(bitmap->HasAddress(obj))) {
-      bitmap->Clear(obj);
+  for (const auto& lo_bitmap : large_object_bitmaps_) {
+    if (LIKELY(lo_bitmap->HasAddress(obj))) {
+      lo_bitmap->Clear(obj);
     }
   }
   LOG(FATAL) << "Invalid object " << obj;
@@ -70,9 +70,9 @@ inline bool HeapBitmap::Set(const mirror::Object* obj, const LargeObjectSetVisit
     return bitmap->Set(obj);
   }
   visitor(obj);
-  for (const auto& bitmap : large_object_bitmaps_) {
-    if (LIKELY(bitmap->HasAddress(obj))) {
-      return bitmap->Set(obj);
+  for (const auto& lo_bitmap : large_object_bitmaps_) {
+    if (LIKELY(lo_bitmap->HasAddress(obj))) {
+      return lo_bitmap->Set(obj);
     }
   }
   LOG(FATAL) << "Invalid object " << obj;
@@ -87,9 +87,9 @@ inline bool HeapBitmap::AtomicTestAndSet(const mirror::Object* obj,
     return bitmap->AtomicTestAndSet(obj);
   }
   visitor(obj);
-  for (const auto& bitmap : large_object_bitmaps_) {
-    if (LIKELY(bitmap->HasAddress(obj))) {
-      return bitmap->AtomicTestAndSet(obj);
+  for (const auto& lo_bitmap : large_object_bitmaps_) {
+    if (LIKELY(lo_bitmap->HasAddress(obj))) {
+      return lo_bitmap->AtomicTestAndSet(obj);
     }
   }
   LOG(FATAL) << "Invalid object " << obj;
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index acff52d50d..8558f96730 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -19,8 +19,8 @@
 #include "base/logging.h"
 
 // ART specific morecore implementation defined in space.cc.
+static void* art_heap_morecore(void* m, intptr_t increment);
 #define MORECORE(x) art_heap_morecore(m, x)
-extern "C" void* art_heap_morecore(void* m, intptr_t increment);
 
 // Custom heap error handling.
 #define PROCEED_ON_ERROR 0
@@ -31,12 +31,16 @@ static void art_heap_usage_error(const char* function, void* p);
 
 // Ugly inclusion of C file so that ART specific #defines configure dlmalloc for our use for
 // mspaces (regular dlmalloc is still declared in bionic).
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
 #pragma GCC diagnostic ignored "-Wempty-body"
 #pragma GCC diagnostic ignored "-Wstrict-aliasing"
 #include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
-#pragma GCC diagnostic warning "-Wstrict-aliasing"
-#pragma GCC diagnostic warning "-Wempty-body"
+#pragma GCC diagnostic pop
+
+static void* art_heap_morecore(void* m, intptr_t increment) {
+  return ::art::gc::allocator::ArtDlMallocMoreCore(m, increment);
+}
 
 static void art_heap_corruption(const char* function) {
   LOG(::art::FATAL) << "Corrupt heap detected in: " << function;
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index c7ecbc83ce..0e91a4372c 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
 #define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
 
+#include <cstdint>
+
 // Configure dlmalloc for mspaces.
 // Avoid a collision with one used in llvm.
 #undef HAVE_MMAP
@@ -28,7 +30,10 @@
 #define ONLY_MSPACES 1
 #define MALLOC_INSPECT_ALL 1
 
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
 #include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+#pragma GCC diagnostic pop
 
 #ifdef HAVE_ANDROID_OS
 // Define dlmalloc routines from bionic that cannot be included directly because of redefining
@@ -47,4 +52,16 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
 extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
 extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
 
+namespace art {
+namespace gc {
+namespace allocator {
+
+// Callback from dlmalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently dlmalloc_space.cc).
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment);
+
+}  // namespace allocator
+}  // namespace gc
+}  // namespace art
+
 #endif  // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5e2fed9d7..f9d6a512ce 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -31,8 +31,6 @@ namespace art {
 namespace gc {
 namespace allocator {
 
-extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
-
 static constexpr bool kUsePrefetchDuringAllocRun = true;
 static constexpr bool kPrefetchNewRunDataByZeroing = false;
 static constexpr size_t kPrefetchStride = 64;
@@ -179,7 +177,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type
     page_map_size_ = new_num_of_pages;
     DCHECK_LE(page_map_size_, max_page_map_size_);
     free_page_run_size_map_.resize(new_num_of_pages);
-    art_heap_rosalloc_morecore(this, increment);
+    ArtRosAllocMoreCore(this, increment);
     if (last_free_page_run_size > 0) {
       // There was a free page run at the end. Expand its size.
       DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
@@ -745,7 +743,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
   const size_t idx = run->size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
   bool run_was_full = false;
-  MutexLock mu(self, *size_bracket_locks_[idx]);
+  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
   if (kIsDebugBuild) {
     run_was_full = run->IsFull();
   }
@@ -785,7 +783,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
       DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
       run->ZeroHeader();
       {
-        MutexLock mu(self, lock_);
+        MutexLock lock_mu(self, lock_);
         FreePages(self, run, true);
       }
     } else {
@@ -1243,7 +1241,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
     run->to_be_bulk_freed_ = false;
 #endif
     size_t idx = run->size_bracket_idx_;
-    MutexLock mu(self, *size_bracket_locks_[idx]);
+    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
     if (run->IsThreadLocal()) {
       DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
       DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
@@ -1303,7 +1301,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
       }
       if (!run_was_current) {
         run->ZeroHeader();
-        MutexLock mu(self, lock_);
+        MutexLock lock_mu(self, lock_);
         FreePages(self, run, true);
       }
     } else {
@@ -1521,7 +1519,7 @@ bool RosAlloc::Trim() {
     page_map_size_ = new_num_of_pages;
     free_page_run_size_map_.resize(new_num_of_pages);
     DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
-    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
+    ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
     if (kTraceRosAlloc) {
       LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from " << footprint_
                 << " to " << new_footprint;
@@ -1737,14 +1735,14 @@ void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
 void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
   if (kIsDebugBuild) {
     Thread* self = Thread::Current();
-    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
-    MutexLock mu2(self, *Locks::thread_list_lock_);
+    MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
+    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
     for (Thread* t : thread_list) {
       AssertThreadLocalRunsAreRevoked(t);
     }
     for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
-      MutexLock mu(self, *size_bracket_locks_[idx]);
+      MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
       CHECK_EQ(current_runs_[idx], dedicated_full_run_);
     }
   }
@@ -1873,11 +1871,11 @@ void RosAlloc::Verify() {
   Thread* self = Thread::Current();
   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
       << "The mutator locks isn't exclusively locked at " << __PRETTY_FUNCTION__;
-  MutexLock mu(self, *Locks::thread_list_lock_);
+  MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
   ReaderMutexLock wmu(self, bulk_free_lock_);
   std::vector<Run*> runs;
   {
-    MutexLock mu(self, lock_);
+    MutexLock lock_mu(self, lock_);
     size_t pm_end = page_map_size_;
     size_t i = 0;
     while (i < pm_end) {
@@ -1968,7 +1966,7 @@ void RosAlloc::Verify() {
   std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
   for (Thread* thread : threads) {
     for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
-      MutexLock mu(self, *size_bracket_locks_[i]);
+      MutexLock brackets_mu(self, *size_bracket_locks_[i]);
       Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
       CHECK(thread_local_run != nullptr);
       CHECK(thread_local_run->IsThreadLocal());
@@ -1977,7 +1975,7 @@ void RosAlloc::Verify() {
     }
   }
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
-    MutexLock mu(self, *size_bracket_locks_[i]);
+    MutexLock brackets_mu(self, *size_bracket_locks_[i]);
     Run* current_run = current_runs_[i];
     CHECK(current_run != nullptr);
     if (current_run != dedicated_full_run_) {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index a2f8342fd8..2a0bf10d90 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -616,6 +616,10 @@ class RosAlloc {
 };
 std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
 
+// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently rosalloc_space.cc).
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);
+
 }  // namespace allocator
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6691b0f4fc..b2482acaf5 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -239,7 +239,7 @@ void MarkCompact::UpdateAndMarkModUnion() {
     accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
     if (table != nullptr) {
       // TODO: Improve naming.
-      TimingLogger::ScopedTiming t(
+      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable", GetTimings());
      table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -348,7 +348,7 @@ void MarkCompact::UpdateReferences() {
     accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
     if (table != nullptr) {
       // TODO: Improve naming.
-      TimingLogger::ScopedTiming t(
+      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
                                   "UpdateImageModUnionTableReferences",
          GetTimings());
@@ -538,7 +538,7 @@ void MarkCompact::Sweep(bool swap_bitmaps) {
       if (!ShouldSweepSpace(alloc_space)) {
         continue;
       }
-      TimingLogger::ScopedTiming t(
+      TimingLogger::ScopedTiming t2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e3966e3081..6ad44e6a75 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -667,10 +667,10 @@ class MarkStackTask : public Task {
       Object* obj = nullptr;
       if (kUseMarkStackPrefetch) {
         while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
-          Object* obj = mark_stack_[--mark_stack_pos_];
-          DCHECK(obj != nullptr);
-          __builtin_prefetch(obj);
-          prefetch_fifo.push_back(obj);
+          Object* mark_stack_obj = mark_stack_[--mark_stack_pos_];
+          DCHECK(mark_stack_obj != nullptr);
+          __builtin_prefetch(mark_stack_obj);
+          prefetch_fifo.push_back(mark_stack_obj);
         }
         if (UNLIKELY(prefetch_fifo.empty())) {
           break;
@@ -928,7 +928,7 @@ void MarkSweep::ReMarkRoots() {
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
   if (kVerifyRootsMarked) {
-    TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
+    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
     Runtime::Current()->VisitRoots(VerifyRootMarked, this);
   }
 }
@@ -1057,7 +1057,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
       // if needed.
       if (!mark_bitmap->Test(obj)) {
         if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
-          TimingLogger::ScopedTiming t("FreeList", GetTimings());
+          TimingLogger::ScopedTiming t2("FreeList", GetTimings());
          freed.objects += chunk_free_pos;
          freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
          chunk_free_pos = 0;
@@ -1069,7 +1069,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
       }
     }
     if (chunk_free_pos > 0) {
-      TimingLogger::ScopedTiming t("FreeList", GetTimings());
+      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
@@ -1099,10 +1099,10 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
     }
   }
   {
-    TimingLogger::ScopedTiming t("RecordFree", GetTimings());
+    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
     RecordFree(freed);
     RecordFreeLOS(freed_los);
-    t.NewTiming("ResetStack");
+    t2.NewTiming("ResetStack");
     allocations->Reset();
   }
   sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
@@ -1218,10 +1218,10 @@ void MarkSweep::ProcessMarkStack(bool paused) {
     Object* obj = NULL;
     if (kUseMarkStackPrefetch) {
       while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
-        Object* obj = mark_stack_->PopBack();
-        DCHECK(obj != NULL);
-        __builtin_prefetch(obj);
-        prefetch_fifo.push_back(obj);
+        Object* mark_stack_obj = mark_stack_->PopBack();
+        DCHECK(mark_stack_obj != NULL);
+        __builtin_prefetch(mark_stack_obj);
+        prefetch_fifo.push_back(mark_stack_obj);
       }
       if (prefetch_fifo.empty()) {
         break;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e141b6f4ab..cb9f111058 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -224,7 +224,7 @@ void SemiSpace::MarkingPhase() {
   // Need to do this before the checkpoint since we don't want any threads to add references to
   // the live stack during the recursive mark.
   if (kUseThreadLocalAllocationStack) {
-    TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
+    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
     heap_->RevokeAllThreadLocalAllocationStacks(self_);
   }
   heap_->SwapStacks(self_);
@@ -368,7 +368,7 @@ void SemiSpace::MarkReachableObjects() {
   CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
   space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
   if (is_large_object_space_immune_ && los != nullptr) {
-    TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
+    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
     DCHECK(collect_from_space_only_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9fd9a2b377..06cd326d84 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -599,8 +599,8 @@ void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
     }
   }
   // Unprotect all the spaces.
-  for (const auto& space : continuous_spaces_) {
-    mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+  for (const auto& con_space : continuous_spaces_) {
+    mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
   }
   stream << "Object " << obj;
   if (space != nullptr) {
@@ -1266,12 +1266,12 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
       continue;
     }
     // Attempt to run the collector, if we succeed, re-try the allocation.
-    const bool gc_ran =
+    const bool plan_gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      return nullptr;
    }
-    if (gc_ran) {
+    if (plan_gc_ran) {
      // Did we free sufficient memory for the allocation to succeed?
      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
                                                       usable_size);
@@ -1532,7 +1532,7 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
   {
-    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+    ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
     MutexLock mu(self, *gc_complete_lock_);
     // Ensure there is only one GC at a time.
     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
@@ -1604,7 +1604,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
   // compacting_gc_disable_count_, this should rarely occurs).
   for (;;) {
     {
-      ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+      ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
       MutexLock mu(self, *gc_complete_lock_);
       // Ensure there is only one GC at a time.
       WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
@@ -2079,7 +2079,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
   bool compacting_gc;
   {
     gc_complete_lock_->AssertNotHeld(self);
-    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+    ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
     MutexLock mu(self, *gc_complete_lock_);
     // Ensure there is only one GC at a time.
     WaitForGcToCompleteLocked(gc_cause, self);
@@ -2646,15 +2646,15 @@ void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
     if (table != nullptr) {
       const char* name = space->IsZygoteSpace() ?
          "ZygoteModUnionClearCards" : "ImageModUnionClearCards";
-      TimingLogger::ScopedTiming t(name, timings);
+      TimingLogger::ScopedTiming t2(name, timings);
       table->ClearCards();
     } else if (use_rem_sets && rem_set != nullptr) {
       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
          << static_cast<int>(collector_type_);
-      TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
+      TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
       rem_set->ClearCards();
     } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
-      TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
+      TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
       // were dirty before the GC started.
       // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
@@ -2676,7 +2676,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
   TimingLogger* const timings = current_gc_iteration_.GetTimings();
   TimingLogger::ScopedTiming t(__FUNCTION__, timings);
   if (verify_pre_gc_heap_) {
-    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
+    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     size_t failures = VerifyHeapReferences();
     if (failures > 0) {
@@ -2686,7 +2686,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
   }
   // Check that all objects which reference things in the live stack are on dirty cards.
   if (verify_missing_card_marks_) {
-    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
+    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     SwapStacks(self);
     // Sort the live stack so that we can quickly binary search it later.
@@ -2695,7 +2695,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
     SwapStacks(self);
   }
   if (verify_mod_union_table_) {
-    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
+    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
     for (const auto& table_pair : mod_union_tables_) {
       accounting::ModUnionTable* mod_union_table = table_pair.second;
@@ -2727,7 +2727,7 @@ void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
   // Called before sweeping occurs since we want to make sure we are not going so reclaim any
   // reachable objects.
   if (verify_pre_sweeping_heap_) {
-    TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
+    TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
     CHECK_NE(self->GetState(), kRunnable);
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // Swapping bound bitmaps does nothing.
@@ -2760,7 +2760,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
     RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
   }
   if (verify_post_gc_heap_) {
-    TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
+    TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     size_t failures = VerifyHeapReferences();
     if (failures > 0) {
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 3106b4c913..73196b20a2 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -48,8 +48,8 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
     Handle<mirror::Class> c(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
    for (size_t i = 0; i < 1024; ++i) {
-      StackHandleScope<1> hs(soa.Self());
-      Handle<mirror::ObjectArray<mirror::Object>> array(hs.NewHandle(
+      StackHandleScope<1> hs2(soa.Self());
+      Handle<mirror::ObjectArray<mirror::Object>> array(hs2.NewHandle(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
      for (size_t j = 0; j < 2048; ++j) {
        mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index bfaa2bb0cc..012f9f91f5 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -143,7 +143,7 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
   soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
   weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
   {
-    TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" :
+    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 0a55b52c08..04b09e9969 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -188,11 +188,11 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
     size_t block_size = header->size_;
     pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
-    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
-    CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
+    const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+    CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
     // We don't know how many objects are allocated in the current block. When we hit a null class
     // assume its the end.  TODO: Have a thread update the header when it flushes the block?
-    while (obj < end && obj->GetClass() != nullptr) {
+    while (obj < end_obj && obj->GetClass() != nullptr) {
       callback(obj, arg);
       obj = GetNextObject(obj);
     }
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 445c720d4c..3072c23bf3 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -213,27 +213,6 @@ size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
   }
 }
 
-// Callback from dlmalloc when it needs to increase the footprint
-extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
-  Heap* heap = Runtime::Current()->GetHeap();
-  DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
-  // Support for multiple DlMalloc provided by a slow path.
-  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
-    dlmalloc_space = nullptr;
-    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
-      if (space->IsDlMallocSpace()) {
-        DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
-        if (cur_dlmalloc_space->GetMspace() == mspace) {
-          dlmalloc_space = cur_dlmalloc_space;
-          break;
-        }
-      }
-    }
-    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
-  }
-  return dlmalloc_space->MoreCore(increment);
-}
-
 size_t DlMallocSpace::Trim() {
   MutexLock mu(Thread::Current(), lock_);
   // Trim to release memory at the end of the space.
@@ -330,5 +309,31 @@ void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed
 }
 
 }  // namespace space
+
+namespace allocator {
+
+// Implement the dlmalloc morecore callback.
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
+  Heap* heap = Runtime::Current()->GetHeap();
+  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
+  // Support for multiple DlMalloc provided by a slow path.
+  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
+    dlmalloc_space = nullptr;
+    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
+      if (space->IsDlMallocSpace()) {
+        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
+        if (cur_dlmalloc_space->GetMspace() == mspace) {
+          dlmalloc_space = cur_dlmalloc_space;
+          break;
+        }
+      }
+    }
+    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
+  }
+  return dlmalloc_space->MoreCore(increment);
+}
+
+}  // namespace allocator
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 161eba9c1d..ff8b570a02 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -228,15 +228,6 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
   return bytes_freed;
 }
 
-// Callback from rosalloc when it needs to increase the footprint
-extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
-  Heap* heap = Runtime::Current()->GetHeap();
-  RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
-  DCHECK(rosalloc_space != nullptr);
-  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
-  return rosalloc_space->MoreCore(increment);
-}
-
 size_t RosAllocSpace::Trim() {
   VLOG(heap) << "RosAllocSpace::Trim() ";
   {
@@ -367,5 +358,19 @@ void RosAllocSpace::Clear() {
 }
 
 }  // namespace space
+
+namespace allocator {
+
+// Callback from rosalloc when it needs to increase the footprint.
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
+  Heap* heap = Runtime::Current()->GetHeap();
+  art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
+  DCHECK(rosalloc_space != nullptr);
+  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
+  return rosalloc_space->MoreCore(increment);
+}
+
+}  // namespace allocator
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index b233805e4c..486d79ad1b 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -133,8 +133,8 @@ void ContinuousMemMapAllocSpace::SwapBitmaps() {
   mark_bitmap_->SetName(temp_name);
 }
 
-AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
-    : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
+AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps_in, space::Space* space_in)
+    : swap_bitmaps(swap_bitmaps_in), space(space_in), self(Thread::Current()) {
 }
 
 }  // namespace space
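Two patterns recur throughout the diff above. First, the morecore hooks that dlmalloc and rosalloc call to grow or shrink their footprint change from extern "C" symbols (art_heap_morecore, art_heap_rosalloc_morecore) into the namespaced functions ArtDlMallocMoreCore and ArtRosAllocMoreCore, declared in the allocator headers and defined in dlmalloc_space.cc and rosalloc_space.cc. Second, most of the local renames (bitmap to lo_bitmap, obj to mark_stack_obj, mu to brackets_mu/lock_mu, t to t2, tsc to tsc2, hs to hs2, space to con_space) give inner declarations a name distinct from an identically named variable in an enclosing scope, i.e. they remove shadowing. As a rough, stand-alone illustration of that second pattern, here is a toy example (not taken from ART; all names in it are invented) that compiles cleanly under -Wshadow only because the inner loop variable has its own name:

// shadow_example.cc -- illustrative only, not part of the ART sources.
// Build with: g++ -std=c++11 -Wall -Wshadow shadow_example.cc
#include <cstdio>
#include <vector>

int CountMatches(const std::vector<std::vector<int>>& groups, int key) {
  int count = 0;  // Outer variable.
  for (const auto& group : groups) {
    // Declaring another "count" here would shadow the outer one (what -Wshadow
    // flags); the fix, as in the renames above, is a distinct inner name.
    for (int group_value : group) {
      if (group_value == key) {
        ++count;  // Unambiguously the outer counter.
      }
    }
  }
  return count;
}

int main() {
  std::printf("%d\n", CountMatches({{1, 2}, {2, 3}}, 2));  // Prints 2.
  return 0;
}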
