path: root/runtime/debugger.cc
author    Mathieu Chartier <mathieuc@google.com>  2014-10-10 12:45:50 -0700
committer Mathieu Chartier <mathieuc@google.com>  2014-10-10 15:45:35 -0700
commit  4c69d7f4df78d20c631abc5f70b811a9944854d3 (patch)
tree    d4480e82ff05207d15a2f6af60223504ac702ed8 /runtime/debugger.cc
parent  a9b097443f26a6df1a322a8fef50fdd1f26a410b (diff)
Hold mutator lock in DdmSendHeapSegments for all spaces
Previously we were releasing the mutator lock in DdmSendHeapSegments and only
reacquiring it for RosAlloc spaces. This was causing problems since
HeapChunkCallback accesses object fields through mirror.

Bug: 17950534

(cherry picked from commit d6527cf8e824d9057f32755f2ff4bdcf46c7095b)

Change-Id: Idb307fd4c01450a07e3c9621e04d2aabf2c6a0b9
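The bug class is easiest to see outside ART: a callback that reads object
fields must not run after the shared lock guarding those objects has been
dropped. Below is a minimal standalone C++ sketch of the pattern the patch
adopts, holding the shared lock across the entire walk. This is not ART
code: std::shared_mutex stands in for Locks::mutator_lock_, and Object,
HeapChunkCallback, and DdmSendHeapSegments here are simplified stand-ins,
not the real ART definitions.

// Standalone sketch, not ART code. std::shared_mutex stands in for
// Locks::mutator_lock_; Object and the vector "space" are hypothetical
// stand-ins for ART's heap structures.
#include <iostream>
#include <shared_mutex>
#include <vector>

std::shared_mutex mutator_lock;  // analogue of Locks::mutator_lock_

struct Object { int field = 1; };

// Analogue of HeapChunkContext::HeapChunkCallback: it reads object
// fields, which is only safe while the shared lock is held.
int HeapChunkCallback(const Object& obj) { return obj.field; }

void DdmSendHeapSegments(const std::vector<Object>& space) {
  // Hold the shared lock for the entire walk, as the patch does,
  // instead of dropping it up front and reacquiring it per space.
  std::shared_lock<std::shared_mutex> lock(mutator_lock);
  int total = 0;
  for (const Object& obj : space) {
    total += HeapChunkCallback(obj);  // safe: shared lock is held
  }
  std::cout << "walked " << total << " chunks\n";
}

int main() {
  std::vector<Object> heap_space(3);
  DdmSendHeapSegments(heap_space);
  return 0;
}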
Diffstat (limited to 'runtime/debugger.cc')
-rw-r--r--  runtime/debugger.cc | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 96b44bfdf7..9d2ca7c05d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4364,11 +4364,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
 
   Thread* self = Thread::Current();
 
-  // To allow the Walk/InspectAll() below to exclusively-lock the
-  // mutator lock, temporarily release the shared access to the
-  // mutator lock here by transitioning to the suspended state.
   Locks::mutator_lock_->AssertSharedHeld(self);
-  self->TransitionFromRunnableToSuspended(kSuspended);
 
   // Send a series of heap segment chunks.
   HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
@@ -4382,32 +4378,39 @@ void Dbg::DdmSendHeapSegments(bool native) {
     gc::Heap* heap = Runtime::Current()->GetHeap();
     for (const auto& space : heap->GetContinuousSpaces()) {
       if (space->IsDlMallocSpace()) {
+        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
         // allocation then the first sizeof(size_t) may belong to it.
         context.SetChunkOverhead(sizeof(size_t));
         space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
       } else if (space->IsRosAllocSpace()) {
         context.SetChunkOverhead(0);
-        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
+        // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
+        self->TransitionFromRunnableToSuspended(kSuspended);
+        ThreadList* tl = Runtime::Current()->GetThreadList();
+        tl->SuspendAll();
+        {
+          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+          space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+        }
+        tl->ResumeAll();
+        self->TransitionFromSuspendedToRunnable();
       } else if (space->IsBumpPointerSpace()) {
+        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         context.SetChunkOverhead(0);
-        ReaderMutexLock mu(self, *Locks::mutator_lock_);
-        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
         space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
       } else {
         UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
       }
       context.ResetStartOfNextChunk();
     }
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // Walk the large objects, these are not in the AllocSpace.
     context.SetChunkOverhead(0);
     heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
   }
 
-  // Shared-lock the mutator lock back.
-  self->TransitionFromSuspendedToRunnable();
-  Locks::mutator_lock_->AssertSharedHeld(self);
-
   // Finally, send a heap end chunk.
   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
 }
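For the RosAlloc branch, the patch brackets the walk with
TransitionFromRunnableToSuspended()/SuspendAll() and
ResumeAll()/TransitionFromSuspendedToRunnable(), so the mutator lock is
effectively held exclusively before the heap bitmap lock is taken. A rough
standalone sketch of that ordering follows, assuming std::shared_mutex
stand-ins for both ART locks; WalkRosAllocSpace is a hypothetical name,
not ART's API.

// Standalone sketch of the lock ordering in the RosAlloc branch above.
// Not ART code: mutator_lock and heap_bitmap_lock are std::shared_mutex
// stand-ins for Locks::mutator_lock_ and Locks::heap_bitmap_lock_.
#include <mutex>
#include <shared_mutex>

std::shared_mutex mutator_lock;      // analogue of Locks::mutator_lock_
std::shared_mutex heap_bitmap_lock;  // analogue of Locks::heap_bitmap_lock_

void WalkRosAllocSpace() {
  // In ART the calling thread first drops its own shared hold via
  // TransitionFromRunnableToSuspended(); here the caller is assumed
  // not to hold mutator_lock at all.
  //
  // Analogue of ThreadList::SuspendAll(): take the outer (mutator) lock
  // exclusively, and always before the inner (heap bitmap) lock so the
  // acquisition order stays consistent.
  std::unique_lock<std::shared_mutex> suspend_all(mutator_lock);
  {
    // Analogue of ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_).
    std::shared_lock<std::shared_mutex> bitmap(heap_bitmap_lock);
    // ... walk the space, as space->AsRosAllocSpace()->Walk() does ...
  }
  // Guards release in reverse order as they leave scope, mirroring
  // ResumeAll() followed by TransitionFromSuspendedToRunnable().
}

int main() {
  WalkRosAllocSpace();
  return 0;
}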