author    Brian Carlstrom <bdc@google.com>  2014-09-05 13:01:41 -0700
committer Brian Carlstrom <bdc@google.com>  2014-09-05 13:24:24 -0700
commit    306db81aba41eb244a4e8299cf58ac18ae9999c7
tree      a069a9dda1b246466cbeee2736f46f18ef56714e /runtime/base
parent    b14339904c9cacc4af74260c7325e4eb32947f95
Fix numerous issues with DdmVmInternal allocation tracking
Issues addressed:
- Using without JDWP attached caused native crash.
- When buffer is full (64k entries), number of entries reported was 0.
- Disabling tracking after disabling tracking caused native crash.
- Asking for allocations after disabled caused native crash.
- Lock ordering issues between mutator lock and alloc tracker lock.

Adding 098-ddmc test to cover these cases.

Bug: 17392248
(cherry picked from commit a5815065ac0877add9c0db3605d27b4d6c426e61)
Change-Id: Ib0bc18dfcdafcc050ab9dceed3d167dd878d1d7a
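The lock ordering fix is the subtle part of this change: ART assigns every global mutex a LockLevel and requires the level to decrease as a thread acquires further locks, so giving the allocation tracker its own kAllocTrackerLock level below the mutator lock rules out the mutator/alloc-tracker inversion by construction. Below is a minimal standalone sketch of that leveled-mutex discipline, assuming only the C++ standard library; LeveledMutex, g_held_level, and the level numbers are illustrative names, not ART's implementation.

#include <cassert>
#include <cstdio>
#include <mutex>

thread_local int g_held_level = 1 << 30;  // Sentinel: no lock held yet.

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}

  void Lock() {
    // A thread may only acquire a mutex at a strictly lower level than
    // any mutex it already holds; an inversion aborts deterministically
    // here instead of deadlocking intermittently at runtime.
    assert(level_ < g_held_level && "lock ordering violation");
    mu_.lock();
    saved_level_ = g_held_level;
    g_held_level = level_;
  }

  void Unlock() {  // Assumes properly nested (LIFO) release order.
    g_held_level = saved_level_;
    mu_.unlock();
  }

 private:
  std::mutex mu_;
  const int level_;
  int saved_level_ = 0;
};

int main() {
  LeveledMutex mutator_lock(40);        // Higher level: acquired first.
  LeveledMutex alloc_tracker_lock(30);  // Lower level: acquired second.

  mutator_lock.Lock();
  alloc_tracker_lock.Lock();
  alloc_tracker_lock.Unlock();
  mutator_lock.Unlock();
  std::puts("ordered acquisition OK");
  // Locking alloc_tracker_lock first and then mutator_lock would trip
  // the assert above, which is the inversion this commit eliminates.
  return 0;
}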
Diffstat (limited to 'runtime/base')
-rw-r--r--  runtime/base/mutex.cc  26
-rw-r--r--  runtime/base/mutex.h   13
2 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 23caefc8bb..f01ea0c452 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,22 +30,24 @@
namespace art {
Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
-Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
@@ -830,21 +832,23 @@ void Locks::Init() {
DCHECK(modify_ldt_lock_ == nullptr);
}
DCHECK(abort_lock_ != nullptr);
+ DCHECK(alloc_tracker_lock_ != nullptr);
DCHECK(allocated_monitor_ids_lock_ != nullptr);
DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
+ DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(intern_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
+ DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
- DCHECK(profiler_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
- DCHECK(intern_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kThreadListSuspendThreadLock;
@@ -853,7 +857,12 @@ void Locks::Init() {
new Mutex("thread list suspend thread by .. lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- DCHECK_LT(new_level, current_lock_level); \
+ if (new_level >= current_lock_level) { \
+ /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
+ fprintf(stderr, "New local level %d is not less than current level %d\n", \
+ new_level, current_lock_level); \
+ exit(1); \
+ } \
current_lock_level = new_level;
UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
@@ -876,6 +885,14 @@ void Locks::Init() {
DCHECK(trace_lock_ == nullptr);
trace_lock_ = new Mutex("trace lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
+ DCHECK(deoptimization_lock_ == nullptr);
+ deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
+ DCHECK(alloc_tracker_lock_ == nullptr);
+ alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
@@ -911,7 +928,6 @@ void Locks::Init() {
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
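Note why the rewritten UPDATE_CURRENT_LOCK_LEVEL above reports with fprintf and exit(1) rather than CHECK or LOG(FATAL): Locks::Init() runs before abort_lock_ is created, and the runtime's fatal-logging path would try to take that lock. A self-contained sketch of this fail-fast-before-logging pattern follows; EARLY_INIT_CHECK_LT is a hypothetical name, not an ART macro.

#include <cstdio>
#include <cstdlib>

// A check usable before any logging/abort infrastructure exists:
// report on stderr and exit directly instead of routing through a
// CHECK/FATAL path that would touch not-yet-created locks.
#define EARLY_INIT_CHECK_LT(lhs, rhs)                                \
  do {                                                               \
    if (!((lhs) < (rhs))) {                                          \
      std::fprintf(stderr, "early init: %d is not less than %d\n",   \
                   static_cast<int>(lhs), static_cast<int>(rhs));    \
      std::exit(1);                                                  \
    }                                                                \
  } while (false)

int main() {
  int current_lock_level = 10;
  int new_level = 7;
  EARLY_INIT_CHECK_LT(new_level, current_lock_level);  // Passes: 7 < 10.
  std::puts("lock levels descend as required");
  return 0;
}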
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 2a623fdb05..6642b1e989 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -85,6 +85,7 @@ enum LockLevel {
kJniLoadLibraryLock,
kThreadListLock,
kBreakpointInvokeLock,
+ kAllocTrackerLock,
kDeoptimizationLock,
kTraceLock,
kProfilerLock,
@@ -557,9 +558,17 @@ class Locks {
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+ // Guards debugger recent allocation records.
+ static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+ // Guards updates to instrumentation to ensure mutual exclusion of
+ // events like deoptimization requests.
+ // TODO: improve name, perhaps instrumentation_update_lock_.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);
// Guards maintaining loading library data structures.
static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
@@ -586,7 +595,7 @@ class Locks {
static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
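The ACQUIRED_AFTER annotations updated in mutex.h document the acquisition order for Clang's -Wthread-safety analysis. Below is a hedged sketch of how such a macro is commonly wired up, following the pattern in the Clang thread-safety documentation; ART defines its own versions of these macros elsewhere, so every name here is illustrative.

#include <mutex>

#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)  // No-op on non-Clang compilers.
#endif

#define CAPABILITY(name)    THREAD_ANNOTATION(capability(name))
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION(acquired_after(__VA_ARGS__))
#define ACQUIRE()           THREAD_ANNOTATION(acquire_capability())
#define RELEASE()           THREAD_ANNOTATION(release_capability())

// Minimal annotated mutex wrapper, as in the Clang docs.
class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() { mu_.lock(); }
  void Unlock() RELEASE() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

// Declare the ordering this commit introduces:
// trace -> alloc tracker -> deoptimization -> thread list.
Mutex trace_lock;
Mutex alloc_tracker_lock ACQUIRED_AFTER(trace_lock);
Mutex deoptimization_lock ACQUIRED_AFTER(alloc_tracker_lock);
Mutex thread_list_lock ACQUIRED_AFTER(deoptimization_lock);

Clang has historically treated these ordering attributes more as documentation than as an enforced check, which is why ART also enforces the order at runtime via the LockLevel machinery in mutex.cc shown above.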