author    Mathieu Chartier <mathieuc@google.com>  2013-09-19 10:01:59 -0700
committer Mathieu Chartier <mathieuc@google.com>  2013-09-20 10:36:49 -0700
commit    c11d9b8870de5f860b13c84003ade7b3f3125a52 (patch)
tree      89b0250d35a950774f98fa1abfa9cc296c9d85db /runtime
parent    261d3cda32b28782d894be0244e617f78182ee3b (diff)
Re-enable concurrent system weak sweeping.
Enabled by disallowing new system weaks during the pause and re-allowing them
after the system weaks have been swept. Reduces GC pause by ~1ms. Fixes the
pause regression caused by the fix for Bug: 10626133.

Change-Id: If49d33e7ef19cb728ed3cef5187acfa53b9b05d8
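All three holders of system weak references (the intern table, the monitor
list, and the JNI weak globals table) gain the same gate in this change: a
boolean guarded by the container's lock, plus a condition variable that
blocked adders wait on. A minimal standalone sketch of that pattern, using
std::mutex and std::condition_variable rather than ART's internal
Mutex/ConditionVariable (class and member names here are illustrative, not
ART's API):

    #include <condition_variable>
    #include <list>
    #include <mutex>

    class GatedWeakTable {
     public:
      // GC side, called during the pause: refuse new entries until the
      // sweep has finished.
      void DisallowNew() {
        std::lock_guard<std::mutex> lock(mutex_);
        allow_new_ = false;
      }

      // GC side, called after the sweep: wake every thread that blocked
      // in Add() while the sweep was running.
      void AllowNew() {
        std::lock_guard<std::mutex> lock(mutex_);
        allow_new_ = true;
        cond_.notify_all();
      }

      // Mutator side: while the GC is sweeping, wait rather than insert
      // an entry the collector never marked and would wrongly sweep.
      void Add(void* entry) {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this] { return allow_new_; });
        entries_.push_front(entry);
      }

     private:
      std::mutex mutex_;
      std::condition_variable cond_;
      bool allow_new_ = true;
      std::list<void*> entries_;
    };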
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/gc/collector/mark_sweep.cc    | 67
-rw-r--r--  runtime/gc/collector/mark_sweep.h     |  4
-rw-r--r--  runtime/intern_table.cc               | 24
-rw-r--r--  runtime/intern_table.h                |  5
-rw-r--r--  runtime/jni_internal.cc               | 90
-rw-r--r--  runtime/jni_internal.h                | 24
-rw-r--r--  runtime/monitor.cc                    | 22
-rw-r--r--  runtime/monitor.h                     |  6
-rw-r--r--  runtime/runtime.cc                    | 12
-rw-r--r--  runtime/runtime.h                     |  3
-rw-r--r--  runtime/scoped_thread_state_change.h  |  8
-rw-r--r--  runtime/thread.cc                     |  5
12 files changed, 167 insertions(+), 103 deletions(-)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 167140cb97..6790144603 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -207,10 +207,6 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
}
ProcessReferences(self);
- {
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- SweepSystemWeaks();
- }
// Only need to do this if we have the card mark verification on, and only during concurrent GC.
if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
@@ -228,6 +224,12 @@ bool MarkSweep::HandleDirtyObjectsPhase() {
// Ensure that nobody inserted items in the live stack after we swapped the stacks.
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
+
+ // Disallow new system weaks to prevent a race which occurs when someone adds a new system
+ // weak before we sweep them. Since this new system weak may not be marked, the GC may
+ // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
+ // reference to a string that is about to be swept.
+ Runtime::Current()->DisallowNewSystemWeaks();
return true;
}
@@ -289,14 +291,16 @@ void MarkSweep::ReclaimPhase() {
if (!IsConcurrent()) {
ProcessReferences(self);
- {
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- SweepSystemWeaks();
- }
- timings_.StartSplit("PreSweepingGcVerification");
- heap_->PreSweepingGcVerification(this);
- timings_.EndSplit();
- } else {
+ }
+
+ {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ SweepSystemWeaks();
+ }
+
+ if (IsConcurrent()) {
+ Runtime::Current()->AllowNewSystemWeaks();
+
base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
@@ -1002,13 +1006,7 @@ void MarkSweep::ReMarkRoots() {
}
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock);
- for (const Object** entry : vm->weak_globals) {
- if (!is_marked(*entry, arg)) {
- *entry = kClearedJniWeakGlobal;
- }
- }
+ Runtime::Current()->GetJavaVM()->SweepWeakGlobals(is_marked, arg);
}
struct ArrayMarkedCheck {
@@ -1029,31 +1027,8 @@ bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
return false;
}
-void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
- Runtime* runtime = Runtime::Current();
- // The callbacks check
- // !is_marked where is_marked is the callback but we want
- // !IsMarked && IsLive
- // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
- // Or for swapped (IsLive || !IsMarked).
-
- timings_.StartSplit("SweepSystemWeaksArray");
- ArrayMarkedCheck visitor;
- visitor.live_stack = allocations;
- visitor.mark_sweep = this;
- runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
- runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
- SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
- timings_.EndSplit();
-}
-
void MarkSweep::SweepSystemWeaks() {
Runtime* runtime = Runtime::Current();
- // The callbacks check
- // !is_marked where is_marked is the callback but we want
- // !IsMarked && IsLive
- // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
- // Or for swapped (IsLive || !IsMarked).
timings_.StartSplit("SweepSystemWeaks");
runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
@@ -1087,12 +1062,7 @@ void MarkSweep::VerifySystemWeaks() {
// Verify system weaks, uses a special IsMarked callback which always returns true.
runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
-
- JavaVMExt* vm = runtime->GetJavaVM();
- ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock);
- for (const Object** entry : vm->weak_globals) {
- VerifyIsLive(*entry);
- }
+ runtime->GetJavaVM()->SweepWeakGlobals(VerifyIsLiveCallback, this);
}
struct SweepCallbackContext {
@@ -1172,7 +1142,6 @@ void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
space::DlMallocSpace* space = heap_->GetAllocSpace();
-
timings_.StartSplit("SweepArray");
// Newly allocated objects MUST be in the alloc space and those are the only objects which we are
// going to free.
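Condensed, the control flow the hunks above establish looks as follows. This
is a sketch with elisions, not a verbatim excerpt of mark_sweep.cc:

    // HandleDirtyObjectsPhase() runs with mutators suspended.
    bool MarkSweep::HandleDirtyObjectsPhase() {
      // ... process references, verify the live stack ...
      Runtime::Current()->DisallowNewSystemWeaks();  // gate closes in the pause
      return true;
    }

    // ReclaimPhase() runs after the mutators have resumed.
    void MarkSweep::ReclaimPhase() {
      // ... the non-concurrent path processes references here ...
      {
        WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
        SweepSystemWeaks();  // tables are stable: all adds are gated
      }
      if (IsConcurrent()) {
        Runtime::Current()->AllowNewSystemWeaks();  // gate reopens, waiters wake
      }
      // ... unmark the alloc stack, sweep the spaces ...
    }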
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 435b086163..feef992c83 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -204,10 +204,6 @@ class MarkSweep : public GarbageCollector {
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Only sweep the weaks which are inside of an allocation stack.
- void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 89c15f8db1..20729790a9 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -28,7 +28,9 @@
namespace art {
InternTable::InternTable()
- : intern_table_lock_("InternTable lock"), is_dirty_(false) {}
+ : intern_table_lock_("InternTable lock"), is_dirty_(false), allow_new_interns_(true),
+ new_intern_condition_("New intern condition", intern_table_lock_) {
+}
size_t InternTable::Size() const {
MutexLock mu(Thread::Current(), intern_table_lock_);
@@ -111,12 +113,30 @@ static mirror::String* LookupStringFromImage(mirror::String* s)
return NULL;
}
+void InternTable::AllowNewInterns() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, intern_table_lock_);
+ allow_new_interns_ = true;
+ new_intern_condition_.Broadcast(self);
+}
+
+void InternTable::DisallowNewInterns() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, intern_table_lock_);
+ allow_new_interns_ = false;
+}
+
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
- MutexLock mu(Thread::Current(), intern_table_lock_);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, intern_table_lock_);
DCHECK(s != NULL);
uint32_t hash_code = s->GetHashCode();
+ while (UNLIKELY(!allow_new_interns_)) {
+ new_intern_condition_.WaitHoldingLocks(self);
+ }
+
if (is_strong) {
// Check the strong table for a match.
mirror::String* strong = Lookup(strong_interns_, s, hash_code);
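The while/wait added to InternTable::Insert() is what closes the interning
race named in the commit message: without it, a thread could intern a string
and receive a strong reference while the concurrent sweep is in the middle of
clearing that same weak intern entry.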
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 07615dca89..e68af907ea 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -66,6 +66,9 @@ class InternTable {
void DumpForSigQuit(std::ostream& os) const;
+ void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
typedef std::multimap<int32_t, mirror::String*> Table;
@@ -79,6 +82,8 @@ class InternTable {
mutable Mutex intern_table_lock_;
bool is_dirty_ GUARDED_BY(intern_table_lock_);
+ bool allow_new_interns_ GUARDED_BY(intern_table_lock_);
+ ConditionVariable new_intern_condition_ GUARDED_BY(intern_table_lock_);
Table strong_interns_ GUARDED_BY(intern_table_lock_);
Table weak_interns_ GUARDED_BY(intern_table_lock_);
};
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d72ddf688d..0a0028462d 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -86,14 +86,7 @@ static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must f
static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == NULL) {
- return NULL;
- }
- JavaVMExt* vm = soa.Vm();
- IndirectReferenceTable& weak_globals = vm->weak_globals;
- WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);
- IndirectRef ref = weak_globals.Add(IRT_FIRST_SEGMENT, obj);
- return reinterpret_cast<jweak>(ref);
+ return soa.Vm()->AddWeakGlobalReference(soa.Self(), obj);
}
static bool IsBadJniVersion(int version) {
@@ -854,17 +847,9 @@ class JNI {
}
static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
- if (obj == NULL) {
- return;
- }
- ScopedObjectAccess soa(env);
- JavaVMExt* vm = soa.Vm();
- IndirectReferenceTable& weak_globals = vm->weak_globals;
- WriterMutexLock mu(soa.Self(), vm->weak_globals_lock);
-
- if (!weak_globals.Remove(IRT_FIRST_SEGMENT, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
- << "failed to find entry";
+ if (obj != nullptr) {
+ ScopedObjectAccess soa(env);
+ soa.Vm()->DeleteWeakGlobalRef(soa.Self(), obj);
}
}
@@ -2996,10 +2981,12 @@ JavaVMExt::JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options)
pin_table("pin table", kPinTableInitial, kPinTableMax),
globals_lock("JNI global reference table lock"),
globals(gGlobalsInitial, gGlobalsMax, kGlobal),
- weak_globals_lock("JNI weak global reference table lock"),
- weak_globals(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
libraries_lock("JNI shared libraries map lock", kLoadLibraryLock),
- libraries(new Libraries) {
+ libraries(new Libraries),
+ weak_globals_lock_("JNI weak global reference table lock"),
+ weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
+ allow_new_weak_globals_(true),
+ weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
functions = unchecked_functions = &gJniInvokeInterface;
if (options->check_jni_) {
SetCheckJniEnabled(true);
@@ -3010,6 +2997,26 @@ JavaVMExt::~JavaVMExt() {
delete libraries;
}
+jweak JavaVMExt::AddWeakGlobalReference(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+ return reinterpret_cast<jweak>(ref);
+}
+
+void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
+ MutexLock mu(self, weak_globals_lock_);
+ if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
+}
+
void JavaVMExt::SetCheckJniEnabled(bool enabled) {
check_jni = enabled;
functions = enabled ? GetCheckJniInvokeInterface() : &gJniInvokeInterface;
@@ -3031,9 +3038,9 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
os << "; globals=" << globals.Capacity();
}
{
- ReaderMutexLock mu(self, weak_globals_lock);
- if (weak_globals.Capacity() > 0) {
- os << " (plus " << weak_globals.Capacity() << " weak)";
+ MutexLock mu(self, weak_globals_lock_);
+ if (weak_globals_.Capacity() > 0) {
+ os << " (plus " << weak_globals_.Capacity() << " weak)";
}
}
os << '\n';
@@ -3044,6 +3051,35 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
}
}
+void JavaVMExt::DisallowNewWeakGlobals() {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ allow_new_weak_globals_ = false;
+}
+
+void JavaVMExt::AllowNewWeakGlobals() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, weak_globals_lock_);
+ allow_new_weak_globals_ = true;
+ weak_globals_add_condition_.Broadcast(self);
+}
+
+void JavaVMExt::SweepWeakGlobals(IsMarkedTester is_marked, void* arg) {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ for (const Object** entry : weak_globals_) {
+ if (!is_marked(*entry, arg)) {
+ *entry = kClearedJniWeakGlobal;
+ }
+ }
+}
+
+mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ return const_cast<mirror::Object*>(weak_globals_.Get(ref));
+}
+
void JavaVMExt::DumpReferenceTables(std::ostream& os) {
Thread* self = Thread::Current();
{
@@ -3051,8 +3087,8 @@ void JavaVMExt::DumpReferenceTables(std::ostream& os) {
globals.Dump(os);
}
{
- ReaderMutexLock mu(self, weak_globals_lock);
- weak_globals.Dump(os);
+ MutexLock mu(self, weak_globals_lock_);
+ weak_globals_.Dump(os);
}
{
MutexLock mu(self, pins_lock);
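On the mutator side the gate is reached through the standard JNI entry
points, which funnel into the AddWeakGlobalReference(), DeleteWeakGlobalRef(),
and DecodeWeakGlobal() methods added above. A hypothetical native caller (the
names g_cached_ref, CacheObject, and GetCachedObject are illustrative; the
JNI functions are the standard API):

    #include <jni.h>

    static jweak g_cached_ref = nullptr;

    // NewWeakGlobalRef() may now block briefly if the GC is between
    // DisallowNewSystemWeaks() and AllowNewSystemWeaks().
    void CacheObject(JNIEnv* env, jobject obj) {
      g_cached_ref = env->NewWeakGlobalRef(obj);
    }

    // Using the weak global decodes it via DecodeWeakGlobal(), which also
    // waits while a sweep is in flight, so callers never observe the table
    // mid-sweep. NewLocalRef() returns NULL once the referent is cleared.
    jobject GetCachedObject(JNIEnv* env) {
      if (g_cached_ref == nullptr || env->IsSameObject(g_cached_ref, nullptr)) {
        return nullptr;  // never cached, or already cleared by the GC
      }
      return env->NewLocalRef(g_cached_ref);
    }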
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index bad3841c88..32d0bfcee8 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -61,7 +61,8 @@ void InvokeWithArgArray(const ScopedObjectAccess& soa, mirror::ArtMethod* method
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
-struct JavaVMExt : public JavaVM {
+class JavaVMExt : public JavaVM {
+ public:
JavaVMExt(Runtime* runtime, Runtime::ParsedOptions* options);
~JavaVMExt();
@@ -91,6 +92,15 @@ struct JavaVMExt : public JavaVM {
void VisitRoots(RootVisitor*, void*);
+ void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jweak AddWeakGlobalReference(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DeleteWeakGlobalRef(Thread* self, jweak obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SweepWeakGlobals(IsMarkedTester is_marked, void* arg);
+ mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref);
+
Runtime* runtime;
// Used for testing. By default, we'll LOG(FATAL) the reason.
@@ -115,15 +125,19 @@ struct JavaVMExt : public JavaVM {
ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
IndirectReferenceTable globals GUARDED_BY(globals_lock);
- // JNI weak global references.
- ReaderWriterMutex weak_globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- IndirectReferenceTable weak_globals GUARDED_BY(weak_globals_lock);
-
Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
Libraries* libraries GUARDED_BY(libraries_lock);
// Used by -Xcheck:jni.
const JNIInvokeInterface* unchecked_functions;
+
+ private:
+ // TODO: Make the other members of this class also private.
+ // JNI weak global references.
+ Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
+ bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
};
struct JNIEnvExt : public JNIEnv {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 66c51e6eb6..088d1f7a88 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -989,7 +989,9 @@ void Monitor::TranslateLocation(const mirror::ArtMethod* method, uint32_t dex_pc
line_number = mh.GetLineNumFromDexPC(dex_pc);
}
-MonitorList::MonitorList() : monitor_list_lock_("MonitorList lock") {
+MonitorList::MonitorList()
+ : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock"),
+ monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
}
MonitorList::~MonitorList() {
@@ -997,8 +999,24 @@ MonitorList::~MonitorList() {
STLDeleteElements(&list_);
}
-void MonitorList::Add(Monitor* m) {
+void MonitorList::DisallowNewMonitors() {
MutexLock mu(Thread::Current(), monitor_list_lock_);
+ allow_new_monitors_ = false;
+}
+
+void MonitorList::AllowNewMonitors() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
+ allow_new_monitors_ = true;
+ monitor_add_condition_.Broadcast(self);
+}
+
+void MonitorList::Add(Monitor* m) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
+ while (UNLIKELY(!allow_new_monitors_)) {
+ monitor_add_condition_.WaitHoldingLocks(self);
+ }
list_.push_front(m);
}
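The same wait guards MonitorList::Add(): a thread inflating a thin lock into
a fat Monitor during the sweep would otherwise register a monitor the
collector never marked, which the sweep could then incorrectly reclaim.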
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 66517683c2..0b5b7e546a 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -182,12 +182,14 @@ class MonitorList {
~MonitorList();
void Add(Monitor* m);
-
void SweepMonitorList(IsMarkedTester is_marked, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
+ void DisallowNewMonitors();
+ void AllowNewMonitors();
private:
+ bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
std::list<Monitor*> list_ GUARDED_BY(monitor_list_lock_);
friend class Monitor;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f7b5f7442b..657735a06b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1237,6 +1237,18 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
return method.get();
}
+void Runtime::DisallowNewSystemWeaks() {
+ monitor_list_->DisallowNewMonitors();
+ intern_table_->DisallowNewInterns();
+ java_vm_->DisallowNewWeakGlobals();
+}
+
+void Runtime::AllowNewSystemWeaks() {
+ monitor_list_->AllowNewMonitors();
+ intern_table_->AllowNewInterns();
+ java_vm_->AllowNewWeakGlobals();
+}
+
void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
callee_save_methods_[type] = method;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 21161a03b4..365d2d860b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -302,6 +302,9 @@ class Runtime {
return "2.0.0";
}
+ void DisallowNewSystemWeaks() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 5f649b117b..d3f3a88d66 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -196,8 +196,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
template<typename T>
T Decode(jobject obj) const
- LOCKS_EXCLUDED(JavaVMExt::globals_lock,
- JavaVMExt::weak_globals_lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
@@ -205,8 +203,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
}
mirror::ArtField* DecodeField(jfieldID fid) const
- LOCKS_EXCLUDED(JavaVMExt::globals_lock,
- JavaVMExt::weak_globals_lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
@@ -218,8 +214,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
}
jfieldID EncodeField(mirror::ArtField* field) const
- LOCKS_EXCLUDED(JavaVMExt::globals_lock,
- JavaVMExt::weak_globals_lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
@@ -230,8 +224,6 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
}
mirror::ArtMethod* DecodeMethod(jmethodID mid) const
- LOCKS_EXCLUDED(JavaVMExt::globals_lock,
- JavaVMExt::weak_globals_lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK_EQ(thread_state_, kRunnable); // Don't work with raw objects in non-runnable states.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a454195316..558ceb47f9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1188,10 +1188,7 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
result = const_cast<mirror::Object*>(globals.Get(ref));
} else {
DCHECK_EQ(kind, kWeakGlobal);
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- IndirectReferenceTable& weak_globals = vm->weak_globals;
- ReaderMutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
- result = const_cast<mirror::Object*>(weak_globals.Get(ref));
+ result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (result == kClearedJniWeakGlobal) {
// This is a special case where it's okay to return NULL.
return NULL;