author     Mathieu Chartier <mathieuc@google.com>                   2016-08-24 16:36:42 +0000
committer  android-build-merger <android-build-merger@google.com>  2016-08-24 16:36:42 +0000
commit     4d6ad96cc027778b221984b502ab2274fe258aa1 (patch)
tree       b19f9fa21f13c504b73b8df5d1097e66bd343668
parent     1f678c0b95edeeec50bdc9ab07f9220be14e86b3 (diff)
parent     23da026ec7a7fae22833ed2f61a80d9f9bf7e732 (diff)
Revert "Revert "Use try lock to fix class resolution race""
am: 23da026ec7

Change-Id: Icc27f56679886cddbacdd2fcb198bdeeafdd1904
-rw-r--r--   runtime/class_linker.cc        37
-rw-r--r--   runtime/mirror/object-inl.h     6
-rw-r--r--   runtime/mirror/object.h         5
-rw-r--r--   runtime/monitor.cc             52
-rw-r--r--   runtime/monitor.h              11
-rw-r--r--   runtime/monitor_test.cc        57
-rw-r--r--   runtime/object_lock.cc         15
-rw-r--r--   runtime/object_lock.h          21
8 files changed, 175 insertions, 29 deletions
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1914733b7c..498ace3b2d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2179,20 +2179,37 @@ mirror::Class* ClassLinker::EnsureResolved(Thread* self,
}
// Wait for the class if it has not already been linked.
- if (!klass->IsResolved() && !klass->IsErroneous()) {
+ size_t index = 0;
+ // Maximum number of yield iterations until we start sleeping.
+ static const size_t kNumYieldIterations = 1000;
+ // How long each sleep is in us.
+ static const size_t kSleepDurationUS = 1000; // 1 ms.
+ while (!klass->IsResolved() && !klass->IsErroneous()) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Class> h_class(hs.NewHandleWrapper(&klass));
- ObjectLock<mirror::Class> lock(self, h_class);
- // Check for circular dependencies between classes.
- if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
- ThrowClassCircularityError(h_class.Get());
- mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
- return nullptr;
+ {
+ ObjectTryLock<mirror::Class> lock(self, h_class);
+ // Can not use a monitor wait here since it may block when returning and deadlock if another
+ // thread has locked klass.
+ if (lock.Acquired()) {
+ // Check for circular dependencies between classes, the lock is required for SetStatus.
+ if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
+ ThrowClassCircularityError(h_class.Get());
+ mirror::Class::SetStatus(h_class, mirror::Class::kStatusError, self);
+ return nullptr;
+ }
+ }
}
- // Wait for the pending initialization to complete.
- while (!h_class->IsResolved() && !h_class->IsErroneous()) {
- lock.WaitIgnoringInterrupts();
+ {
+ // Handle wrapper deals with klass moving.
+ ScopedThreadSuspension sts(self, kSuspended);
+ if (index < kNumYieldIterations) {
+ sched_yield();
+ } else {
+ usleep(kSleepDurationUS);
+ }
}
+ ++index;
}
if (klass->IsErroneous()) {
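The hunk above replaces the monitor wait in EnsureResolved with a non-blocking try-lock plus a bounded yield-then-sleep polling loop. Below is a minimal standalone sketch of that back-off pattern, assuming only the C++ standard library: std::mutex and std::atomic stand in for ART's ObjectTryLock and class-status checks, and WaitForResolution is an illustrative name, not ART code.

#include <atomic>
#include <chrono>
#include <cstddef>
#include <mutex>
#include <thread>

std::atomic<bool> resolved{false};  // Stand-in for klass->IsResolved().
std::mutex klass_monitor;           // Stand-in for the class object's monitor.

void WaitForResolution() {
  // Same back-off constants as the patch: yield up to 1000 times, then
  // sleep in 1 ms increments.
  static const std::size_t kNumYieldIterations = 1000;
  static const std::size_t kSleepDurationUS = 1000;
  std::size_t index = 0;
  while (!resolved.load(std::memory_order_acquire)) {
    {
      // Non-blocking attempt: never waits on the monitor, so it cannot
      // deadlock against a thread that already holds it.
      std::unique_lock<std::mutex> lock(klass_monitor, std::try_to_lock);
      if (lock.owns_lock()) {
        // The real code checks for a circularity error here, since
        // SetStatus requires the lock.
      }
    }
    // Back off between attempts instead of blocking on the monitor.
    if (index < kNumYieldIterations) {
      std::this_thread::yield();
    } else {
      std::this_thread::sleep_for(std::chrono::microseconds(kSleepDurationUS));
    }
    ++index;
  }
}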
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 76a36ac893..e1097fa7ca 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -106,7 +106,11 @@ inline uint32_t Object::GetLockOwnerThreadId() {
}
inline mirror::Object* Object::MonitorEnter(Thread* self) {
- return Monitor::MonitorEnter(self, this);
+ return Monitor::MonitorEnter(self, this, /*trylock*/false);
+}
+
+inline mirror::Object* Object::MonitorTryEnter(Thread* self) {
+ return Monitor::MonitorEnter(self, this, /*trylock*/true);
}
inline bool Object::MonitorExit(Thread* self) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 0ee46c3556..e174cbcadc 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -140,6 +140,11 @@ class MANAGED LOCKABLE Object {
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
+ // Try to enter the monitor; returns non-null if we succeeded.
+ mirror::Object* MonitorTryEnter(Thread* self)
+ EXCLUSIVE_LOCK_FUNCTION()
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
mirror::Object* MonitorEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0f567053e5..3771877c9d 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -314,21 +314,34 @@ std::string Monitor::PrettyContentionInfo(const std::string& owner_name,
return oss.str();
}
+bool Monitor::TryLockLocked(Thread* self) {
+ if (owner_ == nullptr) { // Unowned.
+ owner_ = self;
+ CHECK_EQ(lock_count_, 0);
+ // When debugging, save the current monitor holder for future
+ // acquisition failures to use in sampled logging.
+ if (lock_profiling_threshold_ != 0) {
+ locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
+ }
+ } else if (owner_ == self) { // Recursive.
+ lock_count_++;
+ } else {
+ return false;
+ }
+ AtraceMonitorLock(self, GetObject(), false /* is_wait */);
+ return true;
+}
+
+bool Monitor::TryLock(Thread* self) {
+ MutexLock mu(self, monitor_lock_);
+ return TryLockLocked(self);
+}
+
void Monitor::Lock(Thread* self) {
MutexLock mu(self, monitor_lock_);
while (true) {
- if (owner_ == nullptr) { // Unowned.
- owner_ = self;
- CHECK_EQ(lock_count_, 0);
- // When debugging, save the current monitor holder for future
- // acquisition failures to use in sampled logging.
- if (lock_profiling_threshold_ != 0) {
- locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
- }
- break;
- } else if (owner_ == self) { // Recursive.
- lock_count_++;
- break;
+ if (TryLockLocked(self)) {
+ return;
}
// Contended.
const bool log_contention = (lock_profiling_threshold_ != 0);
@@ -430,8 +443,6 @@ void Monitor::Lock(Thread* self) {
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
}
-
- AtraceMonitorLock(self, GetObject(), false /* is_wait */);
}
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
@@ -852,7 +863,7 @@ static mirror::Object* FakeUnlock(mirror::Object* obj)
return obj;
}
-mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
+mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool trylock) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
self->AssertThreadSuspensionIsAllowable();
@@ -898,6 +909,9 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
InflateThinLocked(self, h_obj, lock_word, 0);
}
} else {
+ if (trylock) {
+ return nullptr;
+ }
// Contention.
contention_count++;
Runtime* runtime = Runtime::Current();
@@ -916,8 +930,12 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
}
case LockWord::kFatLocked: {
Monitor* mon = lock_word.FatLockMonitor();
- mon->Lock(self);
- return h_obj.Get(); // Success!
+ if (trylock) {
+ return mon->TryLock(self) ? h_obj.Get() : nullptr;
+ } else {
+ mon->Lock(self);
+ return h_obj.Get(); // Success!
+ }
}
case LockWord::kHashCode:
// Inflate with the existing hashcode.
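The monitor.cc hunks factor the unowned/recursive acquisition cases out of Lock() into TryLockLocked(), add a non-blocking TryLock() wrapper, and let MonitorEnter fail fast on contention when trylock is set. A rough standard-library sketch of that split follows; ToyMonitor, its members, and the use of std::thread::id in place of Thread* are illustrative assumptions, not ART's types.

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

class ToyMonitor {
 public:
  // Caller must already hold monitor_lock_ (the *Locked convention above).
  bool TryLockLocked(std::thread::id self) {
    if (owner_ == std::thread::id()) {  // Unowned: take ownership.
      owner_ = self;
      lock_count_ = 0;
    } else if (owner_ == self) {        // Recursive re-entry.
      ++lock_count_;
    } else {
      return false;                     // Held by another thread: fail fast.
    }
    return true;
  }

  bool TryLock(std::thread::id self) {
    std::lock_guard<std::mutex> mu(monitor_lock_);
    return TryLockLocked(self);
  }

  // Blocking path reuses the same helper, as Lock() does after the patch.
  void Lock(std::thread::id self) {
    std::unique_lock<std::mutex> mu(monitor_lock_);
    while (!TryLockLocked(self)) {
      cv_.wait(mu);  // The real code records contention and logs here.
    }
  }

  void Unlock(std::thread::id self) {
    std::lock_guard<std::mutex> mu(monitor_lock_);
    if (owner_ != self) {
      return;  // The real code raises an IllegalMonitorStateException.
    }
    if (lock_count_ > 0) {
      --lock_count_;                    // Drop one recursive level.
    } else {
      owner_ = std::thread::id();       // Fully released.
      cv_.notify_one();
    }
  }

 private:
  std::mutex monitor_lock_;             // Guards owner_ and lock_count_.
  std::condition_variable cv_;
  std::thread::id owner_{};             // Default id means "unowned".
  uint32_t lock_count_ = 0;
};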
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 7b4b8f9467..1d829e1d68 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -62,7 +62,7 @@ class Monitor {
NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy.
// NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
- static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
+ static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj, bool trylock)
EXCLUSIVE_LOCK_FUNCTION(obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
@@ -193,6 +193,15 @@ class Monitor {
!monitor_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Try to lock without blocking; returns true if we acquired the lock.
+ bool TryLock(Thread* self)
+ REQUIRES(!monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Variant for already holding the monitor lock.
+ bool TryLockLocked(Thread* self)
+ REQUIRES(monitor_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void Lock(Thread* self)
REQUIRES(!monitor_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 83e0c0dea9..48d256c985 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -26,6 +26,7 @@
#include "handle_scope-inl.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h" // Strings are easiest to allocate
+#include "object_lock.h"
#include "scoped_thread_state_change.h"
#include "thread_pool.h"
@@ -374,4 +375,60 @@ TEST_F(MonitorTest, CheckExceptionsWait3) {
"Monitor test thread pool 3");
}
+class TryLockTask : public Task {
+ public:
+ explicit TryLockTask(Handle<mirror::Object> obj) : obj_(obj) {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ // Lock is held by other thread, try lock should fail.
+ ObjectTryLock<mirror::Object> lock(self, obj_);
+ EXPECT_FALSE(lock.Acquired());
+ }
+
+ void Finalize() {
+ delete this;
+ }
+
+ private:
+ Handle<mirror::Object> obj_;
+};
+
+// Test trylock in deadlock scenarios.
+TEST_F(MonitorTest, TestTryLock) {
+ ScopedLogSeverity sls(LogSeverity::FATAL);
+
+ Thread* const self = Thread::Current();
+ ThreadPool thread_pool("the pool", 2);
+ ScopedObjectAccess soa(self);
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Object> obj1(
+ hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
+ Handle<mirror::Object> obj2(
+ hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
+ {
+ ObjectLock<mirror::Object> lock1(self, obj1);
+ ObjectLock<mirror::Object> lock2(self, obj1);
+ {
+ ObjectTryLock<mirror::Object> trylock(self, obj1);
+ EXPECT_TRUE(trylock.Acquired());
+ }
+ // Test failure case.
+ thread_pool.AddTask(self, new TryLockTask(obj1));
+ thread_pool.StartWorkers(self);
+ ScopedThreadSuspension sts(self, kSuspended);
+ thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false);
+ }
+ // Test that the trylock actually locks the object.
+ {
+ ObjectTryLock<mirror::Object> trylock(self, obj1);
+ EXPECT_TRUE(trylock.Acquired());
+ obj1->Notify(self);
+ // Since we hold the lock there should be no monitor state exception.
+ self->AssertNoPendingException();
+ }
+ thread_pool.StopWorkers(self);
+}
+
+
} // namespace art
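The failure case the test exercises (a second thread try-locking an object the main thread already holds) has a direct standard-library analogue. A small runnable sketch, assuming only std::mutex and std::thread rather than ART's thread pool and handles:

#include <cassert>
#include <mutex>
#include <thread>

int main() {
  std::mutex m;
  std::unique_lock<std::mutex> held(m);  // Main thread holds the lock.

  bool worker_acquired = true;
  std::thread worker([&] {
    // Lock is held by the other thread, so the try-lock should fail
    // immediately instead of blocking (the TryLockTask expectation above).
    std::unique_lock<std::mutex> attempt(m, std::try_to_lock);
    worker_acquired = attempt.owns_lock();
  });
  worker.join();
  assert(!worker_acquired);

  held.unlock();
  // With the lock released, a try-lock now succeeds, mirroring the
  // "trylock actually locks the object" case above.
  std::unique_lock<std::mutex> retry(m, std::try_to_lock);
  assert(retry.owns_lock());
  return 0;
}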
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index f7accc0f31..b8754a4093 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,7 +47,22 @@ void ObjectLock<T>::NotifyAll() {
obj_->NotifyAll(self_);
}
+template <typename T>
+ObjectTryLock<T>::ObjectTryLock(Thread* self, Handle<T> object) : self_(self), obj_(object) {
+ CHECK(object.Get() != nullptr);
+ acquired_ = obj_->MonitorTryEnter(self_) != nullptr;
+}
+
+template <typename T>
+ObjectTryLock<T>::~ObjectTryLock() {
+ if (acquired_) {
+ obj_->MonitorExit(self_);
+ }
+}
+
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
+template class ObjectTryLock<mirror::Class>;
+template class ObjectTryLock<mirror::Object>;
} // namespace art
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index eb7cbd85d3..7f02b37258 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -45,6 +45,27 @@ class ObjectLock {
DISALLOW_COPY_AND_ASSIGN(ObjectLock);
};
+template <typename T>
+class ObjectTryLock {
+ public:
+ ObjectTryLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ~ObjectTryLock() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool Acquired() const {
+ return acquired_;
+ }
+
+ private:
+ Thread* const self_;
+ Handle<T> const obj_;
+ bool acquired_;
+
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectTryLock);
+};
+
+
} // namespace art
#endif // ART_RUNTIME_OBJECT_LOCK_H_
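ObjectTryLock is an RAII guard that attempts the lock in its constructor, exposes Acquired(), and only unlocks in its destructor if the attempt succeeded. For reference, a hand-rolled standard-library guard of the same shape might look like this (TryLockGuard is a hypothetical name; in plain C++ code std::unique_lock with std::try_to_lock covers the same ground):

#include <mutex>

class TryLockGuard {
 public:
  // Attempt the lock without blocking; remember whether it succeeded.
  explicit TryLockGuard(std::mutex& m) : m_(m), acquired_(m_.try_lock()) {}

  // Only release the mutex if the constructor actually acquired it,
  // mirroring ObjectTryLock's destructor above.
  ~TryLockGuard() {
    if (acquired_) {
      m_.unlock();
    }
  }

  bool Acquired() const { return acquired_; }

  TryLockGuard(const TryLockGuard&) = delete;
  TryLockGuard& operator=(const TryLockGuard&) = delete;

 private:
  std::mutex& m_;
  const bool acquired_;
};

Usage mirrors the test above: construct the guard, branch on Acquired(), and let scope exit release the lock only if it was actually taken.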