summaryrefslogtreecommitdiffstats
path: root/runtime/monitor.cc
diff options
context:
space:
mode:
authorAndreas Gampe <agampe@google.com>2014-04-17 10:35:09 -0700
committerAndreas Gampe <agampe@google.com>2014-07-10 00:51:04 -0700
commit74240819ae09e29b2753ef38f4eb4be1c2762e2e (patch)
tree61e2d3aa7268ce49fe77715593896f59feb92fe6 /runtime/monitor.cc
parent32710dd4a0232149002a5ae7bde1c640cdffd564 (diff)
downloadart-74240819ae09e29b2753ef38f4eb4be1c2762e2e.tar.gz
art-74240819ae09e29b2753ef38f4eb4be1c2762e2e.tar.bz2
art-74240819ae09e29b2753ef38f4eb4be1c2762e2e.zip
Use memory chunks for monitors on LP64
Monitor IDs in lock words are only 30b. On a 32b system that works fine, as memory is usually aligned enough that shifting works out. On 64b systems, the virtual memory space is too large for that. This adds memory chunks into which we allocate the monitors so that we have base_addr + offset and can use the offset as the monitor ID. To allow for relatively compact but growable storage, we use a list of chunks. Added a global lock for the monitor pool. Change-Id: I0e290c4914a2556e0b2eef9902422d7c4dcf536d
Diffstat (limited to 'runtime/monitor.cc')
-rw-r--r--runtime/monitor.cc57
1 file changed, 45 insertions, 12 deletions
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index eb62a694e0..c3ec38d1d3 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -90,7 +90,33 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
hash_code_(hash_code),
locking_method_(NULL),
locking_dex_pc_(0),
- monitor_id_(MonitorPool::CreateMonitorId(self, this)) {
+ monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
+#ifdef __LP64__
+ DCHECK(false) << "Should not be reached in 64b";
+ next_free_ = nullptr;
+#endif
+ // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
+ // with the owner unlocking the thin-lock.
+ CHECK(owner == nullptr || owner == self || owner->IsSuspended());
+ // The identity hash code is set for the life time of the monitor.
+}
+
+Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
+ MonitorId id)
+ : monitor_lock_("a monitor lock", kMonitorLock),
+ monitor_contenders_("monitor contenders", monitor_lock_),
+ num_waiters_(0),
+ owner_(owner),
+ lock_count_(0),
+ obj_(obj),
+ wait_set_(NULL),
+ hash_code_(hash_code),
+ locking_method_(NULL),
+ locking_dex_pc_(0),
+ monitor_id_(id) {
+#ifdef __LP64__
+ next_free_ = nullptr;
+#endif
// We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
// with the owner unlocking the thin-lock.
CHECK(owner == nullptr || owner == self || owner->IsSuspended());
@@ -146,7 +172,6 @@ bool Monitor::Install(Thread* self) {
}
Monitor::~Monitor() {
- MonitorPool::ReleaseMonitorId(monitor_id_);
// Deflated monitors have a null object.
}
@@ -621,20 +646,23 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
* inflating the lock and so the caller should read the monitor following the call.
*/
void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
// Allocate and acquire a new monitor.
- std::unique_ptr<Monitor> m(new Monitor(self, owner, obj, hash_code));
+ Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
+ DCHECK(m != nullptr);
if (m->Install(self)) {
if (owner != nullptr) {
VLOG(monitor) << "monitor: thread" << owner->GetThreadId()
- << " created monitor " << m.get() << " for object " << obj;
+ << " created monitor " << m << " for object " << obj;
} else {
VLOG(monitor) << "monitor: Inflate with hashcode " << hash_code
- << " created monitor " << m.get() << " for object " << obj;
+ << " created monitor " << m << " for object " << obj;
}
- Runtime::Current()->GetMonitorList()->Add(m.release());
+ Runtime::Current()->GetMonitorList()->Add(m);
CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
+ } else {
+ MonitorPool::ReleaseMonitor(self, m);
}
}
@@ -1071,8 +1099,12 @@ MonitorList::MonitorList()
}
MonitorList::~MonitorList() {
- MutexLock mu(Thread::Current(), monitor_list_lock_);
- STLDeleteElements(&list_);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
+ // Release all monitors to the pool.
+ // TODO: Is it an invariant that *all* open monitors are in the list? Then we could
+ // clear faster in the pool.
+ MonitorPool::ReleaseMonitors(self, &list_);
}
void MonitorList::DisallowNewMonitors() {
@@ -1097,7 +1129,8 @@ void MonitorList::Add(Monitor* m) {
}
void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), monitor_list_lock_);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
// Disable the read barrier in GetObject() as this is called by GC.
@@ -1107,7 +1140,7 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
<< obj;
- delete m;
+ MonitorPool::ReleaseMonitor(self, m);
it = list_.erase(it);
} else {
m->SetObject(new_obj);