author     Andreas Gampe <agampe@google.com>  2014-07-10 07:54:06 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-07-09 14:45:53 +0000
commit     c4f72ec44660f804b595bfaf2b959f46fd2ff00d (patch)
tree       df8764e4ec382b4e4810b8d0309823a40ef47d5c
parent     af4cf5d72a2bf77f0e442bedb208f9227f262c89 (diff)
parent     74240819ae09e29b2753ef38f4eb4be1c2762e2e (diff)
Merge "Use memory chunks for monitors on LP64"
-rw-r--r--  build/Android.gtest.mk          1
-rw-r--r--  runtime/atomic.h               25
-rw-r--r--  runtime/base/mutex.cc           6
-rw-r--r--  runtime/base/mutex.h            6
-rw-r--r--  runtime/monitor.cc             57
-rw-r--r--  runtime/monitor.h              10
-rw-r--r--  runtime/monitor_pool.cc       128
-rw-r--r--  runtime/monitor_pool.h        122
-rw-r--r--  runtime/monitor_pool_test.cc  125
9 files changed, 417 insertions, 63 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 10cd1cc65a..d501b57883 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -110,6 +110,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
runtime/mirror/object_test.cc \
+ runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
runtime/parsed_options_test.cc \
runtime/reference_table_test.cc \
diff --git a/runtime/atomic.h b/runtime/atomic.h
index c2abf56453..5ddafb4b89 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -390,6 +390,20 @@ template<int SZ, class T> struct AtomicHelper {
}
};
+// Interpret the bit pattern of input (type U) as type V. Requires the size
+// of U to equal the size of V (compile-time checked).
+// Reproduced here from utils.h to keep dependencies small.
+template<typename U, typename V>
+static inline V bit_cast_atomic(U in) {
+ COMPILE_ASSERT(sizeof(U) == sizeof(V), size_of_u_not_eq_size_of_v);
+ union {
+ U u;
+ V v;
+ } tmp;
+ tmp.u = in;
+ return tmp.v;
+}
+
template<class T> struct AtomicHelper<8, T> {
friend class Atomic<T>;
@@ -400,15 +414,14 @@ template<class T> struct AtomicHelper<8, T> {
// sizeof(T) == 8
volatile const int64_t* loc_ptr =
reinterpret_cast<volatile const int64_t*>(loc);
- return static_cast<T>(QuasiAtomic::Read64(loc_ptr));
+ return bit_cast_atomic<int64_t, T>(QuasiAtomic::Read64(loc_ptr));
}
static void StoreRelaxed(volatile T* loc, T desired) {
// sizeof(T) == 8
volatile int64_t* loc_ptr =
reinterpret_cast<volatile int64_t*>(loc);
- QuasiAtomic::Write64(loc_ptr,
- static_cast<int64_t>(desired));
+ QuasiAtomic::Write64(loc_ptr, bit_cast_atomic<T, int64_t>(desired));
}
@@ -416,9 +429,9 @@ template<class T> struct AtomicHelper<8, T> {
T expected_value, T desired_value) {
// sizeof(T) == 8
volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
- return QuasiAtomic::Cas64(
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(expected_value)),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(desired_value)), loc_ptr);
+ return QuasiAtomic::Cas64(bit_cast_atomic<T, int64_t>(expected_value),
+ bit_cast_atomic<T, int64_t>(desired_value),
+ loc_ptr);
}
};
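
The helper above matters because the code it replaces used static_cast, which converts the value numerically; for a 64-bit atomic holding a double or a pointer payload, the stored bit pattern must be preserved instead. The following standalone sketch shows the same reinterpretation idiom under the equal-size assumption; it uses hypothetical names and memcpy rather than the patch's union, and is not part of the commit:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of From as To without changing them (sizes must match).
    // The patch's bit_cast_atomic achieves the same effect through a union.
    template <typename To, typename From>
    To BitCastExample(From in) {
      static_assert(sizeof(To) == sizeof(From), "size mismatch");
      To out;
      std::memcpy(&out, &in, sizeof(To));
      return out;
    }

    int main() {
      double d = 3.5;
      int64_t as_int = static_cast<int64_t>(d);        // value conversion: 3
      int64_t as_bits = BitCastExample<int64_t>(d);    // raw IEEE-754 bits of 3.5
      assert(as_int != as_bits);
      assert(BitCastExample<double>(as_bits) == d);    // exact round trip
      return 0;
    }
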
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index bde28866c9..7779547725 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,6 +30,7 @@
namespace art {
Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
@@ -832,6 +833,7 @@ void Locks::Init() {
DCHECK(modify_ldt_lock_ == nullptr);
}
DCHECK(abort_lock_ != nullptr);
+ DCHECK(allocated_monitor_ids_lock_ != nullptr);
DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
@@ -883,6 +885,10 @@ void Locks::Init() {
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
+ DCHECK(allocated_monitor_ids_lock_ == nullptr);
+ allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
DCHECK(allocated_thread_ids_lock_ == nullptr);
allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 9dc7deab84..8d2cd07aea 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -70,7 +70,6 @@ enum LockLevel {
kMarkSweepMarkStackLock,
kTransactionLogLock,
kInternTableLock,
- kMonitorPoolLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
kPinTableLock,
@@ -78,6 +77,7 @@ enum LockLevel {
kJdwpObjectRegistryLock,
kModifyLdtLock,
kAllocatedThreadIdsLock,
+ kMonitorPoolLock,
kClassLinkerClassesLock,
kBreakpointLock,
kMonitorLock,
@@ -560,8 +560,10 @@ class Locks {
// doesn't try to hold a higher level Mutex.
#define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
// Guard the allocation/deallocation of thread ids.
- static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+ static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
// Guards modification of the LDT on x86.
static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index eb62a694e0..c3ec38d1d3 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -90,7 +90,33 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
hash_code_(hash_code),
locking_method_(NULL),
locking_dex_pc_(0),
- monitor_id_(MonitorPool::CreateMonitorId(self, this)) {
+ monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
+#ifdef __LP64__
+ DCHECK(false) << "Should not be reached in 64b";
+ next_free_ = nullptr;
+#endif
+ // We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
+ // with the owner unlocking the thin-lock.
+ CHECK(owner == nullptr || owner == self || owner->IsSuspended());
+ // The identity hash code is set for the life time of the monitor.
+}
+
+Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
+ MonitorId id)
+ : monitor_lock_("a monitor lock", kMonitorLock),
+ monitor_contenders_("monitor contenders", monitor_lock_),
+ num_waiters_(0),
+ owner_(owner),
+ lock_count_(0),
+ obj_(obj),
+ wait_set_(NULL),
+ hash_code_(hash_code),
+ locking_method_(NULL),
+ locking_dex_pc_(0),
+ monitor_id_(id) {
+#ifdef __LP64__
+ next_free_ = nullptr;
+#endif
// We should only inflate a lock if the owner is ourselves or suspended. This avoids a race
// with the owner unlocking the thin-lock.
CHECK(owner == nullptr || owner == self || owner->IsSuspended());
@@ -146,7 +172,6 @@ bool Monitor::Install(Thread* self) {
}
Monitor::~Monitor() {
- MonitorPool::ReleaseMonitorId(monitor_id_);
// Deflated monitors have a null object.
}
@@ -621,20 +646,23 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
* inflating the lock and so the caller should read the monitor following the call.
*/
void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
// Allocate and acquire a new monitor.
- std::unique_ptr<Monitor> m(new Monitor(self, owner, obj, hash_code));
+ Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
+ DCHECK(m != nullptr);
if (m->Install(self)) {
if (owner != nullptr) {
VLOG(monitor) << "monitor: thread" << owner->GetThreadId()
- << " created monitor " << m.get() << " for object " << obj;
+ << " created monitor " << m << " for object " << obj;
} else {
VLOG(monitor) << "monitor: Inflate with hashcode " << hash_code
- << " created monitor " << m.get() << " for object " << obj;
+ << " created monitor " << m << " for object " << obj;
}
- Runtime::Current()->GetMonitorList()->Add(m.release());
+ Runtime::Current()->GetMonitorList()->Add(m);
CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
+ } else {
+ MonitorPool::ReleaseMonitor(self, m);
}
}
@@ -1071,8 +1099,12 @@ MonitorList::MonitorList()
}
MonitorList::~MonitorList() {
- MutexLock mu(Thread::Current(), monitor_list_lock_);
- STLDeleteElements(&list_);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
+ // Release all monitors to the pool.
+ // TODO: Is it an invariant that *all* open monitors are in the list? Then we could
+ // clear faster in the pool.
+ MonitorPool::ReleaseMonitors(self, &list_);
}
void MonitorList::DisallowNewMonitors() {
@@ -1097,7 +1129,8 @@ void MonitorList::Add(Monitor* m) {
}
void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), monitor_list_lock_);
+ Thread* self = Thread::Current();
+ MutexLock mu(self, monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
// Disable the read barrier in GetObject() as this is called by GC.
@@ -1107,7 +1140,7 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
<< obj;
- delete m;
+ MonitorPool::ReleaseMonitor(self, m);
it = list_.erase(it);
} else {
m->SetObject(new_obj);
diff --git a/runtime/monitor.h b/runtime/monitor.h
index d7552a3fd5..0d0ad0b13d 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -124,7 +124,9 @@ class Monitor {
private:
explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code,
+ MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Install the monitor into its object, may fail if another thread installs a different monitor
// first.
@@ -212,8 +214,14 @@ class Monitor {
// The denser encoded version of this monitor as stored in the lock word.
MonitorId monitor_id_;
+#ifdef __LP64__
+ // Free list for monitor pool.
+ Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+#endif
+
friend class MonitorInfo;
friend class MonitorList;
+ friend class MonitorPool;
friend class mirror::Object;
DISALLOW_COPY_AND_ASSIGN(Monitor);
};
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index eb7525a6ad..440a6be07b 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -23,36 +23,118 @@
namespace art {
-MonitorPool::MonitorPool() : allocated_ids_lock_("allocated monitor ids lock",
- LockLevel::kMonitorPoolLock) {
-}
+namespace mirror {
+ class Object;
+} // namespace mirror
-Monitor* MonitorPool::LookupMonitorFromTable(MonitorId mon_id) {
- ReaderMutexLock mu(Thread::Current(), allocated_ids_lock_);
- return table_.Get(mon_id);
+MonitorPool::MonitorPool()
+ : num_chunks_(0), capacity_(0), first_free_(nullptr) {
+ AllocateChunk(); // Get our first chunk.
}
-MonitorId MonitorPool::AllocMonitorIdFromTable(Thread* self, Monitor* mon) {
- WriterMutexLock mu(self, allocated_ids_lock_);
- for (size_t i = 0; i < allocated_ids_.size(); ++i) {
- if (!allocated_ids_[i]) {
- allocated_ids_.set(i);
- MonitorId mon_id = i + 1; // Zero is reserved to mean "invalid".
- table_.Put(mon_id, mon);
- return mon_id;
+// Assumes locks are held appropriately when necessary.
+// We do not need a lock in the constructor, but we do in CreateMonitorInPool.
+void MonitorPool::AllocateChunk() {
+ DCHECK(first_free_ == nullptr);
+
+ // Do we need to resize?
+ if (num_chunks_ == capacity_) {
+ if (capacity_ == 0U) {
+ // Initialization.
+ capacity_ = kInitialChunkStorage;
+ uintptr_t* new_backing = new uintptr_t[capacity_];
+ monitor_chunks_.StoreRelaxed(new_backing);
+ } else {
+ size_t new_capacity = 2 * capacity_;
+ uintptr_t* new_backing = new uintptr_t[new_capacity];
+ uintptr_t* old_backing = monitor_chunks_.LoadRelaxed();
+ memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_);
+ monitor_chunks_.StoreRelaxed(new_backing);
+ capacity_ = new_capacity;
+ old_chunk_arrays_.push_back(old_backing);
+ LOG(INFO) << "Resizing to capacity " << capacity_;
}
}
- LOG(FATAL) << "Out of internal monitor ids";
- return 0;
+
+ // Allocate the chunk.
+ void* chunk = malloc(kChunkSize);
+ // Check we allocated memory.
+ CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
+ // Check it is aligned as we need it.
+ CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);
+
+ // Add the chunk.
+ *(monitor_chunks_.LoadRelaxed()+num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
+ num_chunks_++;
+
+ // Set up the free list
+ Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
+ (kChunkCapacity - 1) * kAlignedMonitorSize);
+ last->next_free_ = nullptr;
+ // Eagerly compute id.
+ last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize +
+ (kChunkCapacity - 1) * kAlignedMonitorSize);
+ for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
+ Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
+ kAlignedMonitorSize);
+ before->next_free_ = last;
+ // Derive monitor_id from last.
+ before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
+ kAlignedMonitorSize);
+
+ last = before;
+ }
+ DCHECK(last == reinterpret_cast<Monitor*>(chunk));
+ first_free_ = last;
+}
+
+Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
+ int32_t hash_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // We are going to allocate, so acquire the writer lock.
+ MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
+
+ // Enough space, or need to resize?
+ if (first_free_ == nullptr) {
+ LOG(INFO) << "Allocating a new chunk.";
+ AllocateChunk();
+ }
+
+ Monitor* mon_uninitialized = first_free_;
+ first_free_ = first_free_->next_free_;
+
+ // Pull out the id which was preinitialized.
+ MonitorId id = mon_uninitialized->monitor_id_;
+
+ // Initialize it.
+ Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);
+
+ return monitor;
}
-void MonitorPool::ReleaseMonitorIdFromTable(MonitorId mon_id) {
- WriterMutexLock mu(Thread::Current(), allocated_ids_lock_);
- DCHECK(table_.Get(mon_id) != nullptr);
- table_.erase(mon_id);
- --mon_id; // Zero is reserved to mean "invalid".
- DCHECK(allocated_ids_[mon_id]) << mon_id;
- allocated_ids_.reset(mon_id);
+void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
+ // Might be racy with allocation, so acquire lock.
+ MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
+
+ // Keep the monitor id; don't trust the destructor to leave it intact.
+ MonitorId id = monitor->monitor_id_;
+
+ // Call the destructor.
+ // TODO: Exception safety?
+ monitor->~Monitor();
+
+ // Add to the head of the free list.
+ monitor->next_free_ = first_free_;
+ first_free_ = monitor;
+
+ // Rewrite monitor id.
+ monitor->monitor_id_ = id;
+}
+
+void MonitorPool::ReleaseMonitorsToPool(Thread* self, std::list<Monitor*>* monitors) {
+ for (Monitor* mon : *monitors) {
+ ReleaseMonitorToPool(self, mon);
+ }
}
} // namespace art
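
CreateMonitorInPool and ReleaseMonitorToPool above implement an intrusive free list over fixed-size, pre-carved slots: allocation pops the list head and constructs in place with placement new, release runs the destructor and pushes the slot back. A reduced single-chunk sketch of that pattern follows, with hypothetical names and without locking or chunk growth (not ART code):

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Slot {
      int payload;
      Slot* next_free;  // only meaningful while the slot sits on the free list
    };

    class SlotPool {
     public:
      SlotPool() {
        // Carve one malloc'ed chunk into kCapacity slots and chain them together.
        chunk_ = static_cast<Slot*>(std::malloc(kCapacity * sizeof(Slot)));
        for (std::size_t i = 0; i + 1 < kCapacity; ++i) {
          chunk_[i].next_free = &chunk_[i + 1];
        }
        chunk_[kCapacity - 1].next_free = nullptr;
        first_free_ = chunk_;
      }
      ~SlotPool() { std::free(chunk_); }

      Slot* Create(int value) {
        assert(first_free_ != nullptr);         // the real pool grows a new chunk here
        Slot* raw = first_free_;
        first_free_ = raw->next_free;
        return new (raw) Slot{value, nullptr};  // construct in place
      }

      void Release(Slot* slot) {
        slot->~Slot();                  // destroy first, as the patch does
        slot->next_free = first_free_;  // then push onto the free list
        first_free_ = slot;
      }

     private:
      static constexpr std::size_t kCapacity = 16;
      Slot* chunk_ = nullptr;
      Slot* first_free_ = nullptr;
    };

The patch additionally records a precomputed monitor_id_ in each free slot, so allocation does not have to recompute the chunk offset.
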
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 32e1553891..5bc28f132a 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -20,11 +20,11 @@
#include "monitor.h"
#ifdef __LP64__
-#include <bitset>
#include <stdint.h>
-
+#include "atomic.h"
#include "runtime.h"
-#include "safe_map.h"
+#else
+#include "base/stl_util.h" // STLDeleteElements
#endif
namespace art {
@@ -41,11 +41,36 @@ class MonitorPool {
#endif
}
+ static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#ifndef __LP64__
+ return new Monitor(self, owner, obj, hash_code);
+#else
+ return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
+#endif
+ }
+
+ static void ReleaseMonitor(Thread* self, Monitor* monitor) {
+#ifndef __LP64__
+ delete monitor;
+#else
+ GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
+#endif
+ }
+
+ static void ReleaseMonitors(Thread* self, std::list<Monitor*>* monitors) {
+#ifndef __LP64__
+ STLDeleteElements(monitors);
+#else
+ GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
+#endif
+ }
+
static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
return reinterpret_cast<Monitor*>(mon_id << 3);
#else
- return Runtime::Current()->GetMonitorPool()->LookupMonitorFromTable(mon_id);
+ return GetMonitorPool()->LookupMonitor(mon_id);
#endif
}
@@ -57,39 +82,98 @@ class MonitorPool {
#endif
}
- static MonitorId CreateMonitorId(Thread* self, Monitor* mon) {
+ static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
- UNUSED(self);
return MonitorIdFromMonitor(mon);
#else
- return Runtime::Current()->GetMonitorPool()->AllocMonitorIdFromTable(self, mon);
+ return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
}
- static void ReleaseMonitorId(MonitorId mon_id) {
+ static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
- UNUSED(mon_id);
+ return nullptr;
#else
- Runtime::Current()->GetMonitorPool()->ReleaseMonitorIdFromTable(mon_id);
+ return Runtime::Current()->GetMonitorPool();
#endif
}
private:
#ifdef __LP64__
- MonitorPool();
+ // When we create a monitor pool, threads have not been initialized, yet, so ignore thread-safety
+ // analysis.
+ MonitorPool() NO_THREAD_SAFETY_ANALYSIS;
- Monitor* LookupMonitorFromTable(MonitorId mon_id);
+ void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_);
- MonitorId LookupMonitorIdFromTable(Monitor* mon);
+ Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- MonitorId AllocMonitorIdFromTable(Thread* self, Monitor* mon);
+ void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
+ void ReleaseMonitorsToPool(Thread* self, std::list<Monitor*>* monitors);
- void ReleaseMonitorIdFromTable(MonitorId mon_id);
+ // Note: This is safe as we do not ever move chunks.
+ Monitor* LookupMonitor(MonitorId mon_id) {
+ size_t offset = MonitorIdToOffset(mon_id);
+ size_t index = offset / kChunkSize;
+ size_t offset_in_chunk = offset % kChunkSize;
+ uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index);
+ return reinterpret_cast<Monitor*>(base + offset_in_chunk);
+ }
+
+ static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
+ uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
+ return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
+ }
+
+ // Note: This is safe as we do not ever move chunks.
+ MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
+ MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
+ for (size_t index = 0; index < num_chunks_; ++index) {
+ uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
+ if (IsInChunk(chunk_addr, mon)) {
+ return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+ }
+ }
+ LOG(FATAL) << "Did not find chunk that contains monitor.";
+ return 0;
+ }
+
+ static size_t MonitorIdToOffset(MonitorId id) {
+ return id << 3;
+ }
+
+ static MonitorId OffsetToMonitorId(size_t offset) {
+ return static_cast<MonitorId>(offset >> 3);
+ }
- ReaderWriterMutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- static constexpr uint32_t kMaxMonitorId = 0xFFFF;
- std::bitset<kMaxMonitorId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);
- SafeMap<MonitorId, Monitor*> table_ GUARDED_BY(allocated_ids_lock_);
+ // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
+ static constexpr size_t kMonitorAlignment = 8;
+ // Size of a monitor, rounded up to a multiple of alignment.
+ static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
+ -kMonitorAlignment;
+ // As close to a page as we can get seems a good start.
+ static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
+ // Chunk size that is referenced in the id. We could shrink this to the storage actually used in
+ // a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but that would require real divisions.
+ static constexpr size_t kChunkSize = kPageSize;
+ // The number of initial chunks storable in monitor_chunks_. The number is large enough to make
+ // resizing unlikely, but small enough to not waste too much memory.
+ static constexpr size_t kInitialChunkStorage = 8U;
+
+ // List of memory chunks. Each chunk is kChunkSize.
+ Atomic<uintptr_t*> monitor_chunks_;
+ // Number of chunks stored.
+ size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+ // Number of chunks storable.
+ size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+
+ // To avoid race issues when resizing, we keep all the previous arrays.
+ std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
+
+ // Start of free list of monitors.
+ // Note: these point to the right memory regions, but do *not* denote initialized objects.
+ Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};
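
Because the lock word has limited bits, a MonitorId on LP64 is the monitor's byte offset into the pool shifted right by 3 (valid only because monitors are 8-byte aligned), and LookupMonitor splits that offset into a chunk index and an offset within the chunk. A small worked example of the arithmetic, assuming 4 KiB chunks and a hypothetical 104-byte aligned monitor size:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Constants chosen to mirror the patch: page-sized chunks, 8-byte alignment.
    constexpr std::size_t kChunkSize = 4096;
    constexpr std::size_t kAlignedMonitorSize = 104;  // hypothetical; any multiple of 8 works
    static_assert(kAlignedMonitorSize % 8 == 0, "monitor slots must stay 8-byte aligned");

    using MonitorId = std::uint32_t;

    constexpr std::size_t MonitorIdToOffset(MonitorId id) { return static_cast<std::size_t>(id) << 3; }
    constexpr MonitorId OffsetToMonitorId(std::size_t offset) { return static_cast<MonitorId>(offset >> 3); }

    int main() {
      // Pretend the monitor lives in slot 3 of chunk 2.
      std::size_t offset = 2 * kChunkSize + 3 * kAlignedMonitorSize;  // 8504
      MonitorId id = OffsetToMonitorId(offset);                       // 1063

      // LookupMonitor reverses the encoding like this.
      std::size_t back = MonitorIdToOffset(id);
      std::size_t chunk_index = back / kChunkSize;       // 2
      std::size_t offset_in_chunk = back % kChunkSize;   // 312 == 3 * kAlignedMonitorSize
      std::printf("id=%u chunk=%zu offset_in_chunk=%zu\n", id, chunk_index, offset_in_chunk);
      return 0;
    }

Keeping kChunkSize equal to the power-of-two kPageSize is what lets these divisions reduce to shifts and masks.
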
diff --git a/runtime/monitor_pool_test.cc b/runtime/monitor_pool_test.cc
new file mode 100644
index 0000000000..cddc245a42
--- /dev/null
+++ b/runtime/monitor_pool_test.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "monitor_pool.h"
+
+#include "common_runtime_test.h"
+
+namespace art {
+
+class MonitorPoolTest : public CommonRuntimeTest {};
+
+class RandGen {
+ public:
+ explicit RandGen(uint32_t seed) : val_(seed) {}
+
+ uint32_t next() {
+ val_ = val_ * 48271 % 2147483647 + 13;
+ return val_;
+ }
+
+ uint32_t val_;
+};
+
+static void VerifyMonitor(Monitor* mon, Thread* self) {
+ // Check whether the monitor id is correct.
+ EXPECT_EQ(MonitorPool::MonitorIdFromMonitor(mon), mon->GetMonitorId());
+ // Check whether the monitor id agrees with the computation.
+ EXPECT_EQ(MonitorPool::ComputeMonitorId(mon, self), mon->GetMonitorId());
+ // Check whether we can use the monitor ID to get the monitor.
+ EXPECT_EQ(mon, MonitorPool::MonitorFromMonitorId(mon->GetMonitorId()));
+}
+
+TEST_F(MonitorPoolTest, MonitorPoolTest) {
+ std::vector<Monitor*> monitors;
+ RandGen r(0x1234);
+
+ // 1) Create and release monitors without increasing the storage.
+
+ // Number of max alive monitors before resize.
+ // Note: for correct testing, make sure this corresponds to the monitor pool's initial size.
+ const size_t kMaxUsage = 28;
+
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ // Allocate and release monitors.
+ for (size_t i = 0; i < 1000 ; i++) {
+ bool alloc;
+ if (monitors.size() == 0) {
+ alloc = true;
+ } else if (monitors.size() == kMaxUsage) {
+ alloc = false;
+ } else {
+ // Random decision.
+ alloc = r.next() % 2 == 0;
+ }
+
+ if (alloc) {
+ Monitor* mon = MonitorPool::CreateMonitor(self, self, nullptr, static_cast<int32_t>(i));
+ monitors.push_back(mon);
+
+ VerifyMonitor(mon, self);
+ } else {
+ // Release a random monitor.
+ size_t index = r.next() % monitors.size();
+ Monitor* mon = monitors[index];
+ monitors.erase(monitors.begin() + index);
+
+ // Recheck the monitor.
+ VerifyMonitor(mon, self);
+
+ MonitorPool::ReleaseMonitor(self, mon);
+ }
+ }
+
+ // Loop some time.
+
+ for (size_t i = 0; i < 10; ++i) {
+ // 2.1) Create enough monitors to require new chunks.
+ size_t target_size = monitors.size() + 2*kMaxUsage;
+ while (monitors.size() < target_size) {
+ Monitor* mon = MonitorPool::CreateMonitor(self, self, nullptr,
+ static_cast<int32_t>(-monitors.size()));
+ monitors.push_back(mon);
+
+ VerifyMonitor(mon, self);
+ }
+
+ // 2.2) Verify all monitors.
+ for (Monitor* mon : monitors) {
+ VerifyMonitor(mon, self);
+ }
+
+ // 2.3) Release a number of monitors randomly.
+ for (size_t j = 0; j < kMaxUsage; j++) {
+ // Release a random monitor.
+ size_t index = r.next() % monitors.size();
+ Monitor* mon = monitors[index];
+ monitors.erase(monitors.begin() + index);
+
+ MonitorPool::ReleaseMonitor(self, mon);
+ }
+ }
+
+ // Check and release all remaining monitors.
+ for (Monitor* mon : monitors) {
+ VerifyMonitor(mon, self);
+ MonitorPool::ReleaseMonitor(self, mon);
+ }
+}
+
+} // namespace art