author     Andreas Gampe <agampe@google.com>   2014-04-17 10:35:09 -0700
committer  Andreas Gampe <agampe@google.com>   2014-07-10 00:51:04 -0700
commit     74240819ae09e29b2753ef38f4eb4be1c2762e2e (patch)
tree       61e2d3aa7268ce49fe77715593896f59feb92fe6 /runtime/atomic.h
parent     32710dd4a0232149002a5ae7bde1c640cdffd564 (diff)
Use memory chunks for monitors on LP64
Monitor IDs in lock words are only 30 bits wide. On a 32-bit system that works fine, as memory is usually aligned enough that shifting works out. On 64-bit systems, the virtual address space is too large for that.

This change adds memory chunks into which we allocate the monitors, so that a monitor's address is base_addr + offset and the offset can be used as the monitor ID. To allow for relatively compact but growable storage, we use a list of chunks.

Added a global lock for the monitor pool.

Change-Id: I0e290c4914a2556e0b2eef9902422d7c4dcf536d
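The chunk scheme described above can be illustrated with a small, self-contained sketch. This is not the real MonitorPool implementation; the class name SimpleChunkPool, the constants kChunkSize and kSlotSize, and the unsynchronized allocation (the actual pool is guarded by a global lock) are all assumptions made for illustration only.

// Minimal sketch, assuming made-up chunk and slot sizes: monitors live in
// fixed-size chunks, and a monitor is identified by a small integer that
// encodes (chunk index, slot index) rather than by its 64-bit address.
#include <cstdint>
#include <cstdlib>
#include <vector>

class SimpleChunkPool {
 public:
  static constexpr size_t kChunkSize = 4096;  // assumed chunk size
  static constexpr size_t kSlotSize = 64;     // assumed per-monitor slot size
  static constexpr size_t kSlotsPerChunk = kChunkSize / kSlotSize;

  // Allocate a slot and return a compact ID small enough to fit a narrow
  // lock-word field, instead of a raw pointer.
  uint32_t AllocateSlot() {
    if (chunks_.empty() || next_slot_ == kSlotsPerChunk) {
      chunks_.push_back(static_cast<uint8_t*>(std::malloc(kChunkSize)));
      next_slot_ = 0;
    }
    uint32_t chunk_index = static_cast<uint32_t>(chunks_.size() - 1);
    uint32_t slot_index = static_cast<uint32_t>(next_slot_++);
    return chunk_index * kSlotsPerChunk + slot_index;
  }

  // Translate a compact ID back into an address: base_addr + offset.
  void* SlotFromId(uint32_t id) const {
    uint8_t* base_addr = chunks_[id / kSlotsPerChunk];
    size_t offset = (id % kSlotsPerChunk) * kSlotSize;
    return base_addr + offset;
  }

 private:
  std::vector<uint8_t*> chunks_;  // growable list of fixed-size chunks
  size_t next_slot_ = 0;          // next free slot in the newest chunk
};

The list of chunks gives growable storage without ever moving existing monitors, so previously handed-out IDs stay valid as the pool grows.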
Diffstat (limited to 'runtime/atomic.h')
-rw-r--r--   runtime/atomic.h | 25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index ed83a33b0b..4b83baee28 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -382,6 +382,20 @@ template<int SZ, class T> struct AtomicHelper {
}
};
+// Interpret the bit pattern of input (type U) as type V. Requires
+// sizeof(U) == sizeof(V) (compile-time checked).
+// Reproduced here from utils.h to keep dependencies small.
+template<typename U, typename V>
+static inline V bit_cast_atomic(U in) {
+ COMPILE_ASSERT(sizeof(U) == sizeof(V), size_of_u_not_eq_size_of_v);
+ union {
+ U u;
+ V v;
+ } tmp;
+ tmp.u = in;
+ return tmp.v;
+}
+
template<class T> struct AtomicHelper<8, T> {
friend class Atomic<T>;
@@ -392,15 +406,14 @@ template<class T> struct AtomicHelper<8, T> {
// sizeof(T) == 8
volatile const int64_t* loc_ptr =
reinterpret_cast<volatile const int64_t*>(loc);
- return static_cast<T>(QuasiAtomic::Read64(loc_ptr));
+ return bit_cast_atomic<int64_t, T>(QuasiAtomic::Read64(loc_ptr));
}
static void StoreRelaxed(volatile T* loc, T desired) {
// sizeof(T) == 8
volatile int64_t* loc_ptr =
reinterpret_cast<volatile int64_t*>(loc);
- QuasiAtomic::Write64(loc_ptr,
- static_cast<int64_t>(desired));
+ QuasiAtomic::Write64(loc_ptr, bit_cast_atomic<T, int64_t>(desired));
}
@@ -408,9 +421,9 @@ template<class T> struct AtomicHelper<8, T> {
T expected_value, T desired_value) {
// sizeof(T) == 8
volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
- return QuasiAtomic::Cas64(
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(expected_value)),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(desired_value)), loc_ptr);
+ return QuasiAtomic::Cas64(bit_cast_atomic<T, int64_t>(expected_value),
+ bit_cast_atomic<T, int64_t>(desired_value),
+ loc_ptr);
}
};
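The point of routing through bit_cast_atomic rather than static_cast is that static_cast performs a value conversion, which is wrong for non-integral 8-byte types such as double. A standalone demo of the difference is sketched below; bit_cast_demo mirrors bit_cast_atomic (using static_assert instead of the runtime's COMPILE_ASSERT macro), and the chosen double value is arbitrary.

// Why the patch bit-casts instead of static_casts: for double, static_cast
// converts the value (1.5 -> 1), while the union-based bit cast preserves the
// exact bit pattern that the 64-bit load/store/CAS must operate on.
#include <cstdint>
#include <cstdio>

template<typename U, typename V>
static inline V bit_cast_demo(U in) {
  static_assert(sizeof(U) == sizeof(V), "sizes must match");
  union {
    U u;
    V v;
  } tmp;
  tmp.u = in;
  return tmp.v;
}

int main() {
  double d = 1.5;
  // Value conversion: yields the integer 1, losing the original bit pattern.
  int64_t converted = static_cast<int64_t>(d);
  // Bit reinterpretation: yields 0x3ff8000000000000, the IEEE-754 encoding of 1.5.
  int64_t reinterpreted = bit_cast_demo<double, int64_t>(d);
  std::printf("static_cast: %lld, bit cast: 0x%llx\n",
              static_cast<long long>(converted),
              static_cast<unsigned long long>(reinterpreted));
  return 0;
}

The same kind of mismatch is presumably why the old CompareExchange path had to detour through reinterpret_cast<uintptr_t> before the static_cast, a pattern that only makes sense for pointer-like T; the bit cast handles all 8-byte types uniformly.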