author     Elliott Hughes <enh@google.com>  2012-05-02 15:08:08 -0700
committer  Elliott Hughes <enh@google.com>  2012-05-02 15:08:08 -0700
commit     627d8d06b366c4aa54bee43daee0d0011638b608 (patch)
tree       be1d9ff294c6d76d6c9e8dcfd3d171aabcf4427d /vm/Atomic.cpp
parent     ce00cbe5fb3ead585b97b257d0cfb7973ca4ab01 (diff)
parent     323d9152187610653be7f07d045f76ea1bd3af5b (diff)
resolved conflicts for merge of 323d9152 to jb-dev-plus-aosp
Change-Id: Ie94542150dad725ca09ad2824cddd034cd6a149b
Diffstat (limited to 'vm/Atomic.cpp')
-rw-r--r--  vm/Atomic.cpp  281
1 file changed, 103 insertions(+), 178 deletions(-)
diff --git a/vm/Atomic.cpp b/vm/Atomic.cpp
index f53a7e40c..f0f548927 100644
--- a/vm/Atomic.cpp
+++ b/vm/Atomic.cpp
@@ -18,75 +18,41 @@
#include <cutils/atomic.h>
-/*
- * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
- *
- * TODO: unify ARMv6/x86/sh implementations using the to-be-written
- * spin lock implementation. We don't want to rely on mutex innards,
- * and it would be great if all platforms were running the same code.
- */
+#if defined(__arm__)
+#include <machine/cpu-features.h>
+#endif
-#if defined(HAVE_MACOSX_IPC)
+/*****************************************************************************/
-#include <libkern/OSAtomic.h>
+#if defined(HAVE_MACOSX_IPC)
+#define NEED_MAC_QUASI_ATOMICS 1
-#if defined(__ppc__) \
- || defined(__PPC__) \
- || defined(__powerpc__) \
- || defined(__powerpc) \
- || defined(__POWERPC__) \
- || defined(_M_PPC) \
- || defined(__PPC)
-#define NEED_QUASIATOMICS 1
-#else
+#elif defined(__i386__) || defined(__x86_64__)
+#define NEED_PTHREADS_QUASI_ATOMICS 1
-int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
- volatile int64_t* addr)
-{
- return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
- (int64_t*)addr) == 0;
-}
+#elif defined(__mips__)
+#define NEED_PTHREADS_QUASI_ATOMICS 1
+#elif defined(__arm__)
-static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
- volatile int64_t* addr)
-{
- int64_t oldValue;
- do {
- oldValue = *addr;
- } while (dvmQuasiAtomicCas64(oldValue, value, addr));
- return oldValue;
-}
-
-int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
-{
- return dvmQuasiAtomicSwap64Body(value, addr);
-}
+// TODO: Clang can not process our inline assembly at the moment.
+#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
+#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
+#else
+#define NEED_PTHREADS_QUASI_ATOMICS 1
+#endif
-int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
-{
- int64_t oldValue;
- ANDROID_MEMBAR_STORE();
- oldValue = dvmQuasiAtomicSwap64Body(value, addr);
- /* TUNING: barriers can be avoided on some architectures */
- ANDROID_MEMBAR_FULL();
- return oldValue;
-}
+#elif defined(__sh__)
+#define NEED_PTHREADS_QUASI_ATOMICS 1
-int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
-{
- return OSAtomicAdd64Barrier(0, addr);
-}
+#else
+#error "Unsupported atomic operations for this platform"
#endif
-#elif defined(__i386__) || defined(__x86_64__)
-#define NEED_QUASIATOMICS 1
+/*****************************************************************************/
-#elif __arm__
-#include <machine/cpu-features.h>
+#if NEED_ARM_LDREXD_QUASI_ATOMICS
-// Clang can not process this assembly at the moment.
-#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
volatile int64_t* addr)
{
@@ -145,37 +111,93 @@ int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
: "r" (addr));
return value;
}
+#endif
-#else
+/*****************************************************************************/
+
+#if NEED_MAC_QUASI_ATOMICS
+
+#include <libkern/OSAtomic.h>
+
+int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
+ volatile int64_t* addr)
+{
+ return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
+ (int64_t*)addr) == 0;
+}
+
+
+static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
+ volatile int64_t* addr)
+{
+ int64_t oldValue;
+ do {
+ oldValue = *addr;
+ } while (dvmQuasiAtomicCas64(oldValue, value, addr));
+ return oldValue;
+}
+
+int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
+{
+ return dvmQuasiAtomicSwap64Body(value, addr);
+}
+
+int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
+{
+ int64_t oldValue;
+ ANDROID_MEMBAR_STORE();
+ oldValue = dvmQuasiAtomicSwap64Body(value, addr);
+ /* TUNING: barriers can be avoided on some architectures */
+ ANDROID_MEMBAR_FULL();
+ return oldValue;
+}
+
+int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
+{
+ return OSAtomicAdd64Barrier(0, addr);
+}
+#endif
+
+/*****************************************************************************/
+
+#if NEED_PTHREADS_QUASI_ATOMICS
+
+// In the absence of a better implementation, we implement the 64-bit atomic
+// operations through mutex locking.
-// on the device, we implement the 64-bit atomic operations through
-// mutex locking. normally, this is bad because we must initialize
-// a pthread_mutex_t before being able to use it, and this means
-// having to do an initialization check on each function call, and
-// that's where really ugly things begin...
-//
-// BUT, as a special twist, we take advantage of the fact that in our
-// pthread library, a mutex is simply a volatile word whose value is always
-// initialized to 0. In other words, simply declaring a static mutex
-// object initializes it !
-//
// another twist is that we use a small array of mutexes to dispatch
// the contention locks from different memory addresses
-//
#include <pthread.h>
-#define SWAP_LOCK_COUNT 32U
-static pthread_mutex_t _swap_locks[SWAP_LOCK_COUNT];
+static const size_t kSwapLockCount = 32;
+static pthread_mutex_t* gSwapLocks[kSwapLockCount];
-#define SWAP_LOCK(addr) \
- &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
+void dvmQuasiAtomicsStartup() {
+ for (size_t i = 0; i < kSwapLockCount; ++i) {
+ pthread_mutex_t* m = new pthread_mutex_t;
+ dvmInitMutex(m);
+ gSwapLocks[i] = m;
+ }
+}
+void dvmQuasiAtomicsShutdown() {
+ for (size_t i = 0; i < kSwapLockCount; ++i) {
+ pthread_mutex_t* m = gSwapLocks[i];
+ gSwapLocks[i] = NULL;
+ dvmDestroyMutex(m);
+ delete m;
+ }
+}
+
+static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
+ return gSwapLocks[((unsigned)(void*)(addr) >> 3U) % kSwapLockCount];
+}
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
int64_t oldValue;
- pthread_mutex_t* lock = SWAP_LOCK(addr);
+ pthread_mutex_t* lock = GetSwapLock(addr);
pthread_mutex_lock(lock);
@@ -196,7 +218,7 @@ int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
int result;
- pthread_mutex_t* lock = SWAP_LOCK(addr);
+ pthread_mutex_t* lock = GetSwapLock(addr);
pthread_mutex_lock(lock);
@@ -213,7 +235,7 @@ int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
int64_t result;
- pthread_mutex_t* lock = SWAP_LOCK(addr);
+ pthread_mutex_t* lock = GetSwapLock(addr);
pthread_mutex_lock(lock);
result = *addr;
@@ -221,107 +243,10 @@ int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
return result;
}
-#endif /*__ARM_HAVE_LDREXD*/
-
-/*****************************************************************************/
-#elif __sh__
-#define NEED_QUASIATOMICS 1
-
#else
-#error "Unsupported atomic operations for this platform"
-#endif
+// The other implementations don't need any special setup.
+void dvmQuasiAtomicsStartup() {}
+void dvmQuasiAtomicsShutdown() {}
-#if NEED_QUASIATOMICS
-
-/* Note that a spinlock is *not* a good idea in general
- * since they can introduce subtle issues. For example,
- * a real-time thread trying to acquire a spinlock already
- * acquired by another thread will never yield, making the
- * CPU loop endlessly!
- *
- * However, this code is only used on the Linux simulator
- * so it's probably ok for us.
- *
- * The alternative is to use a pthread mutex, but
- * these must be initialized before being used, and
- * then you have the problem of lazily initializing
- * a mutex without any other synchronization primitive.
- *
- * TODO: these currently use sched_yield(), which is not guaranteed to
- * do anything at all. We need to use dvmIterativeSleep or a wait /
- * notify mechanism if the initial attempt fails.
- */
-
-/* global spinlock for all 64-bit quasiatomic operations */
-static int32_t quasiatomic_spinlock = 0;
-
-int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
- volatile int64_t* addr)
-{
- int result;
-
- while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
- Sleep(0);
-#else
- sched_yield();
-#endif
- }
-
- if (*addr == oldvalue) {
- *addr = newvalue;
- result = 0;
- } else {
- result = 1;
- }
-
- android_atomic_release_store(0, &quasiatomic_spinlock);
-
- return result;
-}
-
-int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
-{
- int64_t result;
-
- while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
- Sleep(0);
-#else
- sched_yield();
-#endif
- }
-
- result = *addr;
- android_atomic_release_store(0, &quasiatomic_spinlock);
-
- return result;
-}
-
-int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
-{
- int64_t result;
-
- while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
-#ifdef HAVE_WIN32_THREADS
- Sleep(0);
-#else
- sched_yield();
-#endif
- }
-
- result = *addr;
- *addr = value;
- android_atomic_release_store(0, &quasiatomic_spinlock);
-
- return result;
-}
-
-/* Same as dvmQuasiAtomicSwap64 - syscall handles barrier */
-int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
-{
- return dvmQuasiAtomicSwap64(value, addr);
-}
-
-#endif /*NEED_QUASIATOMICS*/
+#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
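
The pthreads fallback above avoids a single global lock by striping: each 64-bit address is hashed down to one of kSwapLockCount mutexes, so operations on unrelated addresses rarely contend, while any two operations on the same address always pick the same lock. A minimal standalone sketch of that idea (invented names, not the Dalvik code itself) could look like this:

    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    // Sketch: a small fixed pool of mutexes guards 64-bit accesses on
    // platforms without native 64-bit atomics; the target address picks
    // which mutex to take.
    static const size_t kNumStripes = 32;
    static pthread_mutex_t gStripes[kNumStripes];

    static void stripesInit() {
        for (size_t i = 0; i < kNumStripes; ++i) {
            pthread_mutex_init(&gStripes[i], NULL);
        }
    }

    static pthread_mutex_t* stripeFor(const volatile int64_t* addr) {
        // Drop the low bits (64-bit fields are normally 8-byte aligned),
        // then fold the address into the pool size.
        return &gStripes[((uintptr_t) addr >> 3) % kNumStripes];
    }

    static int64_t stripedRead64(const volatile int64_t* addr) {
        pthread_mutex_t* lock = stripeFor(addr);
        pthread_mutex_lock(lock);
        int64_t result = *addr;
        pthread_mutex_unlock(lock);
        return result;
    }

Several addresses may hash to the same stripe, which costs a little extra contention but never correctness, since the lock choice is a pure function of the address.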
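Because dvmQuasiAtomicCas64 follows the 0-on-success convention visible in both the Mac and pthreads implementations (it returns non-zero when *addr no longer holds the expected old value), callers can compose other 64-bit read-modify-write operations with the usual compare-and-swap retry loop. A hypothetical helper, not part of this file, assuming only the interface above:

    // Hypothetical: atomically add 'delta' to *addr using the quasi-atomic
    // primitives; retry while the CAS reports another thread got in first.
    static int64_t quasiAtomicAdd64(int64_t delta, volatile int64_t* addr) {
        int64_t old;
        do {
            old = dvmQuasiAtomicRead64(addr);
        } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
        return old + delta;
    }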