summaryrefslogtreecommitdiffstats
path: root/vm/Atomic.cpp
diff options
context:
space:
mode:
authorbuzbee <buzbee@google.com>2011-09-27 11:47:28 -0700
committerbuzbee <buzbee@google.com>2011-09-27 16:59:35 -0700
commit4185972e211b0c84b9fe7d90c56b28cc15e474fa (patch)
tree8182adea9ecc6ec9fa00a0291313340f94959e9c /vm/Atomic.cpp
parent927765c9a80a730d73bd40e2ef60ccaa01652449 (diff)
downloadandroid_dalvik-4185972e211b0c84b9fe7d90c56b28cc15e474fa.tar.gz
android_dalvik-4185972e211b0c84b9fe7d90c56b28cc15e474fa.tar.bz2
android_dalvik-4185972e211b0c84b9fe7d90c56b28cc15e474fa.zip
Fix memory barriers (Issue 3338450)
Add extra memory barrier on volatile stores. Change-Id: Id4a4750cdfc910eda2f0b44ead0af2a569b5735e
Diffstat (limited to 'vm/Atomic.cpp')
-rw-r--r--vm/Atomic.cpp48
1 files changed, 46 insertions, 2 deletions
diff --git a/vm/Atomic.cpp b/vm/Atomic.cpp
index 4473c8568..927a92616 100644
--- a/vm/Atomic.cpp
+++ b/vm/Atomic.cpp
@@ -47,7 +47,9 @@ int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
(int64_t*)addr) == 0;
}
-int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
+
+static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
+ volatile int64_t* addr)
{
int64_t oldValue;
do {
@@ -56,6 +58,21 @@ int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
return oldValue;
}
+int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
+{
+ return dvmQuasiAtomicSwap64Body(value, addr);
+}
+
+int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
+{
+ int64_t oldValue;
+ ANDROID_MEMBAR_STORE();
+ oldValue = dvmQuasiAtomicSwap64Body(value, addr);
+ /* TUNING: barriers can be avoided on some architectures */
+ ANDROID_MEMBAR_FULL();
+ return oldValue;
+}
+
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
return OSAtomicAdd64Barrier(0, addr);
@@ -69,7 +86,8 @@ int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
#include <machine/cpu-features.h>
#ifdef __ARM_HAVE_LDREXD
-int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
+static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
+ volatile int64_t* addr)
{
int64_t prev;
int status;
@@ -84,6 +102,20 @@ int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
return prev;
}
+int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
+{
+ return dvmQuasiAtomicSwap64Body(newvalue, addr);
+}
+
+int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
+{
+ int64_t prev;
+ ANDROID_MEMBAR_STORE();
+ prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
+ ANDROID_MEMBAR_FULL();
+ return prev;
+}
+
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
@@ -153,6 +185,12 @@ int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
return oldValue;
}
+/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
+int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
+{
+ return dvmQuasiAtomicSwap64(value, addr);
+}
+
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
@@ -279,4 +317,10 @@ int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
return result;
}
+/* Same as dvmQuasiAtomicSwap64 - syscall handles barrier */
+int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
+{
+ return dvmQuasiAtomicSwap64(value, addr);
+}
+
#endif /*NEED_QUASIATOMICS*/