path: root/include/cutils
Diffstat (limited to 'include/cutils')
-rw-r--r--  include/cutils/ashmem.h           2
-rw-r--r--  include/cutils/atomic-arm.h     269
-rw-r--r--  include/cutils/atomic-inline.h   58
-rw-r--r--  include/cutils/atomic-x86.h     139
-rw-r--r--  include/cutils/atomic.h         103
5 files changed, 543 insertions, 28 deletions
diff --git a/include/cutils/ashmem.h b/include/cutils/ashmem.h
index fd56dbef3..25b233e6a 100644
--- a/include/cutils/ashmem.h
+++ b/include/cutils/ashmem.h
@@ -10,7 +10,7 @@
#ifndef _CUTILS_ASHMEM_H
#define _CUTILS_ASHMEM_H
-#include <stdint.h>
+#include <stddef.h>
#ifdef __cplusplus
extern "C" {
diff --git a/include/cutils/atomic-arm.h b/include/cutils/atomic-arm.h
new file mode 100644
index 000000000..0dd629d86
--- /dev/null
+++ b/include/cutils/atomic-arm.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_ARM_H
+#define ANDROID_CUTILS_ATOMIC_ARM_H
+
+#include <stdint.h>
+#include <machine/cpu-features.h>
+
+extern inline void android_compiler_barrier(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+#if ANDROID_SMP == 0
+extern inline void android_memory_barrier(void)
+{
+ android_compiler_barrier();
+}
+#elif defined(__ARM_HAVE_DMB)
+extern inline void android_memory_barrier(void)
+{
+ __asm__ __volatile__ ("dmb" : : : "memory");
+}
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline void android_memory_barrier(void)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5"
+ : : "r" (0) : "memory");
+}
+#else
+extern inline void android_memory_barrier(void)
+{
+ typedef void (kuser_memory_barrier)(void);
+ (*(kuser_memory_barrier *)0xffff0fa0)();
+}
+#endif
+
+extern inline int32_t android_atomic_acquire_load(volatile int32_t *ptr)
+{
+ int32_t value = *ptr;
+ android_memory_barrier();
+ return value;
+}
+
+extern inline int32_t android_atomic_release_load(volatile int32_t *ptr)
+{
+ android_memory_barrier();
+ return *ptr;
+}
+
+extern inline void android_atomic_acquire_store(int32_t value,
+ volatile int32_t *ptr)
+{
+ *ptr = value;
+ android_memory_barrier();
+}
+
+extern inline void android_atomic_release_store(int32_t value,
+ volatile int32_t *ptr)
+{
+ android_memory_barrier();
+ *ptr = value;
+}
+
+#if defined(__thumb__)
+extern int android_atomic_cas(int32_t old_value, int32_t new_value,
+ volatile int32_t *ptr);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline int android_atomic_cas(int32_t old_value, int32_t new_value,
+ volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ do {
+ __asm__ __volatile__ ("ldrex %0, [%3]\n"
+ "mov %1, #0\n"
+ "teq %0, %4\n"
+ "strexeq %1, %5, [%3]"
+ : "=&r" (prev), "=&r" (status), "+m"(*ptr)
+ : "r" (ptr), "Ir" (old_value), "r" (new_value)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+ return prev != old_value;
+}
+#else
+extern inline int android_atomic_cas(int32_t old_value, int32_t new_value,
+ volatile int32_t *ptr)
+{
+ typedef int (kuser_cmpxchg)(int32_t, int32_t, volatile int32_t *);
+ int32_t prev, status;
+ prev = *ptr;
+ do {
+ status = (*(kuser_cmpxchg *)0xffff0fc0)(old_value, new_value, ptr);
+ if (__builtin_expect(status == 0, 1))
+ return 0;
+ prev = *ptr;
+ } while (prev == old_value);
+ return 1;
+}
+#endif
+
+extern inline int android_atomic_acquire_cas(int32_t old_value,
+ int32_t new_value,
+ volatile int32_t *ptr)
+{
+ int status = android_atomic_cas(old_value, new_value, ptr);
+ android_memory_barrier();
+ return status;
+}
+
+extern inline int android_atomic_release_cas(int32_t old_value,
+ int32_t new_value,
+ volatile int32_t *ptr)
+{
+ android_memory_barrier();
+ return android_atomic_cas(old_value, new_value, ptr);
+}
+
+
+#if defined(__thumb__)
+extern int32_t android_atomic_swap(int32_t new_value,
+ volatile int32_t *ptr);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline int32_t android_atomic_swap(int32_t new_value,
+ volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ do {
+ __asm__ __volatile__ ("ldrex %0, [%3]\n"
+ "strex %1, %4, [%3]"
+ : "=&r" (prev), "=&r" (status), "+m" (*ptr)
+ : "r" (ptr), "r" (new_value)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+ android_memory_barrier();
+ return prev;
+}
+#else
+extern inline int32_t android_atomic_swap(int32_t new_value,
+ volatile int32_t *ptr)
+{
+ int32_t prev;
+ __asm__ __volatile__ ("swp %0, %2, [%3]"
+ : "=&r" (prev), "+m" (*ptr)
+ : "r" (new_value), "r" (ptr)
+ : "cc");
+ android_memory_barrier();
+ return prev;
+}
+#endif
+
+#if defined(__thumb__)
+extern int32_t android_atomic_add(int32_t increment,
+ volatile int32_t *ptr);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline int32_t android_atomic_add(int32_t increment,
+ volatile int32_t *ptr)
+{
+ int32_t prev, tmp, status;
+ android_memory_barrier();
+ do {
+ __asm__ __volatile__ ("ldrex %0, [%4]\n"
+ "add %1, %0, %5\n"
+ "strex %2, %1, [%4]"
+ : "=&r" (prev), "=&r" (tmp),
+ "=&r" (status), "+m" (*ptr)
+ : "r" (ptr), "Ir" (increment)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#else
+extern inline int32_t android_atomic_add(int32_t increment,
+ volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ android_memory_barrier();
+ do {
+ prev = *ptr;
+ status = android_atomic_cas(prev, prev + increment, ptr);
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#endif
+
+extern inline int32_t android_atomic_inc(volatile int32_t *addr) {
+ return android_atomic_add(1, addr);
+}
+
+extern inline int32_t android_atomic_dec(volatile int32_t *addr) {
+ return android_atomic_add(-1, addr);
+}
+
+#if defined(__thumb__)
+extern int32_t android_atomic_and(int32_t value, volatile int32_t *ptr);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+ int32_t prev, tmp, status;
+ android_memory_barrier();
+ do {
+ __asm__ __volatile__ ("ldrex %0, [%4]\n"
+ "and %1, %0, %5\n"
+ "strex %2, %1, [%4]"
+ : "=&r" (prev), "=&r" (tmp),
+ "=&r" (status), "+m" (*ptr)
+ : "r" (ptr), "Ir" (value)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#else
+extern inline int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ android_memory_barrier();
+ do {
+ prev = *ptr;
+ status = android_atomic_cas(prev, prev & value, ptr);
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#endif
+
+#if defined(__thumb__)
+extern int32_t android_atomic_or(int32_t value, volatile int32_t *ptr);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+extern inline int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+ int32_t prev, tmp, status;
+ android_memory_barrier();
+ do {
+ __asm__ __volatile__ ("ldrex %0, [%4]\n"
+ "orr %1, %0, %5\n"
+ "strex %2, %1, [%4]"
+ : "=&r" (prev), "=&r" (tmp),
+ "=&r" (status), "+m" (*ptr)
+ : "r" (ptr), "Ir" (value)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#else
+extern inline int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ android_memory_barrier();
+ do {
+ prev = *ptr;
+ status = android_atomic_cas(prev, prev | value, ptr);
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+#endif
+
+#endif /* ANDROID_CUTILS_ATOMIC_ARM_H */
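
The ldrex/strex sequences above only commit a store when nothing else touched the
location between the exclusive load and the store, which is why callers are expected
to wrap android_atomic_cas() in a retry loop whenever the new value depends on the old
one. A minimal usage sketch, not part of the patch: atomic_raise_to is a hypothetical
helper and assumes only the declarations from <cutils/atomic.h>.

#include <stdint.h>
#include <cutils/atomic.h>

/* Hypothetical helper: atomically raise *addr to at least 'floor'.
 * android_atomic_release_cas() returns 0 only when the store happened,
 * so any interference simply restarts the read/compare/swap cycle. */
static int32_t atomic_raise_to(int32_t floor, volatile int32_t *addr)
{
    int32_t prev;
    do {
        prev = *addr;
        if (prev >= floor)
            return prev;            /* already high enough, nothing to store */
    } while (android_atomic_release_cas(prev, floor, addr) != 0);
    return prev;
}
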
diff --git a/include/cutils/atomic-inline.h b/include/cutils/atomic-inline.h
new file mode 100644
index 000000000..715e0aadf
--- /dev/null
+++ b/include/cutils/atomic-inline.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_INLINE_H
+#define ANDROID_CUTILS_ATOMIC_INLINE_H
+
+/*
+ * Inline declarations and macros for some special-purpose atomic
+ * operations. These are intended for rare circumstances where a
+ * memory barrier needs to be issued inline rather than as a function
+ * call.
+ *
+ * Most code should not use these.
+ *
+ * Anything that does include this file must set ANDROID_SMP to either
+ * 0 or 1, indicating compilation for UP or SMP, respectively.
+ *
+ * Macros defined in this header:
+ *
+ * void ANDROID_MEMBAR_FULL(void)
+ * Full memory barrier. Provides a compiler reordering barrier, and
+ * on SMP systems emits an appropriate instruction.
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP before including atomic-inline.h"
+#endif
+
+#if defined(__arm__)
+#include <cutils/atomic-arm.h>
+#elif defined(__i386__) || defined(__x86_64__)
+#include <cutils/atomic-x86.h>
+#elif defined(__sh__)
+/* implementation is in atomic-android-sh.c */
+#else
+#error atomic operations are unsupported
+#endif
+
+#if ANDROID_SMP == 0
+#define ANDROID_MEMBAR_FULL android_compiler_barrier
+#else
+#define ANDROID_MEMBAR_FULL android_memory_barrier
+#endif
+
+#endif /* ANDROID_CUTILS_ATOMIC_INLINE_H */
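
ANDROID_MEMBAR_FULL() is the only macro this header exports, and whether it expands to
a real fence or just a compiler barrier is decided entirely by the ANDROID_SMP value
the including code defines. A rough sketch of that contract follows; the publish /
try_consume pair is invented for illustration, and ANDROID_SMP would normally come
from the build system rather than a local #define.

/* Sketch only: the including code, not this header, decides SMP vs UP. */
#define ANDROID_SMP 1               /* build-system decision in real code */
#include <cutils/atomic-inline.h>

static int32_t g_data;
static volatile int32_t g_ready;

static void publish(int32_t value)
{
    g_data = value;
    ANDROID_MEMBAR_FULL();          /* make the payload visible first */
    g_ready = 1;
}

static int try_consume(int32_t *out)
{
    if (g_ready == 0)
        return 0;
    ANDROID_MEMBAR_FULL();          /* don't read g_data ahead of the flag */
    *out = g_data;
    return 1;
}
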
diff --git a/include/cutils/atomic-x86.h b/include/cutils/atomic-x86.h
new file mode 100644
index 000000000..06b643fbe
--- /dev/null
+++ b/include/cutils/atomic-x86.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_X86_H
+#define ANDROID_CUTILS_ATOMIC_X86_H
+
+#include <stdint.h>
+
+extern inline void android_compiler_barrier(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+#if ANDROID_SMP == 0
+extern inline void android_memory_barrier(void)
+{
+ android_compiler_barrier();
+}
+#else
+extern inline void android_memory_barrier(void)
+{
+ __asm__ __volatile__ ("mfence" : : : "memory");
+}
+#endif
+
+extern inline int32_t android_atomic_acquire_load(volatile int32_t *ptr) {
+ int32_t value = *ptr;
+ android_compiler_barrier();
+ return value;
+}
+
+extern inline int32_t android_atomic_release_load(volatile int32_t *ptr) {
+ android_memory_barrier();
+ return *ptr;
+}
+
+extern inline void android_atomic_acquire_store(int32_t value,
+ volatile int32_t *ptr) {
+ *ptr = value;
+ android_memory_barrier();
+}
+
+extern inline void android_atomic_release_store(int32_t value,
+ volatile int32_t *ptr) {
+ android_compiler_barrier();
+ *ptr = value;
+}
+
+extern inline int android_atomic_cas(int32_t old_value, int32_t new_value,
+ volatile int32_t *ptr)
+{
+ int32_t prev;
+ __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev != old_value;
+}
+
+extern inline int android_atomic_acquire_cas(int32_t old_value,
+ int32_t new_value,
+ volatile int32_t *ptr)
+{
+ /* Loads are not reordered with other loads. */
+ return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern inline int android_atomic_release_cas(int32_t old_value,
+ int32_t new_value,
+ volatile int32_t *ptr)
+{
+ /* Stores are not reordered with other stores. */
+ return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern inline int32_t android_atomic_swap(int32_t new_value,
+ volatile int32_t *ptr)
+{
+ __asm__ __volatile__ ("xchgl %1, %0"
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ /* new_value now holds the old value of *ptr */
+ return new_value;
+}
+
+extern inline int32_t android_atomic_add(int32_t increment,
+ volatile int32_t *ptr)
+{
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "+r" (increment), "+m" (*ptr)
+ : : "memory");
+ /* increment now holds the old value of *ptr */
+ return increment;
+}
+
+extern inline int32_t android_atomic_inc(volatile int32_t *addr) {
+ return android_atomic_add(1, addr);
+}
+
+extern inline int32_t android_atomic_dec(volatile int32_t *addr) {
+ return android_atomic_add(-1, addr);
+}
+
+extern inline int32_t android_atomic_and(int32_t value,
+ volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ do {
+ prev = *ptr;
+ status = android_atomic_cas(prev, prev & value, ptr);
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+
+extern inline int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+ int32_t prev, status;
+ do {
+ prev = *ptr;
+ status = android_atomic_cas(prev, prev | value, ptr);
+ } while (__builtin_expect(status != 0, 0));
+ return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_X86_H */
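
On x86 the arithmetic operations are single lock-prefixed instructions, and xadd hands
back the previous value, which is the convention the inc/dec wrappers rely on. A hedged
sketch of a reference count built on that convention: object_t, object_acquire, and
object_release are hypothetical, and a production refcount would also want acquire
ordering before tearing the object down; this only illustrates the return value.

#include <stdint.h>
#include <stdlib.h>
#include <cutils/atomic.h>

/* Hypothetical refcounted object: android_atomic_inc()/dec() return the
 * *previous* value, so the last release observes dec() == 1. */
typedef struct {
    volatile int32_t refs;
    /* ... payload ... */
} object_t;

static void object_acquire(object_t *obj)
{
    android_atomic_inc(&obj->refs);
}

static void object_release(object_t *obj)
{
    if (android_atomic_dec(&obj->refs) == 1) {
        /* previous count was 1: this caller dropped the last reference */
        free(obj);
    }
}
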
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index 5694d66ac..38668486e 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -25,53 +25,102 @@ extern "C" {
#endif
/*
- * NOTE: memory shared between threads is synchronized by all atomic operations
- * below, this means that no explicit memory barrier is required: all reads or
- * writes issued before android_atomic_* operations are guaranteed to complete
- * before the atomic operation takes place.
+ * A handful of basic atomic operations. The appropriate pthread
+ * functions should be used instead of these whenever possible.
+ *
+ * The "acquire" and "release" terms can be defined intuitively in terms
+ * of the placement of memory barriers in a simple lock implementation:
+ * - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
+ * - barrier
+ * - [do work]
+ * - barrier
+ * - store(lock-is-free)
+ * In very crude terms, the initial (acquire) barrier prevents any of the
+ * "work" from happening before the lock is held, and the later (release)
+ * barrier ensures that all of the work happens before the lock is released.
+ * (Think of cached writes, cache read-ahead, and instruction reordering
+ * around the CAS and store instructions.)
+ *
+ * The barriers must apply to both the compiler and the CPU. Note it is
+ * legal for instructions that occur before an "acquire" barrier to be
+ * moved down below it, and for instructions that occur after a "release"
+ * barrier to be moved up above it.
+ *
+ * The ARM-driven implementation we use here is short on subtlety,
+ * and actually requests a full barrier from the compiler and the CPU.
+ * The only difference between acquire and release is in whether they
+ * are issued before or after the atomic operation with which they
+ * are associated. To ease the transition to C/C++ atomic intrinsics,
+ * you should not rely on this, and instead assume that only the minimal
+ * acquire/release protection is provided.
+ *
+ * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
+ * If they are not, atomicity is not guaranteed.
*/
-void android_atomic_write(int32_t value, volatile int32_t* addr);
-
/*
- * all these atomic operations return the previous value
+ * Basic arithmetic and bitwise operations. These all provide a
+ * barrier with "release" ordering, and return the previous value.
+ *
+ * These have the same characteristics (e.g. what happens on overflow)
+ * as the equivalent non-atomic C operations.
*/
-
-
int32_t android_atomic_inc(volatile int32_t* addr);
int32_t android_atomic_dec(volatile int32_t* addr);
-
int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
-int32_t android_atomic_swap(int32_t value, volatile int32_t* addr);
+/*
+ * Perform an atomic load with "acquire" or "release" ordering.
+ *
+ * This is only necessary if you need the memory barrier. A 32-bit read
+ * from a 32-bit aligned address is atomic on all supported platforms.
+ */
+int32_t android_atomic_acquire_load(volatile int32_t* addr);
+int32_t android_atomic_release_load(volatile int32_t* addr);
/*
- * NOTE: Two "quasiatomic" operations on the exact same memory address
- * are guaranteed to operate atomically with respect to each other,
- * but no guarantees are made about quasiatomic operations mixed with
- * non-quasiatomic operations on the same address, nor about
- * quasiatomic operations that are performed on partially-overlapping
- * memory.
+ * Perform an atomic store with "acquire" or "release" ordering.
+ *
+ * This is only necessary if you need the memory barrier. A 32-bit write
+ * to a 32-bit aligned address is atomic on all supported platforms.
*/
+void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
+void android_atomic_release_store(int32_t value, volatile int32_t* addr);
-int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr);
-int64_t android_quasiatomic_read_64(volatile int64_t* addr);
-
/*
- * cmpxchg return a non zero value if the exchange was NOT performed,
- * in other words if oldvalue != *addr
+ * Unconditional swap operation with release ordering.
+ *
+ * Stores the new value at *addr, and returns the previous value.
*/
+int32_t android_atomic_swap(int32_t value, volatile int32_t* addr);
-int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue,
+/*
+ * Compare-and-set operation with "acquire" or "release" ordering.
+ *
+ * This returns zero if the new value was successfully stored, which will
+ * only happen when *addr == oldvalue.
+ *
+ * (The return value is inverted from implementations on other platforms,
+ * but matches the ARM ldrex/strex result.)
+ *
+ * Implementations that use the release CAS in a loop may be less efficient
+ * than possible, because we re-issue the memory barrier on each iteration.
+ */
+int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
+ volatile int32_t* addr);
+int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
volatile int32_t* addr);
-int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
- volatile int64_t* addr);
-
+/*
+ * Aliases for code using an older version of this header. These are now
+ * deprecated and should not be used. The definitions will be removed
+ * in a future release.
+ */
+#define android_atomic_write android_atomic_release_store
+#define android_atomic_cmpxchg android_atomic_release_cas
-
#ifdef __cplusplus
} // extern "C"
#endif
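
The lock recipe described in the new atomic.h comment maps directly onto the
acquire/release entry points it declares. The sketch below is illustrative only
(no backoff, no futex fallback) and assumes just those declarations.

#include <stdint.h>
#include <cutils/atomic.h>

/* Illustrative spinlock following the acquire/release recipe above:
 * 0 means "free", 1 means "held". */
static volatile int32_t g_lock = 0;

static void spin_lock(void)
{
    /* acquire_cas returns 0 when it swapped 0 -> 1, i.e. we own the lock;
     * the acquire barrier keeps the critical section from floating above. */
    while (android_atomic_acquire_cas(0, 1, &g_lock) != 0) {
        /* spin */
    }
}

static void spin_unlock(void)
{
    /* release_store issues the barrier first, so all writes from the
     * critical section are visible before the lock is seen as free. */
    android_atomic_release_store(0, &g_lock);
}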