author     Lennart Poettering <lennart@poettering.net>   2008-03-26 21:15:52 +0000
committer  Lennart Poettering <lennart@poettering.net>   2008-03-26 21:15:52 +0000
commit     7262e2fc35bd8597cf80b14e0da7c8692246fd3b (patch)
tree       247d15d2f4d60087e77feccbd39f54692ad3965e /src/pulsecore
parent     6ad7621b61d40dba9b877379ef5f15f73a2ed268 (diff)
add proper arm atomic ops support, patch from Jyri Sarha
git-svn-id: file:///home/lennart/svn/public/pulseaudio/trunk@2127 fefdeb5f-60dc-0310-8127-8f9354f1896f
Diffstat (limited to 'src/pulsecore')
-rw-r--r--  src/pulsecore/atomic.h | 231
1 file changed, 230 insertions(+), 1 deletion(-)
diff --git a/src/pulsecore/atomic.h b/src/pulsecore/atomic.h
index c2c99888..ad3dca30 100644
--- a/src/pulsecore/atomic.h
+++ b/src/pulsecore/atomic.h
@@ -36,7 +36,7 @@
* On gcc >= 4.1 we use the builtin atomic functions. Otherwise we use
* libatomic_ops
*/
-
+#
#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif
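
/* Illustration, not part of this patch: on gcc >= 4.1 the same operations
   map directly onto the __sync builtins. The ex_* names are hypothetical. */
typedef struct ex_atomic {
    volatile int value;
} ex_atomic_t;

static inline int ex_atomic_add(ex_atomic_t *a, int i) {
    /* Returns the previous value, like pa_atomic_add() in the patch below */
    return __sync_fetch_and_add(&a->value, i);
}

static inline int ex_atomic_cmpxchg(ex_atomic_t *a, int old_i, int new_i) {
    /* Full-barrier compare-and-swap; returns non-zero on success */
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
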
@@ -182,6 +182,235 @@ static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* n
return result;
}
+#elif defined(ATOMIC_ARM_INLINE_ASM)
+
+/*
+ These should only be enabled if we have ARMv6 or better.
+*/
+
+typedef struct pa_atomic {
+ volatile int value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
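
/* Usage sketch, not part of the patch: a statically initialized atomic
   counter (hypothetical name). */
static pa_atomic_t ex_n_clients = PA_ATOMIC_INIT(0);
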
+
+static inline void pa_memory_barrier(void) {
+#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
+ asm volatile ("mcr p15, 0, r0, c7, c10, 5 @ dmb");
+#endif
+}
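
/* For comparison, not part of the patch: ARMv7 replaced the CP15
   c7/c10/5 operation used above with a dedicated instruction. A
   hypothetical ARMv7-only barrier would be: */
static inline void ex_memory_barrier_v7(void) {
    asm volatile ("dmb" : : : "memory");
}
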
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ pa_memory_barrier();
+ return a->value;
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ a->value = i;
+ pa_memory_barrier();
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ unsigned long not_exclusive;
+ int new_val, old_val;
+
+ pa_memory_barrier();
+ do {
+ asm volatile ("ldrex %0, [%3]\n"
+ "add %2, %0, %4\n"
+ "strex %1, %2, [%3]\n"
+ : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
+ : "r" (&a->value), "Ir" (i)
+ : "cc");
+ } while(not_exclusive);
+ pa_memory_barrier();
+
+ return old_val;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ unsigned long not_exclusive;
+ int new_val, old_val;
+
+ pa_memory_barrier();
+ do {
+ asm volatile ("ldrex %0, [%3]\n"
+ "sub %2, %0, %4\n"
+ "strex %1, %2, [%3]\n"
+ : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
+ : "r" (&a->value), "Ir" (i)
+ : "cc");
+ } while(not_exclusive);
+ pa_memory_barrier();
+
+ return old_val;
+}
+
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ return pa_atomic_add(a, 1);
+}
+
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ return pa_atomic_sub(a, 1);
+}
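
/* Illustration, not part of the patch: because pa_atomic_dec() returns
   the previous value, the caller that observed 1 dropped the last
   reference and may free the object (ex_* names are hypothetical). */
#include <stdlib.h>

typedef struct ex_object {
    pa_atomic_t ref;
} ex_object;

static inline void ex_object_unref(ex_object *o) {
    if (pa_atomic_dec(&o->ref) == 1)
        free(o);
}
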
+
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ unsigned long not_equal, not_exclusive;
+
+ pa_memory_barrier();
+ do {
+ asm volatile ("ldrex %0, [%2]\n"
+ "subs %0, %0, %3\n"
+ "mov %1, %0\n"
+ "strexeq %0, %4, [%2]\n"
+ : "=&r" (not_exclusive), "=&r" (not_equal)
+ : "r" (&a->value), "Ir" (old_i), "r" (new_i)
+ : "cc");
+ } while(not_exclusive && !not_equal);
+ pa_memory_barrier();
+
+ return !not_equal;
+}
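
/* Illustration, not part of the patch: the usual lock-free update loop
   built on pa_atomic_cmpxchg(), here a saturating increment
   (hypothetical helper). Returns non-zero if the counter was bumped. */
static inline int ex_atomic_inc_capped(pa_atomic_t *a, int cap) {
    int old;
    do {
        old = pa_atomic_load(a);
        if (old >= cap)
            return 0; /* already at the cap */
    } while (!pa_atomic_cmpxchg(a, old, old + 1)); /* raced, retry */
    return 1;
}
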
+
+typedef struct pa_atomic_ptr {
+ volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+ pa_memory_barrier();
+ return (void*) a->value;
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+ a->value = (unsigned long) p;
+ pa_memory_barrier();
+}
+
+static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+ unsigned long not_equal, not_exclusive;
+
+ pa_memory_barrier();
+ do {
+ asm volatile ("ldrex %0, [%2]\n"
+ "subs %0, %0, %3\n"
+ "mov %1, %0\n"
+ "strexeq %0, %4, [%2]\n"
+ : "=&r" (not_exclusive), "=&r" (not_equal)
+ : "r" (&a->value), "Ir" (old_p), "r" (new_p)
+ : "cc");
+ } while(not_exclusive && !not_equal);
+ pa_memory_barrier();
+
+ return !not_equal;
+}
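
/* Illustration, not part of the patch: pa_atomic_ptr_cmpxchg() is all
   that is needed for a lock-free singly-linked push (ex_* names are
   hypothetical). */
typedef struct ex_node {
    struct ex_node *next;
} ex_node;

static inline void ex_push(pa_atomic_ptr_t *head, ex_node *n) {
    ex_node *old;
    do {
        old = pa_atomic_ptr_load(head);
        n->next = old;
    } while (!pa_atomic_ptr_cmpxchg(head, old, n));
}
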
+
+#elif defined(ATOMIC_ARM_LINUX_HELPERS)
+
+/* See the file arch/arm/kernel/entry-armv.S in your kernel sources for
+   more information about these functions. The ARM kernel helper functions
+   first appeared in 2.6.16.
+   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
+   prefer the inline asm implementation or have an obsolete Linux kernel.
+*/
+/* Memory barrier */
+typedef void (__kernel_dmb_t)(void);
+#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
+
+static inline void pa_memory_barrier(void) {
+#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
+ __kernel_dmb();
+#endif
+}
+
+/* Atomic compare-and-exchange (__kernel_cmpxchg contains memory barriers if needed) */
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+
+/* Unsigned-long variant of the same helper; used by the pointer operations below just to avoid cast warnings */
+typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
+#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)
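
/* Illustration, not part of the patch: the helper returns 0 when *ptr
   matched oldval and was replaced, non-zero otherwise (including rare
   spurious failures that the retry loops below guard against). A
   hypothetical one-shot try-set: */
static inline int ex_try_set(volatile int *p, int from, int to) {
    return __kernel_cmpxchg(from, to, p) == 0;
}
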
+
+typedef struct pa_atomic {
+ volatile int value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ pa_memory_barrier();
+ return a->value;
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ a->value = i;
+ pa_memory_barrier();
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ int old_val;
+ do {
+ old_val = a->value;
+ } while(__kernel_cmpxchg(old_val, old_val + i, &a->value));
+ return old_val;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ int old_val;
+ do {
+ old_val = a->value;
+ } while(__kernel_cmpxchg(old_val, old_val - i, &a->value));
+ return old_val;
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ return pa_atomic_add(a, 1);
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ return pa_atomic_sub(a, 1);
+}
+
+/* Returns non-zero when the operation was successful. */
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ int failed = 1;
+ do {
+ failed = __kernel_cmpxchg(old_i, new_i, &a->value);
+ } while(failed && a->value == old_i);
+ return !failed;
+}
+
+typedef struct pa_atomic_ptr {
+ volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+ pa_memory_barrier();
+ return (void*) a->value;
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+ a->value = (unsigned long) p;
+ pa_memory_barrier();
+}
+
+static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* new_p) {
+ int failed = 1;
+ do {
+ failed = __kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
+ } while(failed && a->value == (unsigned long) old_p);
+ return !failed;
+}
+
#else
/* libatomic_ops based implementation */
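
/* Illustration, not part of the patch: a sketch of how the fallback
   wraps libatomic_ops, assuming <atomic_ops.h> is available. */
#include <atomic_ops.h>

typedef struct ex_atomic {
    volatile AO_t value;
} ex_atomic_t;

static inline int ex_atomic_cmpxchg(ex_atomic_t *a, int old_i, int new_i) {
    /* AO_compare_and_swap_full returns non-zero on success */
    return AO_compare_and_swap_full(&a->value, (AO_t) old_i, (AO_t) new_i);
}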