Diffstat (limited to 'src/pulsecore/atomic.h')
-rw-r--r--  src/pulsecore/atomic.h  193
1 file changed, 175 insertions(+), 18 deletions(-)
diff --git a/src/pulsecore/atomic.h b/src/pulsecore/atomic.h
index 013e8c20..c2c99888 100644
--- a/src/pulsecore/atomic.h
+++ b/src/pulsecore/atomic.h
@@ -24,48 +24,201 @@
USA.
***/
-#include <atomic_ops.h>
-
-/* atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*).
+/*
+ * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is
+ * not guaranteed, however, that sizeof(AO_t) == sizeof(size_t),
+ * although that is very likely.
+ *
+ * For now we do only full memory barriers. Eventually we might want
+ * to support more elaborate memory barriers, in which case we will add
+ * suffixes to the function names.
*
- * It is not guaranteed however, that sizeof(AO_t) == sizeof(size_t).
- * however very likely. */
+ * On gcc >= 4.1 we use the builtin atomic functions. Otherwise we use
+ * libatomic_ops.
+ */
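HAVE_ATOMIC_BUILTINS is expected to come from config.h (hence the PACKAGE check below). A minimal sketch of the kind of compile-and-link probe configure could use to detect the __sync builtins; the exact test is an assumption, not taken from this commit:

    /* Links only if the compiler provides the __sync builtins
     * (gcc >= 4.1 on most targets). */
    int main(void) {
        volatile int v = 0;
        __sync_fetch_and_add(&v, 1);            /* atomic increment, returns old value */
        __sync_synchronize();                   /* full memory barrier */
        return __sync_bool_compare_and_swap(&v, 1, 2) ? 0 : 1;
    }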
-typedef struct pa_atomic_int {
- volatile AO_t value;
-} pa_atomic_int_t;
+#ifndef PACKAGE
+#error "Please include config.h before including this file!"
+#endif
+
+#ifdef HAVE_ATOMIC_BUILTINS
+
+/* __sync based implementation */
+
+typedef struct pa_atomic {
+ volatile int value;
+} pa_atomic_t;
#define PA_ATOMIC_INIT(v) { .value = (v) }
-/* For now we do only full memory barriers. Eventually we might want
- * to support more elaborate memory barriers, in which case we will add
- * suffixes to the function names */
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ __sync_synchronize();
+ return a->value;
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ a->value = i;
+ __sync_synchronize();
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ return __sync_fetch_and_add(&a->value, i);
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ return __sync_fetch_and_sub(&a->value, i);
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ return pa_atomic_add(a, 1);
+}
+
+/* Returns the previously set value */
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ return pa_atomic_sub(a, 1);
+}
+
+/* Returns non-zero when the operation was successful. */
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
+}
+
+typedef struct pa_atomic_ptr {
+ volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+ __sync_synchronize();
+ return (void*) a->value;
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+ a->value = (unsigned long) p;
+ __sync_synchronize();
+}
+
+static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
+ return __sync_bool_compare_and_swap(&a->value, (unsigned long) old_p, (unsigned long) new_p);
+}
+
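A minimal usage sketch of the integer API, assuming this header is included as pulsecore/atomic.h; the object type and functions are illustrative, not part of this commit. Because pa_atomic_inc() and pa_atomic_dec() return the previous value, reference counting falls out directly:

    #include <stdlib.h>

    typedef struct my_object {
        pa_atomic_t ref;       /* would start at PA_ATOMIC_INIT(1) on allocation */
        /* ... payload ... */
    } my_object;

    static void my_object_ref(my_object *o) {
        pa_atomic_inc(&o->ref);
    }

    static void my_object_unref(my_object *o) {
        /* The previous value was 1, so this was the last reference. */
        if (pa_atomic_dec(&o->ref) == 1)
            free(o);
    }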
+#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))
+
+#error "The native atomic operations implementation for AMD64 has not been tested. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: make the native atomic operations implementation for AMD64 work, fix libatomic_ops, or upgrade your GCC."
+
+/* Adapted from glibc */
+
+typedef struct pa_atomic {
+ volatile int value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
+ return a->value;
+}
+
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
+ a->value = i;
+}
+
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
+ int result;
+
+ __asm__ __volatile__ ("lock; xaddl %0, %1"
+ : "=r" (result), "=m" (a->value)
+ : "0" (i), "m" (a->value));
+
+ return result;
+}
+
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
+ return pa_atomic_add(a, -i);
+}
+
+static inline int pa_atomic_inc(pa_atomic_t *a) {
+ return pa_atomic_add(a, 1);
+}
-static inline int pa_atomic_load(const pa_atomic_int_t *a) {
+static inline int pa_atomic_dec(pa_atomic_t *a) {
+ return pa_atomic_sub(a, 1);
+}
+
+/* Returns non-zero when the operation was successful */
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
+ int result;
+
+ /* cmpxchgl compares %eax (old_i) with a->value and, if they are
+ * equal, stores new_i; %eax always receives the previous value. */
+ __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
+ : "=a" (result), "=m" (a->value)
+ : "r" (new_i), "m" (a->value), "0" (old_i));
+
+ return result == old_i;
+}
+
+typedef struct pa_atomic_ptr {
+ volatile unsigned long value;
+} pa_atomic_ptr_t;
+
+#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }
+
+static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
+ return (void*) a->value;
+}
+
+static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
+ a->value = (unsigned long) p;
+}
+
+/* Returns non-zero when the operation was successful */
+static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
+ void *result;
+
+ __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
+ : "=a" (result), "=m" (a->value)
+ : "r" (new_p), "m" (a->value), "0" (old_p));
+
+ return result == old_p;
+}
+
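Since the #error above marks this AMD64 implementation as untested, a single-threaded smoke test of the intended semantics might look like the following sketch. It validates return-value contracts only, not atomicity under contention, and assumes config.h and this header are pulled in by the build:

    #include <assert.h>

    int main(void) {
        pa_atomic_t a = PA_ATOMIC_INIT(5);

        assert(pa_atomic_add(&a, 3) == 5);     /* xaddl returns the old value */
        assert(pa_atomic_load(&a) == 8);
        assert(pa_atomic_cmpxchg(&a, 8, 1));   /* succeeds: value was 8 */
        assert(!pa_atomic_cmpxchg(&a, 8, 2));  /* fails: value is now 1 */
        assert(pa_atomic_load(&a) == 1);
        return 0;
    }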
+#else
+
+/* libatomic_ops based implementation */
+
+#include <atomic_ops.h>
+
+typedef struct pa_atomic {
+ volatile AO_t value;
+} pa_atomic_t;
+
+#define PA_ATOMIC_INIT(v) { .value = (v) }
+
+static inline int pa_atomic_load(const pa_atomic_t *a) {
return (int) AO_load_full((AO_t*) &a->value);
}
-static inline void pa_atomic_store(pa_atomic_int_t *a, int i) {
+static inline void pa_atomic_store(pa_atomic_t *a, int i) {
AO_store_full(&a->value, (AO_t) i);
}
-static inline int pa_atomic_add(pa_atomic_int_t *a, int i) {
+static inline int pa_atomic_add(pa_atomic_t *a, int i) {
return AO_fetch_and_add_full(&a->value, (AO_t) i);
}
-static inline int pa_atomic_sub(pa_atomic_int_t *a, int i) {
+static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
return AO_fetch_and_add_full(&a->value, (AO_t) -i);
}
-static inline int pa_atomic_inc(pa_atomic_int_t *a) {
+static inline int pa_atomic_inc(pa_atomic_t *a) {
return AO_fetch_and_add1_full(&a->value);
}
-static inline int pa_atomic_dec(pa_atomic_int_t *a) {
+static inline int pa_atomic_dec(pa_atomic_t *a) {
return AO_fetch_and_sub1_full(&a->value);
}
-static inline int pa_atomic_cmpxchg(pa_atomic_int_t *a, int old_i, int new_i) {
+static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
return AO_compare_and_swap_full(&a->value, old_i, new_i);
}
@@ -73,6 +226,8 @@ typedef struct pa_atomic_ptr {
volatile AO_t value;
} pa_atomic_ptr_t;
+#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }
+
static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
return (void*) AO_load_full((AO_t*) &a->value);
}
@@ -86,3 +241,5 @@ static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void* n
}
#endif
+
+#endif
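Whichever backend is selected, pa_atomic_ptr_cmpxchg() is the building block for simple lock-free structures. A sketch of a lock-free stack push in the classic CAS-loop style; the node type and function name are illustrative, not part of this commit:

    typedef struct node {
        struct node *next;
        /* ... payload ... */
    } node;

    static void stack_push(pa_atomic_ptr_t *head, node *n) {
        void *old;

        /* Retry until no other thread has changed the head between
         * our load and our compare-and-swap. */
        do {
            old = pa_atomic_ptr_load(head);
            n->next = old;
        } while (!pa_atomic_ptr_cmpxchg(head, old, n));
    }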