Diffstat (limited to 'src/libstrongswan/utils/utils.h')
-rw-r--r--  src/libstrongswan/utils/utils.h  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/src/libstrongswan/utils/utils.h b/src/libstrongswan/utils/utils.h
index 4b2990371..8f91e8431 100644
--- a/src/libstrongswan/utils/utils.h
+++ b/src/libstrongswan/utils/utils.h
@@ -750,6 +750,25 @@ typedef u_int refcount_t;
#ifdef HAVE_GCC_ATOMIC_OPERATIONS

+#define ref_get(ref) __atomic_add_fetch(ref, 1, __ATOMIC_RELAXED)
+/* The relaxed memory model works fine for increments as these (usually) don't
+ * change the state of refcounted objects.  For decrements, however, we have
+ * to synchronize with other threads calling ref_put() so that the thread
+ * dropping the last reference sees a consistent object state before freeing
+ * it.  __ATOMIC_RELEASE plus an __atomic_thread_fence() with __ATOMIC_ACQUIRE
+ * when the count reaches 0 would suffice, but since we have no control over
+ * how callers use the result of ref_put() we use __ATOMIC_ACQ_REL. */
+#define ref_put(ref) (!__atomic_sub_fetch(ref, 1, __ATOMIC_ACQ_REL))
+#define ref_cur(ref) __atomic_load_n(ref, __ATOMIC_RELAXED)
+
+#define _cas_impl(ptr, oldval, newval) ({ typeof(oldval) _old = oldval; \
+ __atomic_compare_exchange_n(ptr, &_old, newval, FALSE, \
+ __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); })
+#define cas_bool(ptr, oldval, newval) _cas_impl(ptr, oldval, newval)
+#define cas_ptr(ptr, oldval, newval) _cas_impl(ptr, oldval, newval)
+
+#elif defined(HAVE_GCC_SYNC_OPERATIONS)
+
#define ref_get(ref) __sync_add_and_fetch(ref, 1)
#define ref_put(ref) (!__sync_sub_and_fetch(ref, 1))
#define ref_cur(ref) __sync_fetch_and_add(ref, 0)

@@ -759,7 +778,7 @@ typedef u_int refcount_t;
#define cas_ptr(ptr, oldval, newval) \
	(__sync_bool_compare_and_swap(ptr, oldval, newval))

-#else /* !HAVE_GCC_ATOMIC_OPERATIONS */
+#else /* !HAVE_GCC_ATOMIC_OPERATIONS && !HAVE_GCC_SYNC_OPERATIONS */

/**
* Get a new reference.
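
For context (not part of the patch): a minimal usage sketch of the macros this
diff touches. The type shared_t and the functions shared_get(), shared_put()
and shared_init_once() are hypothetical names used only for illustration; the
sketch assumes a strongSwan-style environment in which <library.h> provides
refcount_t, TRUE/FALSE and the ref_get()/ref_put()/cas_bool() macros defined
above.

#include <stdbool.h>
#include <stdlib.h>

#include <library.h>	/* assumption: pulls in refcount_t and the macros above */

/* Hypothetical refcounted object managed with the macros from utils.h. */
typedef struct shared_t {
	refcount_t ref;		/* reference count, set to 1 by the creator */
	bool initialized;	/* one-shot flag, flipped once via cas_bool() */
} shared_t;

/* Take an additional reference.  The relaxed increment in ref_get() is
 * sufficient because getting a reference doesn't change the object. */
static shared_t *shared_get(shared_t *this)
{
	ref_get(&this->ref);
	return this;
}

/* Release a reference.  ref_put() returns TRUE only for the thread dropping
 * the last reference; its acquire/release ordering ensures that this thread
 * sees all writes other threads made before releasing their references. */
static void shared_put(shared_t *this)
{
	if (ref_put(&this->ref))
	{
		free(this);
	}
}

/* One-time initialization: only the first caller wins the FALSE -> TRUE
 * swap and performs the setup, all other callers see the flag already set. */
static void shared_init_once(shared_t *this)
{
	if (cas_bool(&this->initialized, FALSE, TRUE))
	{
		/* ... perform one-time setup ... */
	}
}

Whichever branch of the #ifdef chain gets compiled in, the semantics are the
same: ref_put() returns true exactly once, for the caller releasing the final
reference, which is why the __atomic variant needs the full acquire/release
ordering discussed in the comment rather than a relaxed decrement.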