author     Tobias Brunner <tobias@strongswan.org>    2014-04-11 16:07:32 +0200
committer  Tobias Brunner <tobias@strongswan.org>    2014-04-24 17:54:14 +0200
commit     0f603d425df132adcb60b9f25ac277b26e80c72a (patch)
tree       68b150acfcd3ad078e4311922dad2910e9ce137c /src
parent     efedd0d21e4caf6edae6872f29c470a464e1917a (diff)
utils: Use GCC's __atomic built-ins if available
These are available since GCC 4.7 and will eventually replace the __sync
operations. They support the memory model defined by C++11. For instance,
using __ATOMIC_RELAXED for some operations on the reference counters lets
us avoid the memory barriers that __sync operations require (their memory
model is essentially __ATOMIC_SEQ_CST).
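To illustrate the difference (this sketch is not part of the commit; names
like my_ref_get() are hypothetical), a reference counter built on the
__atomic built-ins can relax the increment while keeping acquire/release
ordering on the decrement:

    #include <stdbool.h>

    typedef unsigned int refcount_t;

    /* Increment needs no ordering guarantees: taking a reference neither
     * observes nor publishes the object's state, so a relaxed RMW is enough. */
    static inline void my_ref_get(refcount_t *ref)
    {
        __atomic_add_fetch(ref, 1, __ATOMIC_RELAXED);
    }

    /* Decrement uses acquire/release ordering so that all writes made while
     * holding a reference happen-before the final holder frees the object.
     * Returns true when the last reference was released. */
    static inline bool my_ref_put(refcount_t *ref)
    {
        return !__atomic_sub_fetch(ref, 1, __ATOMIC_ACQ_REL);
    }

With the older __sync built-ins both operations are full barriers, which is
exactly the cost this change avoids on the increment path.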
Diffstat (limited to 'src')

-rw-r--r--  src/libstrongswan/utils/utils.c |  4 ++--
-rw-r--r--  src/libstrongswan/utils/utils.h | 21 ++++++++++++++++++++-
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/src/libstrongswan/utils/utils.c b/src/libstrongswan/utils/utils.c
index e4da6ddb9..f2a4a065c 100644
--- a/src/libstrongswan/utils/utils.c
+++ b/src/libstrongswan/utils/utils.c
@@ -511,7 +511,7 @@ void nop()
 {
 }
 
-#ifndef HAVE_GCC_ATOMIC_OPERATIONS
+#if !defined(HAVE_GCC_ATOMIC_OPERATIONS) && !defined(HAVE_GCC_SYNC_OPERATIONS)
 
 /**
  * We use a single mutex for all refcount variables.
@@ -578,7 +578,7 @@ bool cas_##name(type *ptr, type oldval, type newval) \
 _cas_impl(bool, bool)
 _cas_impl(ptr, void*)
 
-#endif /* HAVE_GCC_ATOMIC_OPERATIONS */
+#endif /* !HAVE_GCC_ATOMIC_OPERATIONS && !HAVE_GCC_SYNC_OPERATIONS */
 
 #ifdef HAVE_FMEMOPEN_FALLBACK
diff --git a/src/libstrongswan/utils/utils.h b/src/libstrongswan/utils/utils.h
index 4b2990371..8f91e8431 100644
--- a/src/libstrongswan/utils/utils.h
+++ b/src/libstrongswan/utils/utils.h
@@ -750,6 +750,25 @@ typedef u_int refcount_t;
 
 #ifdef HAVE_GCC_ATOMIC_OPERATIONS
 
+#define ref_get(ref) __atomic_add_fetch(ref, 1, __ATOMIC_RELAXED)
+/* The relaxed memory model works fine for increments as these (usually) don't
+ * change the state of refcounted objects. But here we have to ensure that we
+ * free the right stuff if ref counted objects are mutable. So we have to sync
+ * with other threads that call ref_put(). It would be sufficient to use
+ * __ATOMIC_RELEASE here and then call __atomic_thread_fence() with
+ * __ATOMIC_ACQUIRE if we reach 0, but since we don't have control over the use
+ * of ref_put() we have to make sure. */
+#define ref_put(ref) (!__atomic_sub_fetch(ref, 1, __ATOMIC_ACQ_REL))
+#define ref_cur(ref) __atomic_load_n(ref, __ATOMIC_RELAXED)
+
+#define _cas_impl(ptr, oldval, newval) ({ typeof(oldval) _old = oldval; \
+			__atomic_compare_exchange_n(ptr, &_old, newval, FALSE, \
+									__ATOMIC_SEQ_CST, __ATOMIC_RELAXED); })
+#define cas_bool(ptr, oldval, newval) _cas_impl(ptr, oldval, newval)
+#define cas_ptr(ptr, oldval, newval) _cas_impl(ptr, oldval, newval)
+
+#elif defined(HAVE_GCC_SYNC_OPERATIONS)
+
 #define ref_get(ref) __sync_add_and_fetch(ref, 1)
 #define ref_put(ref) (!__sync_sub_and_fetch(ref, 1))
 #define ref_cur(ref) __sync_fetch_and_add(ref, 0)
@@ -759,7 +778,7 @@ typedef u_int refcount_t;
 #define cas_ptr(ptr, oldval, newval) \
 	(__sync_bool_compare_and_swap(ptr, oldval, newval))
 
-#else /* !HAVE_GCC_ATOMIC_OPERATIONS */
+#else /* !HAVE_GCC_ATOMIC_OPERATIONS && !HAVE_GCC_SYNC_OPERATIONS */
 
 /**
  * Get a new reference.
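The comment in the new ref_put() mentions a cheaper variant; for illustration
(hypothetical, not what the commit implements), it would decrement with
release ordering and raise an acquire fence only on the thread that actually
frees the object:

    /* Hypothetical ref_put() variant from the comment above: release ordering
     * on every decrement, acquire fence only on the final one. */
    static inline bool ref_put_fence(refcount_t *ref)
    {
        if (!__atomic_sub_fetch(ref, 1, __ATOMIC_RELEASE))
        {
            /* Pairs with the release decrements of all other holders, so
             * their writes are visible before the object is destroyed. */
            __atomic_thread_fence(__ATOMIC_ACQUIRE);
            return true;
        }
        return false;
    }

The commit opts for __ATOMIC_ACQ_REL on every decrement instead, since it
cannot control what callers do with the ref_put() result.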