#include <amino/util.h>


#if !defined(__GNUC__)
#error "This file must be compiled with a GNUC-compatible compiler"
#endif

#if !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 0))
#error "GCC version too low. It must be greater than 4.0"
#endif


/*
 * Atomic load. An aligned x86 load already has acquire semantics in
 * hardware, so for the non-relaxed orderings it is enough to place
 * compiler barriers around the load to forbid compiler reordering.
 */
#define _ATOMIC_LOAD_( __a__, __x__ ) \
({\
    if((__x__) != memory_order_relaxed) \
        compiler_barrier();\
    __typeof__((__a__)->__f__) __r__ = (__a__)->__f__; \
    if((__x__) != memory_order_relaxed) \
        compiler_barrier();\
    __r__; })
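
/*
 * Usage sketch (hypothetical names; assumes an atomic type whose value
 * field is named __f__, as these macros expect):
 *
 *   atomic_int a;
 *   int v = _ATOMIC_LOAD_( &a, memory_order_acquire );
 */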

/*
 * Atomic store. For memory_order_seq_cst the new value is written with
 * XCHG, whose implicit LOCK prefix makes the store sequentially
 * consistent on x86; weaker orderings need only a compiler barrier
 * followed by a plain store.
 */
#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
({\
    __typeof__((__a__)->__f__) __v__ = (__m__); \
    if((__x__) != memory_order_relaxed) \
        compiler_barrier();\
    if((__x__) == memory_order_seq_cst)\
        __asm__ __volatile__(\
            "xchg %0, %1"\
            :"+q"(__v__), "+m"((__a__)->__f__)\
            :\
            :"memory"\
        );\
    else\
        (__a__)->__f__ = __v__; \
    __m__; })
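
/*
 * Usage sketch (hypothetical names):
 *
 *   atomic_int a;
 *   _ATOMIC_STORE_( &a, 42, memory_order_seq_cst );   (XCHG path)
 *   _ATOMIC_STORE_( &a, 7, memory_order_release );    (plain-store path)
 */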


/*
 * Memory fence, dispatched on the requested ordering: acquire and
 * release need only a compiler barrier on x86, sequential consistency
 * uses SC_FENCE, and acquire-release uses CC_FENCE. Both fence macros
 * are expected to come from <amino/util.h>.
 */
#define _ATOMIC_FENCE_( __a__, __x__ ) \
({ if((__x__) == memory_order_acquire || (__x__) == memory_order_release) \
       compiler_barrier(); \
   else if((__x__) == memory_order_seq_cst) \
       SC_FENCE; \
   else if((__x__) == memory_order_acq_rel) \
       CC_FENCE; \
})
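
/*
 * Usage sketch: request a full fence on an atomic object.
 *
 *   _ATOMIC_FENCE_( &a, memory_order_seq_cst );
 */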


/*
 * Atomic compare-and-swap. Delegates to the cas() helper, which
 * dispatches on the operand width and reports whether the exchange
 * succeeded; the ordering argument is unused because cas() is assumed
 * to be sequentially consistent.
 */
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
({ \
    __typeof__(__m__) __v__ = (__m__); \
    bool __r__ = cas(__a__, __e__, (unsigned long)__v__, sizeof(__v__)); \
    __r__; })
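
/*
 * Usage sketch (hypothetical names):
 *
 *   int expected = 0;
 *   bool ok = _ATOMIC_CMPSWP_( &a, &expected, 1, memory_order_seq_cst );
 */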



/*
 * Atomic read-modify-write: read the current value, compute the new
 * value for the requested operation, and retry with cas() until no
 * other thread has modified the location in between (a CAS loop).
 * Evaluates to the value observed before the modification.
 */
#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
   __typeof__(__m__) __oldValue__; \
   __typeof__((__a__)->__f__) __newValue__; \
   do { \
       __oldValue__ = *__p__; \
       __newValue__ = __oldValue__; \
       switch(__o__){ \
       case ops_swap: \
           __newValue__ = __m__; \
           break; \
       case ops_add: \
           __newValue__ += __m__; \
           break; \
       case ops_sub: \
           __newValue__ -= __m__; \
           break; \
       case ops_and: \
           __newValue__ &= __m__; \
           break; \
       case ops_or: \
           __newValue__ |= __m__; \
           break; \
       case ops_xor: \
           __newValue__ ^= __m__; \
           break; \
       } \
   } while(!cas(__a__, &__oldValue__, (unsigned long)__newValue__, sizeof(__newValue__))); \
   __oldValue__; })
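
/*
 * Usage sketch (hypothetical names): a fetch-and-add that returns the
 * previous value, as atomic_fetch_add would.
 *
 *   int old = _ATOMIC_MODIFY_( &a, ops_add, 1, memory_order_seq_cst );
 */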



/*
 * A value of 2 means the corresponding atomic types are always
 * lock-free on this platform, per the C++0x draft.
 */
#define ATOMIC_INTEGRAL_LOCK_FREE 2
#define ATOMIC_ADDRESS_LOCK_FREE 2


/* C interface to the C++0x atomic_flag operations. */

bool atomic_flag_test_and_set_explicit(
    volatile atomic_flag* __a__, memory_order __x__ );

bool atomic_flag_test_and_set( volatile atomic_flag* __a__ );

void atomic_flag_clear_explicit(
    volatile atomic_flag* __a__, memory_order __x__ );

void atomic_flag_clear( volatile atomic_flag* __a__ );

void atomic_flag_fence( const volatile atomic_flag* __a__, memory_order __x__ );
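
/*
 * Usage sketch (hypothetical names): a minimal test-and-set spinlock
 * built on atomic_flag, assuming a zero-initialized flag.
 *
 *   atomic_flag lock_flag;
 *
 *   void lock(void) {
 *       while( atomic_flag_test_and_set_explicit( &lock_flag,
 *                                                 memory_order_acquire ) )
 *           ;
 *   }
 *
 *   void unlock(void) {
 *       atomic_flag_clear_explicit( &lock_flag, memory_order_release );
 *   }
 */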


#ifdef __cplusplus

/* C++ member functions, defined in terms of the C interface above. */

inline bool atomic_flag::test_and_set( memory_order __x__ ) volatile
{ return atomic_flag_test_and_set_explicit( this, __x__ ); }

inline void atomic_flag::clear( memory_order __x__ ) volatile
{ atomic_flag_clear_explicit( this, __x__ ); }

inline void atomic_flag::fence( memory_order __x__ ) const volatile
{ atomic_flag_fence( this, __x__ ); }

#endif
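
/*
 * Usage sketch (C++, hypothetical names):
 *
 *   atomic_flag f;
 *   if( !f.test_and_set( memory_order_acquire ) ) {
 *       ... critical section ...
 *       f.clear( memory_order_release );
 *   }
 */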