00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031 #ifndef _GLIBCXX_ATOMIC_0_H
00032 #define _GLIBCXX_ATOMIC_0_H 1
00033
00034 #pragma GCC system_header
00035
00036
00037
00038
00039 namespace __atomic0
00040 {
00041 struct atomic_flag;
00042
00043
// Locked load protocol: acquire the external flag that guards this
// object's address (__atomic_flag_for_address), copy the value out,
// release the flag, and yield the copy as the value of the statement
// expression.  _ATOMIC_MEMBER_ is #defined by the including file to
// name the guarded data member of __a.
// NOTE(review): local names (__p, __g, __r) live in the caller's scope;
// they must not appear in any argument expression or they would be
// captured by these declarations.
#define _ATOMIC_LOAD_(__a, __x)                                        \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;                \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);          \
    __atomic_flag_wait_explicit(__g, __x);                             \
    __typeof__ _ATOMIC_MEMBER_ __r = *__p;                             \
    atomic_flag_clear_explicit(__g, __x);                              \
    __r; })
00051
// Locked store protocol: copy the new value, acquire the flag guarding
// this object's address, write through, release the flag; yields the
// value stored.  _ATOMIC_MEMBER_ is #defined by the including file.
//
// FIX: the internal temporary was previously named __v, which is
// captured when a caller passes an argument spelled __v (as
// atomic_address::store and atomic_address::exchange do).  The
// expansion then read `__typeof__(__v) __v = (__v);` — the point of
// declaration of the macro-local __v precedes its initializer, so the
// local self-initialized and an indeterminate value was stored.  The
// temporary is now __w, which no caller in this header uses.
#define _ATOMIC_STORE_(__a, __m, __x)                                  \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;                \
    __typeof__(__m) __w = (__m);                                       \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);          \
    __atomic_flag_wait_explicit(__g, __x);                             \
    *__p = __w;                                                        \
    atomic_flag_clear_explicit(__g, __x);                              \
    __w; })
00060
// Locked read-modify-write protocol: copy the operand, acquire the
// guard flag, save the old value, apply the compound-assignment
// operator __o (e.g. +=, =, &=) through the pointer, release the flag;
// yields the OLD value (fetch_op semantics).
//
// FIX: the internal temporary was previously named __v and was captured
// when a caller passed an argument spelled __v (atomic_address::exchange
// does), producing the self-initialization `__typeof__(__v) __v = (__v);`
// and an indeterminate stored value.  Renamed to __w.
#define _ATOMIC_MODIFY_(__a, __o, __m, __x)                            \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;                \
    __typeof__(__m) __w = (__m);                                       \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);          \
    __atomic_flag_wait_explicit(__g, __x);                             \
    __typeof__ _ATOMIC_MEMBER_ __r = *__p;                             \
    *__p __o __w;                                                      \
    atomic_flag_clear_explicit(__g, __x);                              \
    __r; })
00070
// Locked compare-exchange protocol: under the guard flag, compare the
// current value with *__e (the expected value, passed by pointer); on
// match store the desired value and yield true, otherwise write the
// observed value back into *__e and yield false.
//
// Hygiene fix, consistent with _ATOMIC_STORE_/_ATOMIC_MODIFY_: the
// internal temporary __v is renamed __w so it cannot capture a caller
// argument spelled __v (the self-initialization trap); the old value
// temporary __t__ is renamed __t to match.
#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x)                         \
  ({__typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_;                \
    __typeof__(__e) __q = (__e);                                       \
    __typeof__(__m) __w = (__m);                                       \
    bool __r;                                                          \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);          \
    __atomic_flag_wait_explicit(__g, __x);                             \
    __typeof__ _ATOMIC_MEMBER_ __t = *__p;                             \
    if (__t == *__q) { *__p = __w; __r = true; }                       \
    else { *__q = __t; __r = false; }                                  \
    atomic_flag_clear_explicit(__g, __x);                              \
    __r; })
00083
00084
/// atomic_flag: the basic test-and-set primitive of the C++0x atomics,
/// generic (lock-based) implementation.  Storage comes from the base
/// class __atomic_flag_base, declared elsewhere; test_and_set/clear are
/// defined out of line (in the runtime library).
struct atomic_flag : public __atomic_flag_base
{
  atomic_flag() = default;
  ~atomic_flag() = default;
  // Non-copyable; assignment is deleted on volatile objects too.
  atomic_flag(const atomic_flag&) = delete;
  atomic_flag& operator=(const atomic_flag&) volatile = delete;

  // Converting constructor: brace-initializes the base with the flag value.
  atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

  // Atomically set the flag, returning its previous value.
  bool
  test_and_set(memory_order __m = memory_order_seq_cst);

  // Atomically clear the flag.
  void
  clear(memory_order __m = memory_order_seq_cst);
};
00101
00102
/// atomic_address: atomic void* for the C++0x atomics, generic
/// (lock-based) implementation.  Every operation serializes through an
/// external flag obtained from __atomic_flag_for_address (via the
/// _ATOMIC_* macros or open-coded below), so is_lock_free() is always
/// false.  The memory_order asserts reject orders that are invalid for
/// the given operation per the C++0x draft.
struct atomic_address
{
private:
  void* _M_i;  // the stored pointer, guarded by the per-address flag

public:
  atomic_address() = default;
  ~atomic_address() = default;
  // Non-copyable; assignment deleted on volatile objects too.
  atomic_address(const atomic_address&) = delete;
  atomic_address& operator=(const atomic_address&) volatile = delete;

  // Converting constructor; not atomic (object not yet shared).
  atomic_address(void* __v) { _M_i = __v; }

  // Lock-based fallback: never lock-free.
  bool
  is_lock_free() const
  { return false; }

  // Atomic store; acquire-flavored orders are invalid for a store.
  void
  store(void* __v, memory_order __m = memory_order_seq_cst)
  {
    __glibcxx_assert(__m != memory_order_acquire);
    __glibcxx_assert(__m != memory_order_acq_rel);
    __glibcxx_assert(__m != memory_order_consume);
    _ATOMIC_STORE_(this, __v, __m);
  }

  // Atomic load; release-flavored orders are invalid for a load.
  void*
  load(memory_order __m = memory_order_seq_cst) const
  {
    __glibcxx_assert(__m != memory_order_release);
    __glibcxx_assert(__m != memory_order_acq_rel);
    return _ATOMIC_LOAD_(this, __m);
  }

  // Atomic swap: store __v, return the previous pointer.
  void*
  exchange(void* __v, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, =, __v, __m); }

  // Compare-exchange: on mismatch, __v1 receives the observed value.
  // __m2 (failure order) may not be stronger than __m1 nor a release
  // order.  NB: under the locked protocol weak == strong (no spurious
  // failure), so both forward to the same macro.
  bool
  compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                        memory_order __m2)
  {
    __glibcxx_assert(__m2 != memory_order_release);
    __glibcxx_assert(__m2 != memory_order_acq_rel);
    __glibcxx_assert(__m2 <= __m1);
    return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
  }

  // Convenience overload: derives the failure order from __m.
  bool
  compare_exchange_weak(void*& __v1, void* __v2,
                        memory_order __m = memory_order_seq_cst)
  {
    return compare_exchange_weak(__v1, __v2, __m,
                                 __calculate_memory_order(__m));
  }

  bool
  compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2)
  {
    __glibcxx_assert(__m2 != memory_order_release);
    __glibcxx_assert(__m2 != memory_order_acq_rel);
    __glibcxx_assert(__m2 <= __m1);
    return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
  }

  bool
  compare_exchange_strong(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst)
  {
    return compare_exchange_strong(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
  }

  // Fetch-add on the pointer, open-coding the locked RMW protocol;
  // __d is a byte displacement (arithmetic done via char*).  Returns
  // the OLD pointer.
  void*
  fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
  {
    void** __p = &(_M_i);
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);
    __atomic_flag_wait_explicit(__g, __m);
    void* __r = *__p;
    *__p = (void*)((char*)(*__p) + __d);
    atomic_flag_clear_explicit(__g, __m);
    return __r;
  }

  // Fetch-subtract; mirror image of fetch_add.
  void*
  fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
  {
    void** __p = &(_M_i);
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);
    __atomic_flag_wait_explicit(__g, __m);
    void* __r = *__p;
    *__p = (void*)((char*)(*__p) - __d);
    atomic_flag_clear_explicit(__g, __m);
    return __r;
  }

  // Implicit conversion performs a seq_cst load.
  operator void*() const
  { return load(); }

  // Assignment performs a seq_cst store and returns the value assigned.
  void*
  operator=(void* __v)
  {
    store(__v);
    return __v;
  }

  // NOTE(review): void* arithmetic below relies on the GNU extension
  // that treats void* like char* — fine in this system header.
  void*
  operator+=(ptrdiff_t __d)
  { return fetch_add(__d) + __d; }

  void*
  operator-=(ptrdiff_t __d)
  { return fetch_sub(__d) - __d; }
};
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
/// Base class template for the atomic integral types, generic
/// (lock-based) implementation.  _ITp is the underlying integral type;
/// all operations serialize through the per-address flag via the
/// _ATOMIC_* macros, so is_lock_free() is always false.
template<typename _ITp>
struct __atomic_base
{
private:
  typedef _ITp __integral_type;

  __integral_type _M_i;  // the value, guarded by the per-address flag

public:
  __atomic_base() = default;
  ~__atomic_base() = default;
  // Non-copyable; assignment deleted on volatile objects too.
  __atomic_base(const __atomic_base&) = delete;
  __atomic_base& operator=(const __atomic_base&) volatile = delete;

  // Converting constructor; not atomic (object not yet shared).
  __atomic_base(__integral_type __i) { _M_i = __i; }

  // Implicit conversion performs a seq_cst load.
  operator __integral_type() const
  { return load(); }

  // Assignment performs a seq_cst store, returning the value assigned.
  __integral_type
  operator=(__integral_type __i)
  {
    store(__i);
    return __i;
  }

  // Post-increment/decrement return the OLD value...
  __integral_type
  operator++(int)
  { return fetch_add(1); }

  __integral_type
  operator--(int)
  { return fetch_sub(1); }

  // ...pre-increment/decrement return the NEW value.
  __integral_type
  operator++()
  { return fetch_add(1) + 1; }

  __integral_type
  operator--()
  { return fetch_sub(1) - 1; }

  // Compound assignments: fetch-op plus a local re-application of the
  // operand to produce the updated value.
  __integral_type
  operator+=(__integral_type __i)
  { return fetch_add(__i) + __i; }

  __integral_type
  operator-=(__integral_type __i)
  { return fetch_sub(__i) - __i; }

  __integral_type
  operator&=(__integral_type __i)
  { return fetch_and(__i) & __i; }

  __integral_type
  operator|=(__integral_type __i)
  { return fetch_or(__i) | __i; }

  __integral_type
  operator^=(__integral_type __i)
  { return fetch_xor(__i) ^ __i; }

  // Lock-based fallback: never lock-free.
  bool
  is_lock_free() const
  { return false; }

  // Atomic store; acquire-flavored orders are invalid for a store.
  void
  store(__integral_type __i, memory_order __m = memory_order_seq_cst)
  {
    __glibcxx_assert(__m != memory_order_acquire);
    __glibcxx_assert(__m != memory_order_acq_rel);
    __glibcxx_assert(__m != memory_order_consume);
    _ATOMIC_STORE_(this, __i, __m);
  }

  // Atomic load; release-flavored orders are invalid for a load.
  __integral_type
  load(memory_order __m = memory_order_seq_cst) const
  {
    __glibcxx_assert(__m != memory_order_release);
    __glibcxx_assert(__m != memory_order_acq_rel);
    return _ATOMIC_LOAD_(this, __m);
  }

  // Atomic swap: store __i, return the previous value.
  __integral_type
  exchange(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, =, __i, __m); }

  // Compare-exchange: on mismatch, __i1 receives the observed value.
  // __m2 (failure order) may not be stronger than __m1 nor a release
  // order.  NB: under the locked protocol weak == strong (no spurious
  // failure), so both forward to the same macro.
  bool
  compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                        memory_order __m1, memory_order __m2)
  {
    __glibcxx_assert(__m2 != memory_order_release);
    __glibcxx_assert(__m2 != memory_order_acq_rel);
    __glibcxx_assert(__m2 <= __m1);
    return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
  }

  // Convenience overload: derives the failure order from __m.
  bool
  compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                        memory_order __m = memory_order_seq_cst)
  {
    return compare_exchange_weak(__i1, __i2, __m,
                                 __calculate_memory_order(__m));
  }

  bool
  compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                          memory_order __m1, memory_order __m2)
  {
    __glibcxx_assert(__m2 != memory_order_release);
    __glibcxx_assert(__m2 != memory_order_acq_rel);
    __glibcxx_assert(__m2 <= __m1);
    return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
  }

  bool
  compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                          memory_order __m = memory_order_seq_cst)
  {
    return compare_exchange_strong(__i1, __i2, __m,
                                   __calculate_memory_order(__m));
  }

  // Fetch-ops: each applies the compound-assignment operator under the
  // lock and returns the OLD value.
  __integral_type
  fetch_add(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

  __integral_type
  fetch_sub(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

  __integral_type
  fetch_and(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

  __integral_type
  fetch_or(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

  __integral_type
  fetch_xor(__integral_type __i, memory_order __m = memory_order_seq_cst)
  { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
};
00387
00388
00389
00390
/// atomic_bool: atomic boolean for the C++0x atomics, generic
/// (lock-based) implementation.  A thin wrapper that forwards every
/// operation to a contained __atomic_base<bool>; see that template for
/// the semantics and memory_order preconditions.
struct atomic_bool
{
private:
  __atomic_base<bool> _M_base;  // does all the real work

public:
  atomic_bool() = default;
  ~atomic_bool() = default;
  // Non-copyable; assignment deleted on volatile objects too.
  atomic_bool(const atomic_bool&) = delete;
  atomic_bool& operator=(const atomic_bool&) volatile = delete;

  // Converting constructor; not atomic (object not yet shared).
  atomic_bool(bool __i) : _M_base(__i) { }

  // Assignment performs a seq_cst store, returning the value assigned.
  bool
  operator=(bool __i)
  { return _M_base.operator=(__i); }

  // Implicit conversion performs a seq_cst load.
  operator bool() const
  { return _M_base.load(); }

  bool
  is_lock_free() const
  { return _M_base.is_lock_free(); }

  void
  store(bool __i, memory_order __m = memory_order_seq_cst)
  { _M_base.store(__i, __m); }

  bool
  load(memory_order __m = memory_order_seq_cst) const
  { return _M_base.load(__m); }

  bool
  exchange(bool __i, memory_order __m = memory_order_seq_cst)
  { return _M_base.exchange(__i, __m); }

  // Compare-exchange pairs forward directly; on mismatch __i1 receives
  // the observed value.
  bool
  compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                        memory_order __m2)
  { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

  bool
  compare_exchange_weak(bool& __i1, bool __i2,
                        memory_order __m = memory_order_seq_cst)
  { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

  bool
  compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2)
  { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

  bool
  compare_exchange_strong(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst)
  { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
};
00448
00449 #undef _ATOMIC_LOAD_
00450 #undef _ATOMIC_STORE_
00451 #undef _ATOMIC_MODIFY_
00452 #undef _ATOMIC_CMPEXCHNG_
00453 }
00454
00455
00456
00457 #endif