// -*- C++ -*- header.

// Copyright (C) 2008-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// ????????????????????????????????????????????????????????????????????
//
// This is a copy of the libstdc++ header, with the trivial modification
// of ignoring the c++config.h include.  If and when the top-level build is
// fixed so that target libraries can be built using the newly built
// compiler, we can delete this file.
//
// ????????????????????????????????????????????????????????????????????

/** @file include/atomic
 *  This is a Standard C++ Library header.
 */

// Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html

#ifndef _GLIBCXX_ATOMIC
#define _GLIBCXX_ATOMIC 1

#define __libitm_always_inline __attribute__((always_inline))

// #pragma GCC system_header

// #ifndef __GXX_EXPERIMENTAL_CXX0X__
// # include <bits/c++0x_warning.h>
// #endif

// #include <bits/atomic_base.h>

namespace std // _GLIBCXX_VISIBILITY(default)
{
// _GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;

  inline __libitm_always_inline memory_order
  __calculate_memory_order(memory_order __m) noexcept
  {
    const bool __cond1 = __m == memory_order_release;
    const bool __cond2 = __m == memory_order_acq_rel;
    memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
    memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
    return __mo2;
  }
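
  // Illustrative note (not part of the original header): the single-order
  // compare_exchange_{weak,strong} overloads below use
  // __calculate_memory_order to derive a failure ordering that carries no
  // release component, e.g.
  //
  //   __calculate_memory_order(memory_order_release) == memory_order_relaxed
  //   __calculate_memory_order(memory_order_acq_rel) == memory_order_acquire
  //   __calculate_memory_order(memory_order_seq_cst) == memory_order_seq_cst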

  inline __libitm_always_inline void
  atomic_thread_fence(memory_order __m) noexcept
  {
    __atomic_thread_fence (__m);
  }

  inline __libitm_always_inline void
  atomic_signal_fence(memory_order __m) noexcept
  {
    __atomic_signal_fence (__m);
  }
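
  // Hedged usage sketch (illustrative only, not from the original header):
  // a release fence paired with an acquire fence can order a flag-based
  // publication, e.g.
  //
  //   __data = 42;                                  // plain store
  //   atomic_thread_fence(memory_order_release);
  //   __ready.store(true, memory_order_relaxed);
  //
  //   while (!__ready.load(memory_order_relaxed)) { }   // reader
  //   atomic_thread_fence(memory_order_acquire);
  //   __use(__data);
  //
  // atomic_signal_fence only constrains compiler reordering with respect to
  // a signal handler running on the same thread.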

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  /// Lock-free Property


#define ATOMIC_BOOL_LOCK_FREE		__GCC_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE		__GCC_ATOMIC_CHAR_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE	__GCC_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_CHAR16_T_LOCK_FREE	__GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE	__GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE		__GCC_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE		__GCC_ATOMIC_INT_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE		__GCC_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE		__GCC_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE	__GCC_ATOMIC_POINTER_LOCK_FREE
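
  // Sketch of intended use (explanatory, not in the original header): each
  // ATOMIC_*_LOCK_FREE macro expands to the compiler's answer of 0 (never
  // lock-free), 1 (sometimes, decided at run time) or 2 (always lock-free),
  // e.g.
  //
  //   #if ATOMIC_INT_LOCK_FREE == 2
  //     // may rely on lock-free atomic<int> on this target
  //   #endif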

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  /// atomic_char
  typedef __atomic_base<char>			atomic_char;

  /// atomic_schar
  typedef __atomic_base<signed char>		atomic_schar;

  /// atomic_uchar
  typedef __atomic_base<unsigned char>		atomic_uchar;

  /// atomic_short
  typedef __atomic_base<short>			atomic_short;

  /// atomic_ushort
  typedef __atomic_base<unsigned short>		atomic_ushort;

  /// atomic_int
  typedef __atomic_base<int>			atomic_int;

  /// atomic_uint
  typedef __atomic_base<unsigned int>		atomic_uint;

  /// atomic_long
  typedef __atomic_base<long>			atomic_long;

  /// atomic_ulong
  typedef __atomic_base<unsigned long>		atomic_ulong;

  /// atomic_llong
  typedef __atomic_base<long long>		atomic_llong;

  /// atomic_ullong
  typedef __atomic_base<unsigned long long>	atomic_ullong;

  /// atomic_wchar_t
  typedef __atomic_base<wchar_t>		atomic_wchar_t;

  /// atomic_char16_t
  typedef __atomic_base<char16_t>		atomic_char16_t;

  /// atomic_char32_t
  typedef __atomic_base<char32_t>		atomic_char32_t;



  /// atomic_int_least8_t
  typedef __atomic_base<int_least8_t>		atomic_int_least8_t;

  /// atomic_uint_least8_t
  typedef __atomic_base<uint_least8_t>		atomic_uint_least8_t;

  /// atomic_int_least16_t
  typedef __atomic_base<int_least16_t>		atomic_int_least16_t;

  /// atomic_uint_least16_t
  typedef __atomic_base<uint_least16_t>		atomic_uint_least16_t;

  /// atomic_int_least32_t
  typedef __atomic_base<int_least32_t>		atomic_int_least32_t;

  /// atomic_uint_least32_t
  typedef __atomic_base<uint_least32_t>		atomic_uint_least32_t;

  /// atomic_int_least64_t
  typedef __atomic_base<int_least64_t>		atomic_int_least64_t;

  /// atomic_uint_least64_t
  typedef __atomic_base<uint_least64_t>		atomic_uint_least64_t;


  /// atomic_int_fast8_t
  typedef __atomic_base<int_fast8_t>		atomic_int_fast8_t;

  /// atomic_uint_fast8_t
  typedef __atomic_base<uint_fast8_t>		atomic_uint_fast8_t;

  /// atomic_int_fast16_t
  typedef __atomic_base<int_fast16_t>		atomic_int_fast16_t;

  /// atomic_uint_fast16_t
  typedef __atomic_base<uint_fast16_t>		atomic_uint_fast16_t;

  /// atomic_int_fast32_t
  typedef __atomic_base<int_fast32_t>		atomic_int_fast32_t;

  /// atomic_uint_fast32_t
  typedef __atomic_base<uint_fast32_t>		atomic_uint_fast32_t;

  /// atomic_int_fast64_t
  typedef __atomic_base<int_fast64_t>		atomic_int_fast64_t;

  /// atomic_uint_fast64_t
  typedef __atomic_base<uint_fast64_t>		atomic_uint_fast64_t;


  /// atomic_intptr_t
  typedef __atomic_base<intptr_t>		atomic_intptr_t;

  /// atomic_uintptr_t
  typedef __atomic_base<uintptr_t>		atomic_uintptr_t;

  /// atomic_size_t
  typedef __atomic_base<size_t>			atomic_size_t;

  /// atomic_intmax_t
  typedef __atomic_base<intmax_t>		atomic_intmax_t;

  /// atomic_uintmax_t
  typedef __atomic_base<uintmax_t>		atomic_uintmax_t;

  /// atomic_ptrdiff_t
  typedef __atomic_base<ptrdiff_t>		atomic_ptrdiff_t;


#define ATOMIC_VAR_INIT(_VI) { _VI }
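
  // Hedged example (not in the original header): ATOMIC_VAR_INIT is the
  // C-compatible static initializer, e.g.
  //
  //   std::atomic<int> __counter = ATOMIC_VAR_INIT(0);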

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;


  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
  */
  // _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    bool _M_i;
  };

  // _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { false }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i) noexcept : __atomic_flag_base({ __i }) { }

    __libitm_always_inline bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    __libitm_always_inline bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, __m);
    }

    __libitm_always_inline void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      // __glibcxx_assert(__m != memory_order_consume);
      // __glibcxx_assert(__m != memory_order_acquire);
      // __glibcxx_assert(__m != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }

    __libitm_always_inline void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      // __glibcxx_assert(__m != memory_order_consume);
      // __glibcxx_assert(__m != memory_order_acquire);
      // __glibcxx_assert(__m != memory_order_acq_rel);

      __atomic_clear (&_M_i, __m);
    }
  };
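
  // Hedged usage sketch (illustrative only, not part of the header): an
  // atomic_flag can serve as a minimal spin lock.
  //
  //   static atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void __enter() {
  //     while (__lock.test_and_set(memory_order_acquire))
  //       ;   // spin until the previous value was false
  //   }
  //
  //   void __leave() {
  //     __lock.clear(memory_order_release);
  //   }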


  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp	__int_type;

      __int_type	_M_i;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); }

      __libitm_always_inline void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        // __glibcxx_assert(__m != memory_order_acquire);
        // __glibcxx_assert(__m != memory_order_acq_rel);
        // __glibcxx_assert(__m != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      __libitm_always_inline void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        // __glibcxx_assert(__m != memory_order_acquire);
        // __glibcxx_assert(__m != memory_order_acq_rel);
        // __glibcxx_assert(__m != memory_order_consume);

        __atomic_store_n(&_M_i, __i, __m);
      }

      __libitm_always_inline __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        // __glibcxx_assert(__m != memory_order_release);
        // __glibcxx_assert(__m != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      __libitm_always_inline __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        // __glibcxx_assert(__m != memory_order_release);
        // __glibcxx_assert(__m != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, __m);
      }

      __libitm_always_inline __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      __libitm_always_inline __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, __m);
      }

      __libitm_always_inline bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      __libitm_always_inline bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
      }

      __libitm_always_inline bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      __libitm_always_inline bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      __libitm_always_inline bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      __libitm_always_inline bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
      }

      __libitm_always_inline bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __libitm_always_inline bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __libitm_always_inline __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }

      __libitm_always_inline __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, __m); }
    };
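
  // Hedged usage sketch (not part of the original header): the integral
  // base exposes the usual fetch-and-modify interface, e.g. a statistics
  // counter that only needs atomicity, not ordering:
  //
  //   atomic_uint __hits(0);
  //   __hits.fetch_add(1, memory_order_relaxed);   // hot path
  //   unsigned __total = __hits.load(memory_order_relaxed);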


  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp*	__pointer_type;

      __pointer_type	_M_p;

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, 1, memory_order_seq_cst); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, 1, memory_order_seq_cst); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, 1, memory_order_seq_cst); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, 1, memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, __d, memory_order_seq_cst); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, __d, memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, __d, memory_order_seq_cst); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, __d, memory_order_seq_cst); }

      bool
      is_lock_free() const noexcept
      { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); }

      __libitm_always_inline void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        // __glibcxx_assert(__m != memory_order_acquire);
        // __glibcxx_assert(__m != memory_order_acq_rel);
        // __glibcxx_assert(__m != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      __libitm_always_inline void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        // __glibcxx_assert(__m != memory_order_acquire);
        // __glibcxx_assert(__m != memory_order_acq_rel);
        // __glibcxx_assert(__m != memory_order_consume);

        __atomic_store_n(&_M_p, __p, __m);
      }

      __libitm_always_inline __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        // __glibcxx_assert(__m != memory_order_release);
        // __glibcxx_assert(__m != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      __libitm_always_inline __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        // __glibcxx_assert(__m != memory_order_release);
        // __glibcxx_assert(__m != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, __m);
      }

      __libitm_always_inline __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      __libitm_always_inline __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, __m);
      }

      __libitm_always_inline bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      __libitm_always_inline bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        // __glibcxx_assert(__m2 != memory_order_release);
        // __glibcxx_assert(__m2 != memory_order_acq_rel);
        // __glibcxx_assert(__m2 <= __m1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
      }

      __libitm_always_inline __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, __d, __m); }

      __libitm_always_inline __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, __d, __m); }

      __libitm_always_inline __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, __d, __m); }

      __libitm_always_inline __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, __d, __m); }
    };


  /**
   * @addtogroup atomics
   * @{
   */

  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool>	_M_base;

  public:
    atomic_bool() noexcept = default;
    ~atomic_bool() noexcept = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) volatile = delete;

    constexpr atomic_bool(bool __i) noexcept : _M_base(__i) { }

    bool
    operator=(bool __i) noexcept
    { return _M_base.operator=(__i); }

    operator bool() const noexcept
    { return _M_base.load(); }

    operator bool() const volatile noexcept
    { return _M_base.load(); }

    bool
    is_lock_free() const noexcept { return _M_base.is_lock_free(); }

    bool
    is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); }

    __libitm_always_inline void
    store(bool __i, memory_order __m = memory_order_seq_cst) noexcept
    { _M_base.store(__i, __m); }

    __libitm_always_inline void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept
    { _M_base.store(__i, __m); }

    __libitm_always_inline bool
    load(memory_order __m = memory_order_seq_cst) const noexcept
    { return _M_base.load(__m); }

    __libitm_always_inline bool
    load(memory_order __m = memory_order_seq_cst) const volatile noexcept
    { return _M_base.load(__m); }

    __libitm_always_inline bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.exchange(__i, __m); }

    __libitm_always_inline bool
    exchange(bool __i,
             memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.exchange(__i, __m); }

    __libitm_always_inline bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    __libitm_always_inline bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    __libitm_always_inline bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    __libitm_always_inline bool
    compare_exchange_weak(bool& __i1, bool __i2,
                     memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    __libitm_always_inline bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    __libitm_always_inline bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    __libitm_always_inline bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }

    __libitm_always_inline bool
    compare_exchange_strong(bool& __i1, bool __i2,
                    memory_order __m = memory_order_seq_cst) volatile noexcept
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };


  /// atomic
  /// 29.4.3, Generic atomic type, primary class template.
  template<typename _Tp>
    struct atomic
    {
    private:
      _Tp _M_i;

    public:
      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(_Tp __i) noexcept : _M_i(__i) { }

      operator _Tp() const noexcept
      { return load(); }

      operator _Tp() const volatile noexcept
      { return load(); }

      _Tp
      operator=(_Tp __i) noexcept
      { store(__i); return __i; }

      _Tp
      operator=(_Tp __i) volatile noexcept
      { store(__i); return __i; }

      bool
      is_lock_free() const noexcept
      { return __atomic_is_lock_free(sizeof(_M_i), &_M_i); }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_is_lock_free(sizeof(_M_i), &_M_i); }

      void
      store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept
      { __atomic_store(&_M_i, &__i, _m); }

      __libitm_always_inline void
      store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept
      { __atomic_store(&_M_i, &__i, _m); }

      __libitm_always_inline _Tp
      load(memory_order _m = memory_order_seq_cst) const noexcept
      {
        _Tp tmp;
        __atomic_load(&_M_i, &tmp, _m);
        return tmp;
      }

      __libitm_always_inline _Tp
      load(memory_order _m = memory_order_seq_cst) const volatile noexcept
      {
        _Tp tmp;
        __atomic_load(&_M_i, &tmp, _m);
        return tmp;
      }

      __libitm_always_inline _Tp
      exchange(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept
      {
        _Tp tmp;
        __atomic_exchange(&_M_i, &__i, &tmp, _m);
        return tmp;
      }

      __libitm_always_inline _Tp
      exchange(_Tp __i,
               memory_order _m = memory_order_seq_cst) volatile noexcept
      {
        _Tp tmp;
        __atomic_exchange(&_M_i, &__i, &tmp, _m);
        return tmp;
      }

      __libitm_always_inline bool
      compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
                            memory_order __f) noexcept
      {
        return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f);
      }

      __libitm_always_inline bool
      compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s,
                            memory_order __f) volatile noexcept
      {
        return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f);
      }

      __libitm_always_inline bool
      compare_exchange_weak(_Tp& __e, _Tp __i,
                            memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_weak(__e, __i, __m, __m); }

      __libitm_always_inline bool
      compare_exchange_weak(_Tp& __e, _Tp __i,
                     memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_weak(__e, __i, __m, __m); }

      __libitm_always_inline bool
      compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
                              memory_order __f) noexcept
      {
        return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f);
      }

      __libitm_always_inline bool
      compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s,
                              memory_order __f) volatile noexcept
      {
        return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f);
      }

      __libitm_always_inline bool
      compare_exchange_strong(_Tp& __e, _Tp __i,
                              memory_order __m = memory_order_seq_cst) noexcept
      { return compare_exchange_strong(__e, __i, __m, __m); }

      __libitm_always_inline bool
      compare_exchange_strong(_Tp& __e, _Tp __i,
                     memory_order __m = memory_order_seq_cst) volatile noexcept
      { return compare_exchange_strong(__e, __i, __m, __m); }
    };
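
  // Hedged example (illustrative, not in the original header): the primary
  // template handles any trivially copyable type via the object-based
  // built-ins, e.g.
  //
  //   struct __pos { int __x, __y; };
  //   atomic<__pos> __p(__pos{0, 0});
  //   __pos __expected = __p.load();
  //   while (!__p.compare_exchange_weak(__expected,
  //                                     __pos{__expected.__x + 1,
  //                                           __expected.__y}))
  //     ;   // retry with the freshly observed value in __expected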
| 
 | |
| 
 | |
|   /// Partial specialization for pointer types.
 | |
|   template<typename _Tp>
 | |
|     struct atomic<_Tp*>
 | |
|     {
 | |
|       typedef _Tp* 			__pointer_type;
 | |
|       typedef __atomic_base<_Tp*>	__base_type;
 | |
|       __base_type			_M_b;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__pointer_type __p) noexcept : _M_b(__p) { }
 | |
| 
 | |
|       operator __pointer_type() const noexcept
 | |
|       { return __pointer_type(_M_b); }
 | |
| 
 | |
|       operator __pointer_type() const volatile noexcept
 | |
|       { return __pointer_type(_M_b); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator=(__pointer_type __p) noexcept
 | |
|       { return _M_b.operator=(__p); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator=(__pointer_type __p) volatile noexcept
 | |
|       { return _M_b.operator=(__p); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator++(int) noexcept
 | |
|       { return _M_b++; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator++(int) volatile noexcept
 | |
|       { return _M_b++; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator--(int) noexcept
 | |
|       { return _M_b--; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator--(int) volatile noexcept
 | |
|       { return _M_b--; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator++() noexcept
 | |
|       { return ++_M_b; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator++() volatile noexcept
 | |
|       { return ++_M_b; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator--() noexcept
 | |
|       { return --_M_b; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator--() volatile noexcept
 | |
|       { return --_M_b; }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator+=(ptrdiff_t __d) noexcept
 | |
|       { return _M_b.operator+=(__d); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator+=(ptrdiff_t __d) volatile noexcept
 | |
|       { return _M_b.operator+=(__d); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator-=(ptrdiff_t __d) noexcept
 | |
|       { return _M_b.operator-=(__d); }
 | |
| 
 | |
|       __pointer_type
 | |
|       operator-=(ptrdiff_t __d) volatile noexcept
 | |
|       { return _M_b.operator-=(__d); }
 | |
| 
 | |
|       bool
 | |
|       is_lock_free() const noexcept
 | |
|       { return _M_b.is_lock_free(); }
 | |
| 
 | |
|       bool
 | |
|       is_lock_free() const volatile noexcept
 | |
|       { return _M_b.is_lock_free(); }
 | |
| 
 | |
|       __libitm_always_inline void
 | |
|       store(__pointer_type __p,
 | |
| 	    memory_order __m = memory_order_seq_cst) noexcept
 | |
|       { return _M_b.store(__p, __m); }
 | |
| 
 | |
|       __libitm_always_inline void
 | |
|       store(__pointer_type __p,
 | |
| 	    memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       { return _M_b.store(__p, __m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       load(memory_order __m = memory_order_seq_cst) const noexcept
 | |
|       { return _M_b.load(__m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
 | |
|       { return _M_b.load(__m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       exchange(__pointer_type __p,
 | |
| 	       memory_order __m = memory_order_seq_cst) noexcept
 | |
|       { return _M_b.exchange(__p, __m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       exchange(__pointer_type __p,
 | |
| 	       memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       { return _M_b.exchange(__p, __m); }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			    memory_order __m1, memory_order __m2) noexcept
 | |
|       { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			    memory_order __m1,
 | |
| 			    memory_order __m2) volatile noexcept
 | |
|       { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			    memory_order __m = memory_order_seq_cst) noexcept
 | |
|       {
 | |
| 	return compare_exchange_weak(__p1, __p2, __m,
 | |
| 				     __calculate_memory_order(__m));
 | |
|       }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
 | |
| 		    memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       {
 | |
| 	return compare_exchange_weak(__p1, __p2, __m,
 | |
| 				     __calculate_memory_order(__m));
 | |
|       }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			      memory_order __m1, memory_order __m2) noexcept
 | |
|       { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			      memory_order __m1,
 | |
| 			      memory_order __m2) volatile noexcept
 | |
|       { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
 | |
| 			      memory_order __m = memory_order_seq_cst) noexcept
 | |
|       {
 | |
| 	return _M_b.compare_exchange_strong(__p1, __p2, __m,
 | |
| 					    __calculate_memory_order(__m));
 | |
|       }
 | |
| 
 | |
|       __libitm_always_inline bool
 | |
|       compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
 | |
| 		    memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       {
 | |
| 	return _M_b.compare_exchange_strong(__p1, __p2, __m,
 | |
| 					    __calculate_memory_order(__m));
 | |
|       }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       fetch_add(ptrdiff_t __d,
 | |
| 		memory_order __m = memory_order_seq_cst) noexcept
 | |
|       { return _M_b.fetch_add(__d, __m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       fetch_add(ptrdiff_t __d,
 | |
| 		memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       { return _M_b.fetch_add(__d, __m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       fetch_sub(ptrdiff_t __d,
 | |
| 		memory_order __m = memory_order_seq_cst) noexcept
 | |
|       { return _M_b.fetch_sub(__d, __m); }
 | |
| 
 | |
|       __libitm_always_inline __pointer_type
 | |
|       fetch_sub(ptrdiff_t __d,
 | |
| 		memory_order __m = memory_order_seq_cst) volatile noexcept
 | |
|       { return _M_b.fetch_sub(__d, __m); }
 | |
|     };
 | |
| 
 | |
| 
 | |
|   /// Explicit specialization for bool.
 | |
|   template<>
 | |
|     struct atomic<bool> : public atomic_bool
 | |
|     {
 | |
|       typedef bool 			__integral_type;
 | |
|       typedef atomic_bool 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for char.
 | |
|   template<>
 | |
|     struct atomic<char> : public atomic_char
 | |
|     {
 | |
|       typedef char 			__integral_type;
 | |
|       typedef atomic_char 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for signed char.
 | |
|   template<>
 | |
|     struct atomic<signed char> : public atomic_schar
 | |
|     {
 | |
|       typedef signed char 		__integral_type;
 | |
|       typedef atomic_schar 		__base_type;
 | |
| 
 | |
|       atomic() noexcept= default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for unsigned char.
 | |
|   template<>
 | |
|     struct atomic<unsigned char> : public atomic_uchar
 | |
|     {
 | |
|       typedef unsigned char 		__integral_type;
 | |
|       typedef atomic_uchar 		__base_type;
 | |
| 
 | |
|       atomic() noexcept= default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for short.
 | |
|   template<>
 | |
|     struct atomic<short> : public atomic_short
 | |
|     {
 | |
|       typedef short 			__integral_type;
 | |
|       typedef atomic_short 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for unsigned short.
 | |
|   template<>
 | |
|     struct atomic<unsigned short> : public atomic_ushort
 | |
|     {
 | |
|       typedef unsigned short 	      	__integral_type;
 | |
|       typedef atomic_ushort 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for int.
 | |
|   template<>
 | |
|     struct atomic<int> : atomic_int
 | |
|     {
 | |
|       typedef int 			__integral_type;
 | |
|       typedef atomic_int 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for unsigned int.
 | |
|   template<>
 | |
|     struct atomic<unsigned int> : public atomic_uint
 | |
|     {
 | |
|       typedef unsigned int		__integral_type;
 | |
|       typedef atomic_uint 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
|       atomic(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) = delete;
 | |
|       atomic& operator=(const atomic&) volatile = delete;
 | |
| 
 | |
|       constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }
 | |
| 
 | |
|       using __base_type::operator __integral_type;
 | |
|       using __base_type::operator=;
 | |
|     };
 | |
| 
 | |
|   /// Explicit specialization for long.
 | |
|   template<>
 | |
|     struct atomic<long> : public atomic_long
 | |
|     {
 | |
|       typedef long 			__integral_type;
 | |
|       typedef atomic_long 		__base_type;
 | |
| 
 | |
|       atomic() noexcept = default;
 | |
|       ~atomic() noexcept = default;
 | |
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned long.
  template<>
    struct atomic<unsigned long> : public atomic_ulong
    {
      typedef unsigned long 		__integral_type;
      typedef atomic_ulong 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for long long.
  template<>
    struct atomic<long long> : public atomic_llong
    {
      typedef long long 		__integral_type;
      typedef atomic_llong 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for unsigned long long.
  template<>
    struct atomic<unsigned long long> : public atomic_ullong
    {
      typedef unsigned long long 	__integral_type;
      typedef atomic_ullong 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for wchar_t.
  template<>
    struct atomic<wchar_t> : public atomic_wchar_t
    {
      typedef wchar_t 			__integral_type;
      typedef atomic_wchar_t 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for char16_t.
  template<>
    struct atomic<char16_t> : public atomic_char16_t
    {
      typedef char16_t 			__integral_type;
      typedef atomic_char16_t 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };

  /// Explicit specialization for char32_t.
  template<>
    struct atomic<char32_t> : public atomic_char32_t
    {
      typedef char32_t 			__integral_type;
      typedef atomic_char32_t 		__base_type;

      atomic() noexcept = default;
      ~atomic() noexcept = default;
      atomic(const atomic&) = delete;
      atomic& operator=(const atomic&) = delete;
      atomic& operator=(const atomic&) volatile = delete;

      constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { }

      using __base_type::operator __integral_type;
      using __base_type::operator=;
    };
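
  // Usage sketch (illustrative only, not part of the interface): each
  // explicit specialization above merely forwards to the named atomic
  // typedef it derives from, so the two spellings are interchangeable in
  // client code, e.g.:
  //
  //   std::atomic<char32_t> __c(U'x');
  //   __c = U'y';              // inherited __base_type::operator=
  //   char32_t __v = __c;      // inherited operator __integral_type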


  // Function definitions, atomic_flag operations.
  inline __libitm_always_inline bool
  atomic_flag_test_and_set_explicit(atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  inline __libitm_always_inline bool
  atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  inline __libitm_always_inline void
  atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept
  { __a->clear(__m); }

  inline __libitm_always_inline void
  atomic_flag_clear_explicit(volatile atomic_flag* __a,
			     memory_order __m) noexcept
  { __a->clear(__m); }

  inline __libitm_always_inline bool
  atomic_flag_test_and_set(atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  inline __libitm_always_inline bool
  atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  inline __libitm_always_inline void
  atomic_flag_clear(atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }

  inline __libitm_always_inline void
  atomic_flag_clear(volatile atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
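
  // Usage sketch (illustrative client code, not defined by this header):
  // the free functions above are sufficient to build a minimal spin lock
  // around std::atomic_flag:
  //
  //   static std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //   void __acquire() { while (std::atomic_flag_test_and_set(&__lock)) { } }
  //   void __release() { std::atomic_flag_clear(&__lock); }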


  // Function templates generally applicable to atomic types.
  template<typename _ITp>
    __libitm_always_inline bool
    atomic_is_lock_free(const atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  template<typename _ITp>
    __libitm_always_inline void
    atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept;

  template<typename _ITp>
    __libitm_always_inline void
    atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept;

  template<typename _ITp>
    __libitm_always_inline void
    atomic_store_explicit(atomic<_ITp>* __a, _ITp __i,
			  memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline void
    atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i,
			  memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_load_explicit(const volatile atomic<_ITp>* __a,
			 memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
					  _ITp* __i1, _ITp __i2,
					  memory_order __m1,
					  memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
					  _ITp* __i1, _ITp __i2,
					  memory_order __m1,
					  memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
					    _ITp* __i1, _ITp __i2,
					    memory_order __m1,
					    memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
					    _ITp* __i1, _ITp __i2,
					    memory_order __m1,
					    memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }


  template<typename _ITp>
    __libitm_always_inline void
    atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline void
    atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_load(const atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_load(const volatile atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_weak(atomic<_ITp>* __a,
				 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
						   memory_order_seq_cst,
						   memory_order_seq_cst);
    }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
				 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
						   memory_order_seq_cst,
						   memory_order_seq_cst);
    }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_strong(atomic<_ITp>* __a,
				   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
						     memory_order_seq_cst,
						     memory_order_seq_cst);
    }

  template<typename _ITp>
    __libitm_always_inline bool
    atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
				   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
						     memory_order_seq_cst,
						     memory_order_seq_cst);
    }
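
  // Usage sketch (illustrative only): the non-_explicit forms above simply
  // default every memory_order argument to memory_order_seq_cst.  For
  // example, a compare-and-swap retry loop written against these free
  // functions:
  //
  //   std::atomic<int> __counter(0);
  //   int __expected = std::atomic_load(&__counter);
  //   while (!std::atomic_compare_exchange_weak(&__counter, &__expected,
  //					       __expected + 1))
  //     { /* __expected now holds the current value; retry */ }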

  // Function templates for atomic_integral operations only, using
  // __atomic_base.  The template argument should be constrained to
  // integral types as specified in the standard, excluding address
  // types.
  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			     memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
			      memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    __libitm_always_inline _ITp
    atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
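
  // Usage sketch (illustrative only): the fetch operations return the value
  // held *before* the modification.  The atomic<T> specializations above
  // derive from the corresponding __atomic_base, so they deduce here, e.g.:
  //
  //   std::atomic<int> __n(0);
  //   int __prev = std::atomic_fetch_add(&__n, 5);   // __prev == 0, __n == 5
  //   std::atomic_fetch_sub_explicit(&__n, 1, std::memory_order_relaxed);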


  // Partial specializations for pointers.
  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a,
			      ptrdiff_t __d, memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
			      memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }

  template<typename _ITp>
    __libitm_always_inline _ITp*
    atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }
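
  // Usage sketch (illustrative only): for atomic pointers the ptrdiff_t
  // argument is counted in elements, not bytes, exactly as for built-in
  // pointer arithmetic:
  //
  //   int __buf[4];
  //   std::atomic<int*> __p(__buf);
  //   int* __old = std::atomic_fetch_add(&__p, 2);  // __old == __buf,
  //						    // __p == __buf + 2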
  // @} group atomics

// _GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif