// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus <= 201103L
# include <bits/c++14_warning.h>
#else

#include <bits/c++config.h>
#include <mutex>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#define __cpp_lib_shared_timed_mutex 201402
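
// The feature-test macro above lets user code detect this facility after
// including this header.  A minimal illustrative check (user code, not
// part of this header) might look like:
//
//   #include <shared_mutex>
//   #if defined(__cpp_lib_shared_timed_mutex)
//     // std::shared_timed_mutex is available here
//   #endif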

  /// shared_timed_mutex
  class shared_timed_mutex
  {
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
    typedef chrono::system_clock	__clock_t;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;
#else
    pthread_rwlock_t	_M_rwlock;

  public:
    shared_timed_mutex()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    ~shared_timed_mutex()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }
#endif

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock.  Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_until(__s_atime);
      }
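
    // Illustrative note (an assumption about typical use, not normative):
    // the DR 887 conversion above preserves the remaining wait time rather
    // than the absolute clock value.  E.g. a steady_clock deadline 50ms in
    // the future becomes a system_clock deadline 50ms after the
    // system_clock reading taken on entry, so the wait duration carries
    // over even though the two clocks have unrelated epochs.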

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
	return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	// DR 887 - Sync unknown clock to known clock.
	const typename _Clock::time_point __c_entry = _Clock::now();
	const __clock_t::time_point __s_entry = __clock_t::now();
	const auto __delta = __abs_time - __c_entry;
	const auto __s_atime = __s_entry + __delta;
	return try_lock_shared_until(__s_atime);
      }

    void
    unlock_shared()
    {
      unlock();
    }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Must use the same clock as condition_variable
    typedef chrono::system_clock	__clock_t;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
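    //
    // Illustrative layout (assuming 32-bit unsigned; not normative):
    //   _S_write_entered == 0x80000000   the write-entered flag
    //   _S_max_readers   == 0x7fffffff   reader-count mask and maximum
    // so e.g. _M_state == 0x80000003 means a writer is queued while three
    // reader locks are still held.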
| 
 | |
|     // Only locked when accessing _M_state or waiting on condition variables.
 | |
|     mutex		_M_mut;
 | |
|     // Used to block while write-entered is set or reader count at maximum.
 | |
|     condition_variable	_M_gate1;
 | |
|     // Used to block queued writers while reader count is non-zero.
 | |
|     condition_variable	_M_gate2;
 | |
|     // The write-entered flag and reader count.
 | |
|     unsigned		_M_state;
 | |
| 
 | |
|     static constexpr unsigned _S_write_entered
 | |
|       = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
 | |
|     static constexpr unsigned _S_max_readers = ~_S_write_entered;
 | |
| 
 | |
|     // Test whether the write-entered flag is set. _M_mut must be locked.
 | |
|     bool _M_write_entered() const { return _M_state & _S_write_entered; }
 | |
| 
 | |
|     // The number of reader locks currently held. _M_mut must be locked.
 | |
|     unsigned _M_readers() const { return _M_state & _S_max_readers; }
 | |
| 
 | |
|   public:
 | |
|     shared_timed_mutex() : _M_state(0) {}
 | |
| 
 | |
|     ~shared_timed_mutex()
 | |
|     {
 | |
|       _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
 | |
|     }
 | |
| 
 | |
|     shared_timed_mutex(const shared_timed_mutex&) = delete;
 | |
|     shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
 | |
| 
 | |
|     // Exclusive ownership
 | |
| 
 | |
|     void
 | |
|     lock()
 | |
|     {
 | |
|       unique_lock<mutex> __lk(_M_mut);
 | |
|       // Wait until we can set the write-entered flag.
 | |
|       _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
 | |
|       _M_state |= _S_write_entered;
 | |
|       // Then wait until there are no more readers.
 | |
|       _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
 | |
|     }
 | |
| 
 | |
|     bool
 | |
|     try_lock()
 | |
|     {
 | |
|       unique_lock<mutex> __lk(_M_mut, try_to_lock);
 | |
|       if (__lk.owns_lock() && _M_state == 0)
 | |
| 	{
 | |
| 	  _M_state = _S_write_entered;
 | |
| 	  return true;
 | |
| 	}
 | |
|       return false;
 | |
|     }
 | |
| 
 | |
|     template<typename _Rep, typename _Period>
 | |
|       bool
 | |
|       try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
 | |
|       {
 | |
| 	return try_lock_until(__clock_t::now() + __rel_time);
 | |
|       }
 | |
| 
 | |
|     template<typename _Clock, typename _Duration>
 | |
|       bool
 | |
|       try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
 | |
|       {
 | |
| 	unique_lock<mutex> __lk(_M_mut);
 | |
| 	if (!_M_gate1.wait_until(__lk, __abs_time,
 | |
| 				 [=]{ return !_M_write_entered(); }))
 | |
| 	  {
 | |
| 	    return false;
 | |
| 	  }
 | |
| 	_M_state |= _S_write_entered;
 | |
| 	if (!_M_gate2.wait_until(__lk, __abs_time,
 | |
| 				 [=]{ return _M_readers() == 0; }))
 | |
| 	  {
 | |
| 	    _M_state ^= _S_write_entered;
 | |
| 	    // Wake all threads blocked while the write-entered flag was set.
 | |
| 	    _M_gate1.notify_all();
 | |
| 	    return false;
 | |
| 	  }
 | |
| 	return true;
 | |
|       }
 | |
| 
 | |
|     void
 | |
|     unlock()
 | |
|     {
 | |
|       lock_guard<mutex> __lk(_M_mut);
 | |
|       _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
 | |
|       _M_state = 0;
 | |
|       // call notify_all() while mutex is held so that another thread can't
 | |
|       // lock and unlock the mutex then destroy *this before we make the call.
 | |
|       _M_gate1.notify_all();
 | |
|     }
 | |
| 
 | |
|     // Shared ownership
 | |
| 
 | |
|     void
 | |
|     lock_shared()
 | |
|     {
 | |
|       unique_lock<mutex> __lk(_M_mut);
 | |
|       _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
 | |
|       ++_M_state;
 | |
|     }
 | |
| 
 | |
|     bool
 | |
|     try_lock_shared()
 | |
|     {
 | |
|       unique_lock<mutex> __lk(_M_mut, try_to_lock);
 | |
|       if (!__lk.owns_lock())
 | |
| 	return false;
 | |
|       if (_M_state < _S_max_readers)
 | |
| 	{
 | |
| 	  ++_M_state;
 | |
| 	  return true;
 | |
| 	}
 | |
|       return false;
 | |
|     }
 | |
| 
 | |
|     template<typename _Rep, typename _Period>
 | |
|       bool
 | |
|       try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
 | |
|       {
 | |
| 	return try_lock_shared_until(__clock_t::now() + __rel_time);
 | |
|       }
 | |
| 
 | |
|     template <typename _Clock, typename _Duration>
 | |
|       bool
 | |
|       try_lock_shared_until(const chrono::time_point<_Clock,
 | |
| 						     _Duration>& __abs_time)
 | |
|       {
 | |
| 	unique_lock<mutex> __lk(_M_mut);
 | |
| 	if (!_M_gate1.wait_until(__lk, __abs_time,
 | |
| 				 [=]{ return _M_state < _S_max_readers; }))
 | |
| 	  {
 | |
| 	    return false;
 | |
| 	  }
 | |
| 	++_M_state;
 | |
| 	return true;
 | |
|       }
 | |
| 
 | |
|     void
 | |
|     unlock_shared()
 | |
|     {
 | |
|       lock_guard<mutex> __lk(_M_mut);
 | |
|       _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
 | |
|       auto __prev = _M_state--;
 | |
|       if (_M_write_entered())
 | |
| 	{
 | |
| 	  // Wake the queued writer if there are no more readers.
 | |
| 	  if (_M_readers() == 0)
 | |
| 	    _M_gate2.notify_one();
 | |
| 	  // No need to notify gate1 because we give priority to the queued
 | |
| 	  // writer, and that writer will eventually notify gate1 after it
 | |
| 	  // clears the write-entered flag.
 | |
| 	}
 | |
|       else
 | |
| 	{
 | |
| 	  // Wake any thread that was blocked on reader overflow.
 | |
| 	  if (__prev == _S_max_readers)
 | |
| 	    _M_gate1.notify_one();
 | |
| 	}
 | |
|     }
 | |
| #endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
 | |
|   };
 | |
| #endif // _GLIBCXX_HAS_GTHREADS
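
  // Example usage (an illustrative sketch of user code, not part of this
  // header): writers take exclusive ownership through lock()/unlock(),
  // while any number of readers share ownership via the *_shared members,
  // most conveniently through shared_lock (defined below).
  //
  //   std::shared_timed_mutex mtx;
  //   int value = 0;
  //
  //   void writer()                        // exclusive ownership
  //   {
  //     std::lock_guard<std::shared_timed_mutex> lk(mtx);
  //     ++value;
  //   }
  //
  //   int reader()                         // shared ownership
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(mtx);
  //     return value;                      // readers may run concurrently
  //   }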

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(&__m), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(&__m), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*	_M_pm;
      bool		_M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
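
  // Illustrative sketch (assumed user code, not part of this header):
  // shared_lock forwards its timed members to the *_shared operations of
  // the wrapped mutex, so a timed shared acquisition can be written as:
  //
  //   std::shared_timed_mutex mtx;
  //   std::shared_lock<std::shared_timed_mutex> lk(mtx, std::defer_lock);
  //   if (lk.try_lock_for(std::chrono::milliseconds(50)))
  //     { /* read under shared ownership */ }
  //   else
  //     { /* timed out waiting for a writer to finish */ }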

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX