/** \file atomic_shared.h
 *
 * \author George T. Talbot
 * \author Peter Dimov
 *
 * \brief This file defines a template class that can wrap
 *        boost::shared_ptr and provide thread-safe atomic
 *        read/write operations.
 *
 * (C) Copyright George Talbot, Peter Dimov 2006.
 *
 * Distributed under the Boost Software License, Version 1.0. (See
 * accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 */

#ifndef ATOMIC_SHARED_H
#define ATOMIC_SHARED_H

#include <boost/shared_ptr.hpp>

#include <pthread.h>

#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdlib>
#include <stdexcept>

#ifdef ATOMIC_ABORT_ON_ERROR
#define ATOMIC_SHARED_ABORT ::abort();
#else
#define ATOMIC_SHARED_ABORT
#endif

/** A wrapper for shared_ptr that implements atomic assignments and
 *  compare_and_set.
 */
template<class T>
class atomic_shared
{
    /** Lock a spinlock protecting an atomic pointer. */
    struct LockSpinlock
    {
        explicit LockSpinlock(pthread_spinlock_t& lock)
            : m_lock(lock)
        {
            int rv = ::pthread_spin_lock(&m_lock);

            if (rv)
            {
                errno = rv;
                ATOMIC_SHARED_ABORT
                throw std::runtime_error("can't lock spinlock");
            }
        }

        LockSpinlock(pthread_spinlock_t& lock, int) // Assumes already locked.
            : m_lock(lock)
        {
        }

        ~LockSpinlock()
        {
            int rv = ::pthread_spin_unlock(&m_lock);
            assert(rv == 0);
            (void) rv; // Unused when NDEBUG disables the assert.
        }

    private:
        pthread_spinlock_t& m_lock;
    };

    typedef boost::shared_ptr<T> base_ptr;

    /** Initialize a spinlock protecting an atomic pointer. */
    static void init(pthread_spinlock_t& l)
    {
        int rv = ::pthread_spin_init(&l, PTHREAD_PROCESS_PRIVATE);

        if (rv)
        {
            errno = rv;
            ATOMIC_SHARED_ABORT
            throw std::runtime_error("can't initialize spinlock");
        }
    }

public:
    typedef typename base_ptr::element_type element_type; ///< Type of element to which pointer points.
    typedef typename base_ptr::value_type   value_type;   ///< Same as element_type.
    typedef typename base_ptr::pointer      pointer;      ///< Pointer to refcounted object.
    typedef typename base_ptr::reference    reference;    ///< Reference to refcounted object.

    /** Initialize an empty pointer. */
    atomic_shared()
    {
        init(spinlock);
    }

    /** Create a managed pointer from a bare one. */
    template<class Y>
    explicit atomic_shared(Y* p)
        : ptr(p)
    {
        init(spinlock);
    }

    /** Create a managed pointer from a bare one, and a custom deleter. */
    template<class Y, class D>
    atomic_shared(Y* p, D d)
        : ptr(p, d)
    {
        init(spinlock);
    }

    /** Create a managed pointer from a shared_ptr.
     *
     * WARNING: The caller assumes the responsibility that the creation
     *          of the atomic_shared occurs in the same thread as the
     *          base_ptr, and that the base_ptr goes out of scope before
     *          the atomic_shared is visible to other threads.
     *
     * Maybe this shouldn't be here at all.
     */
    atomic_shared(const base_ptr& p)
        : ptr(p)
    {
        init(spinlock);
    }

    /** Copy constructor. */
    atomic_shared(const atomic_shared& p)
    {
        init(spinlock);

        LockSpinlock l(p.spinlock);
        ptr = p.ptr;
    }

    /** Assignment. */
    atomic_shared& operator=(atomic_shared p) // Local copy to avoid deadlock.
    {
        // p is a by-value copy, so &p can never equal this and no
        // self-assignment check is needed.
        LockSpinlock l(spinlock);
        ptr = p.ptr;

        return *this;
    }

    /** Type-converting copy constructor. */
    template<class Y>
    atomic_shared(const atomic_shared<Y>& p)
    {
        init(spinlock);

        LockSpinlock l(p.spinlock);
        ptr = p.ptr;
    }

    /** Type-converting assignment. */
    template<class Y>
    atomic_shared& operator=(atomic_shared<Y> p) // Local copy to avoid deadlock.
    {
        LockSpinlock l(spinlock);
        ptr = p.ptr;
        return *this;
    }
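    // Why the assignment operators take their argument by value (a sketch of
    // the reasoning, not original commentary): the by-value copy runs the
    // copy constructor first, which locks only the *source's* spinlock; the
    // assignment body then locks only *this* object's spinlock. Two threads
    // concurrently executing, say,
    //
    //     // thread 1       // thread 2
    //     a = b;            b = a;
    //
    // never hold both spinlocks at once, so the lock-order inversion that a
    // pass-by-reference assignment could deadlock on cannot occur.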
    /** Destructor with rudimentary error checking. */
    ~atomic_shared()
    {
        // Lock the spinlock to hold off all other copiers.
        int rv = ::pthread_spin_lock(&spinlock);
        assert(rv == 0);

        // Destroy the spinlock--this should cause all other copiers to
        // either assert or throw.
        rv = ::pthread_spin_destroy(&spinlock);

        // For debugging, drop us into the debugger if we can't destroy the
        // spinlock.
        assert(rv == 0);
        (void) rv; // Unused when NDEBUG disables the asserts.
    }

    /** Reset the pointer to an empty pointer. */
    void reset()
    {
        LockSpinlock l(spinlock);
        ptr.reset();
    }

    /** Reset the pointer and replace it with a new bare pointer. */
    template<class Y>
    void reset(Y* p)
    {
        LockSpinlock l(spinlock);
        ptr.reset(p);
    }

    /** Reset the pointer and replace it with a new bare pointer with a
     *  custom deleter. */
    template<class Y, class D>
    void reset(Y* p, D d)
    {
        LockSpinlock l(spinlock);
        ptr.reset(p, d);
    }

    // Any reads of what the pointer points to happen without locking.

    reference operator*() const
    {
        return ptr.operator*();
    }

    T* operator->() const
    {
        return ptr.operator->();
    }

    T* get() const
    {
        return ptr.get();
    }

    operator bool() const
    {
        return ptr;
    }

    bool operator!() const
    {
        return !ptr;
    }

    template<class Y>
    ptrdiff_t operator-(const atomic_shared<Y>& x) const
    {
        return ptr - x.ptr;
    }

    bool unique() const
    {
        return ptr.unique();
    }

    long use_count() const
    {
        return ptr.use_count();
    }

#define ATOMIC_SHARED_LOGIC_OP(OP) \
    template<class Y> \
    bool operator OP (const atomic_shared<Y>& x) const \
    { \
        return ptr OP x.ptr; \
    } \
    template<class Y> \
    bool operator OP (const Y* x) const \
    { \
        return ptr OP x; \
    }

    ATOMIC_SHARED_LOGIC_OP(==)
    ATOMIC_SHARED_LOGIC_OP(!=)
    ATOMIC_SHARED_LOGIC_OP(<)
    ATOMIC_SHARED_LOGIC_OP(<=)
    ATOMIC_SHARED_LOGIC_OP(>)
    ATOMIC_SHARED_LOGIC_OP(>=)

#undef ATOMIC_SHARED_LOGIC_OP

#define ATOMIC_SHARED_ARITH_OP(OP) \
    atomic_shared operator OP (ptrdiff_t p) const \
    { \
        LockSpinlock l(spinlock); \
        return ptr OP p; \
    }

    ATOMIC_SHARED_ARITH_OP(+)
    ATOMIC_SHARED_ARITH_OP(-)

#undef ATOMIC_SHARED_ARITH_OP

#define ATOMIC_SHARED_ADD_OP(OP) \
    atomic_shared& operator OP (ptrdiff_t p) \
    { \
        LockSpinlock l(spinlock); \
        ptr OP p; \
        return *this; \
    }

    ATOMIC_SHARED_ADD_OP(+=)
    ATOMIC_SHARED_ADD_OP(-=)

#undef ATOMIC_SHARED_ADD_OP

#define ATOMIC_SHARED_INC_OP(OP) \
    atomic_shared& operator OP () \
    { \
        LockSpinlock l(spinlock); \
        OP ptr; \
        return *this; \
    } \
    atomic_shared operator OP (int) \
    { \
        atomic_shared rv(*this); \
        LockSpinlock l(spinlock); \
        OP ptr; \
        return rv; \
    }

    ATOMIC_SHARED_INC_OP(++)
    ATOMIC_SHARED_INC_OP(--)

#undef ATOMIC_SHARED_INC_OP

    /** Swap the pointer with another pointer. Each of the two pointer
     *  updates is individually atomic, but the swap as a whole is not. */
    void swap(atomic_shared& other)
    {
        atomic_shared temp(*this);
        atomic_shared temp2(other);

        {
            LockSpinlock l(spinlock);
            ptr = temp2.ptr;
        }

        {
            LockSpinlock l(other.spinlock);
            other.ptr = temp.ptr; // Assign the member directly; going through
                                  // other's operator= would try to relock the
                                  // spinlock already held here and deadlock.
        }
    }

    /** Attempt to set the value of the pointer to another pointer, but only
     *  if the current pointer is the same as a previously sampled value of
     *  the pointer.
     *
     * @return true if the swap was successful.
     */
    template<class Y>
    bool compare_and_set(const atomic_shared<Y>& cmp, atomic_shared xchg)
        // Note that xchg is a local copy.
    {
        int rv = ::pthread_spin_trylock(&spinlock);

        switch (rv)
        {
        case EBUSY:
            return false;

        case 0:
            break;

        default:
            errno = rv;
            ATOMIC_SHARED_ABORT
            throw std::logic_error("can't pthread_spin_trylock");
        }

        LockSpinlock l(spinlock, 0); // Already locked.

        bool r = ptr == cmp.ptr;

        if (r)
            ptr.swap(xchg.ptr);

        return r;
    }

private:
    template<class Y> friend class atomic_shared;

    base_ptr ptr;
    mutable pthread_spinlock_t spinlock;
}; // atomic_shared

#endif // ATOMIC_SHARED_H
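// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header). It shows
// atomic publication through assignment and a compare_and_set retry loop;
// `Widget`, `g_ptr`, `writer`, and `updater` are hypothetical names invented
// for this example.
//
//   #include "atomic_shared.h"
//
//   struct Widget
//   {
//       explicit Widget(int v) : value(v) {}
//       int value;
//   };
//
//   atomic_shared<Widget> g_ptr(new Widget(0));
//
//   void writer()
//   {
//       // Atomic publish: concurrent readers see either the old or the new
//       // Widget, never a torn pointer.
//       g_ptr = atomic_shared<Widget>(new Widget(42));
//   }
//
//   void updater()
//   {
//       for (;;)
//       {
//           atomic_shared<Widget> old(g_ptr);                    // sample
//           atomic_shared<Widget> next(new Widget(old->value + 1));
//           if (g_ptr.compare_and_set(old, next))
//               break;                                           // success
//           // Either the pointer changed or the spinlock was busy
//           // (compare_and_set uses trylock); retry with a fresh sample.
//       }
//   }
// ---------------------------------------------------------------------------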