|
Boost-Commit : |
Subject: [Boost-commit] svn:boost r58866 - in sandbox/fiber: boost/fiber boost/fiber/spin libs/fiber/doc
From: oliver.kowalke_at_[hidden]
Date: 2010-01-10 10:41:47
Author: olli
Date: 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
New Revision: 58866
URL: http://svn.boost.org/trac/boost/changeset/58866
Log:
- internal classes of channels moved into namespace detail
Text files modified:
sandbox/fiber/boost/fiber/bounded_channel.hpp | 4
sandbox/fiber/boost/fiber/spin/bounded_channel.hpp | 438 ++++++++++++++++++++-------------------
sandbox/fiber/boost/fiber/spin/unbounded_channel.hpp | 272 ++++++++++++------------
sandbox/fiber/boost/fiber/unbounded_channel.hpp | 4
sandbox/fiber/libs/fiber/doc/overview.qbk | 11
5 files changed, 373 insertions(+), 356 deletions(-)
Modified: sandbox/fiber/boost/fiber/bounded_channel.hpp
==============================================================================
--- sandbox/fiber/boost/fiber/bounded_channel.hpp (original)
+++ sandbox/fiber/boost/fiber/bounded_channel.hpp 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
@@ -48,10 +48,10 @@
next()
{}
- inline friend void intrusive_ptr_add_ref( node * p)
+ friend void intrusive_ptr_add_ref( node * p)
{ ++p->use_count; }
- inline friend void intrusive_ptr_release( node * p)
+ friend void intrusive_ptr_release( node * p)
{ if ( --p->use_count == 0) delete p; }
};
Modified: sandbox/fiber/boost/fiber/spin/bounded_channel.hpp
==============================================================================
--- sandbox/fiber/boost/fiber/spin/bounded_channel.hpp (original)
+++ sandbox/fiber/boost/fiber/spin/bounded_channel.hpp 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
@@ -27,292 +27,298 @@
namespace boost {
namespace fibers {
namespace spin {
+namespace detail {
template< typename T >
-class bounded_channel
+class bounded_channel_base : private noncopyable
{
public:
typedef optional< T > value_type;
private:
- class impl : private noncopyable
+ struct node
{
- private:
- struct node
- {
- typedef intrusive_ptr< node > ptr;
+ typedef intrusive_ptr< node > ptr;
- atomic< std::size_t > use_count;
- value_type va;
- ptr next;
-
- node() :
- use_count( 0),
- va(),
- next()
- {}
-
- inline friend void intrusive_ptr_add_ref( node * p)
- { p->use_count.fetch_add( 1, memory_order_relaxed); }
-
- inline friend void intrusive_ptr_release( node * p)
+ atomic< std::size_t > use_count;
+ value_type va;
+ ptr next;
+
+ node() :
+ use_count( 0),
+ va(),
+ next()
+ {}
+
+ friend void intrusive_ptr_add_ref( node * p)
+ { p->use_count.fetch_add( 1, memory_order_relaxed); }
+
+ friend void intrusive_ptr_release( node * p)
+ {
+ if ( p->use_count.fetch_sub( 1, memory_order_release) == 1)
{
- if ( p->use_count.fetch_sub( 1, memory_order_release) == 1)
- {
- atomic_thread_fence( memory_order_acquire);
- delete p;
- }
+ atomic_thread_fence( memory_order_acquire);
+ delete p;
}
- };
+ }
+ };
- enum state
- {
- ACTIVE = 0,
- DEACTIVE
- };
-
- atomic< state > state_;
- atomic< std::size_t > count_;
- typename node::ptr head_;
- mutable mutex head_mtx_;
- typename node::ptr tail_;
- mutable mutex tail_mtx_;
- condition not_empty_cond_;
- condition not_full_cond_;
- unsigned int hwm_;
- unsigned int lwm_;
- atomic< std::size_t > use_count_;
-
- bool active_() const
- { return ACTIVE == state_.load(); }
-
- void deactivate_()
- { state_.store( DEACTIVE); }
-
- std::size_t size_() const
- { return count_.load(); }
+ enum state
+ {
+ ACTIVE = 0,
+ DEACTIVE
+ };
- bool empty_() const
- { return head_ == get_tail_(); }
+ atomic< state > state_;
+ atomic< std::size_t > count_;
+ typename node::ptr head_;
+ mutable mutex head_mtx_;
+ typename node::ptr tail_;
+ mutable mutex tail_mtx_;
+ condition not_empty_cond_;
+ condition not_full_cond_;
+ unsigned int hwm_;
+ unsigned int lwm_;
+ atomic< std::size_t > use_count_;
+
+ bool active_() const
+ { return ACTIVE == state_.load(); }
+
+ void deactivate_()
+ { state_.store( DEACTIVE); }
+
+ std::size_t size_() const
+ { return count_.load(); }
- bool full_() const
- { return size_() >= hwm_; }
+ bool empty_() const
+ { return head_ == get_tail_(); }
- typename node::ptr get_tail_() const
- {
- mutex::scoped_lock lk( tail_mtx_);
- typename node::ptr tmp = tail_;
- return tmp;
- }
+ bool full_() const
+ { return size_() >= hwm_; }
- typename node::ptr pop_head_()
- {
- typename node::ptr old_head = head_;
- head_ = old_head->next;
- count_.fetch_sub( 1);
- return old_head;
- }
+ typename node::ptr get_tail_() const
+ {
+ mutex::scoped_lock lk( tail_mtx_);
+ typename node::ptr tmp = tail_;
+ return tmp;
+ }
- public:
- impl(
- std::size_t hwm,
- std::size_t lwm) :
- state_( ACTIVE),
- count_( 0),
- head_( new node() ),
- head_mtx_(),
- tail_( head_),
- tail_mtx_(),
- not_empty_cond_(),
- not_full_cond_(),
- hwm_( hwm),
- lwm_( lwm),
- use_count_( 0)
- {
- if ( hwm_ < lwm_)
- throw invalid_watermark();
- }
+ typename node::ptr pop_head_()
+ {
+ typename node::ptr old_head = head_;
+ head_ = old_head->next;
+ count_.fetch_sub( 1);
+ return old_head;
+ }
- impl( std::size_t wm) :
- state_( ACTIVE),
- count_( 0),
- head_( new node() ),
- head_mtx_(),
- tail_( head_),
- tail_mtx_(),
- not_empty_cond_(),
- not_full_cond_(),
- hwm_( wm),
- lwm_( wm),
- use_count_( 0)
- {}
+public:
+ bounded_channel_base(
+ std::size_t hwm,
+ std::size_t lwm) :
+ state_( ACTIVE),
+ count_( 0),
+ head_( new node() ),
+ head_mtx_(),
+ tail_( head_),
+ tail_mtx_(),
+ not_empty_cond_(),
+ not_full_cond_(),
+ hwm_( hwm),
+ lwm_( lwm),
+ use_count_( 0)
+ {
+ if ( hwm_ < lwm_)
+ throw invalid_watermark();
+ }
+
+ bounded_channel_base( std::size_t wm) :
+ state_( ACTIVE),
+ count_( 0),
+ head_( new node() ),
+ head_mtx_(),
+ tail_( head_),
+ tail_mtx_(),
+ not_empty_cond_(),
+ not_full_cond_(),
+ hwm_( wm),
+ lwm_( wm),
+ use_count_( 0)
+ {}
- void upper_bound_( std::size_t hwm)
- {
- if ( hwm < lwm_)
- throw invalid_watermark();
- unsigned int tmp( hwm_);
- hwm_ = hwm;
- if ( hwm_ > tmp) not_full_cond_.notify_one();
- }
+ void upper_bound_( std::size_t hwm)
+ {
+ if ( hwm < lwm_)
+ throw invalid_watermark();
+ unsigned int tmp( hwm_);
+ hwm_ = hwm;
+ if ( hwm_ > tmp) not_full_cond_.notify_one();
+ }
- std::size_t upper_bound() const
- { return hwm_; }
+ std::size_t upper_bound() const
+ { return hwm_; }
- void lower_bound_( std::size_t lwm)
- {
- if ( lwm > hwm_ )
- throw invalid_watermark();
- unsigned int tmp( lwm_);
- lwm_ = lwm;
- if ( lwm_ > tmp) not_full_cond_.notify_one();
- }
+ void lower_bound_( std::size_t lwm)
+ {
+ if ( lwm > hwm_ )
+ throw invalid_watermark();
+ unsigned int tmp( lwm_);
+ lwm_ = lwm;
+ if ( lwm_ > tmp) not_full_cond_.notify_one();
+ }
- std::size_t lower_bound() const
- { return lwm_; }
+ std::size_t lower_bound() const
+ { return lwm_; }
- bool active() const
- { return active_(); }
+ bool active() const
+ { return active_(); }
- void deactivate()
- { deactivate_(); }
+ void deactivate()
+ { deactivate_(); }
- bool empty() const
- {
- mutex::scoped_lock lk( head_mtx_);
- return empty_();
- }
+ bool empty() const
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ return empty_();
+ }
- void put( T const& t)
+ void put( T const& t)
+ {
+ typename node::ptr new_node( new node() );
{
- typename node::ptr new_node( new node() );
- {
- mutex::scoped_lock lk( tail_mtx_);
+ mutex::scoped_lock lk( tail_mtx_);
- if ( full_() )
- {
- while ( active_() && full_() )
- not_full_cond_.wait( lk);
- }
-
- if ( ! active_() )
- throw std::runtime_error("queue is not active");
-
- tail_->va = t;
- tail_->next = new_node;
- tail_ = new_node;
- count_.fetch_add( 1);
+ if ( full_() )
+ {
+ while ( active_() && full_() )
+ not_full_cond_.wait( lk);
}
- not_empty_cond_.notify_one();
+
+ if ( ! active_() )
+ throw std::runtime_error("queue is not active");
+
+ tail_->va = t;
+ tail_->next = new_node;
+ tail_ = new_node;
+ count_.fetch_add( 1);
}
+ not_empty_cond_.notify_one();
+ }
- bool take( value_type & va)
+ bool take( value_type & va)
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ bool empty = empty_();
+ if ( ! active_() && empty)
+ return false;
+ if ( empty)
{
- mutex::scoped_lock lk( head_mtx_);
- bool empty = empty_();
- if ( ! active_() && empty)
- return false;
- if ( empty)
+ try
{
- try
- {
- while ( active_() && empty_() )
- not_empty_cond_.wait( lk);
- }
- catch ( fiber_interrupted const&)
- { return false; }
+ while ( active_() && empty_() )
+ not_empty_cond_.wait( lk);
}
- if ( ! active_() && empty_() )
- return false;
- swap( va, head_->va);
- pop_head_();
- if ( size_() <= lwm_)
- {
- if ( lwm_ == hwm_)
- not_full_cond_.notify_one();
- else
- // more than one producer could be waiting
- // for submiting an action object
- not_full_cond_.notify_all();
- }
- return va;
+ catch ( fiber_interrupted const&)
+ { return false; }
+ }
+ if ( ! active_() && empty_() )
+ return false;
+ swap( va, head_->va);
+ pop_head_();
+ if ( size_() <= lwm_)
+ {
+ if ( lwm_ == hwm_)
+ not_full_cond_.notify_one();
+ else
+ // more than one producer could be waiting
+ // for submitting an action object
+ not_full_cond_.notify_all();
}
+ return va;
+ }
- bool try_take( value_type & va)
- {
- mutex::scoped_lock lk( head_mtx_);
- if ( empty_() )
- return false;
- swap( va, head_->va);
- pop_head_();
- bool valid = va;
- if ( valid && size_() <= lwm_)
- {
- if ( lwm_ == hwm_)
- not_full_cond_.notify_one();
- else
- // more than one producer could be waiting
- // in order to submit an task
- not_full_cond_.notify_all();
- }
- return valid;
+ bool try_take( value_type & va)
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ if ( empty_() )
+ return false;
+ swap( va, head_->va);
+ pop_head_();
+ bool valid = va;
+ if ( valid && size_() <= lwm_)
+ {
+ if ( lwm_ == hwm_)
+ not_full_cond_.notify_one();
+ else
+ // more than one producer could be waiting
+ // in order to submit a task
+ not_full_cond_.notify_all();
}
+ return valid;
+ }
- inline friend void intrusive_ptr_add_ref( impl * p)
- { p->use_count_.fetch_add( 1, memory_order_relaxed); }
-
- inline friend void intrusive_ptr_release( impl * p)
+ friend void intrusive_ptr_add_ref( bounded_channel_base * p)
+ { p->use_count_.fetch_add( 1, memory_order_relaxed); }
+
+ friend void intrusive_ptr_release( bounded_channel_base * p)
+ {
+ if ( p->use_count_.fetch_sub( 1, memory_order_release) == 1)
{
- if ( p->use_count_.fetch_sub( 1, memory_order_release) == 1)
- {
- atomic_thread_fence( memory_order_acquire);
- delete p;
- }
+ atomic_thread_fence( memory_order_acquire);
+ delete p;
}
- };
+ }
+};
+
+}
+
+template< typename T >
+class bounded_channel
+{
+private:
+ typedef typename detail::bounded_channel_base< T >::value_type value_type;
- intrusive_ptr< impl > impl_;
+ intrusive_ptr< detail::bounded_channel_base< T > > base_;
public:
bounded_channel(
std::size_t hwm,
std::size_t lwm) :
- impl_( new impl( hwm, lwm) )
+ base_( new detail::bounded_channel_base< T >( hwm, lwm) )
{}
bounded_channel( std::size_t wm) :
- impl_( new impl( wm) )
+ base_( new detail::bounded_channel_base< T >( wm) )
{}
void upper_bound( std::size_t hwm)
- { impl_->upper_bound( hwm); }
+ { base_->upper_bound( hwm); }
std::size_t upper_bound() const
- { return impl_->upper_bound(); }
+ { return base_->upper_bound(); }
void lower_bound( std::size_t lwm)
- { impl_->lower_bound( lwm); }
+ { base_->lower_bound( lwm); }
std::size_t lower_bound() const
- { return impl_->lower_bound(); }
+ { return base_->lower_bound(); }
bool active() const
- { return impl_->active(); }
+ { return base_->active(); }
void deactivate()
- { impl_->deactivate(); }
+ { base_->deactivate(); }
bool empty() const
- { return impl_->empty(); }
+ { return base_->empty(); }
void put( T const& t)
- { impl_->put( t); }
+ { base_->put( t); }
bool take( value_type & va)
- { return impl_->take( va);}
+ { return base_->take( va);}
bool try_take( value_type & va)
- { return impl_->try_take( va); }
+ { return base_->try_take( va); }
};
}}}
Modified: sandbox/fiber/boost/fiber/spin/unbounded_channel.hpp
==============================================================================
--- sandbox/fiber/boost/fiber/spin/unbounded_channel.hpp (original)
+++ sandbox/fiber/boost/fiber/spin/unbounded_channel.hpp 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
@@ -27,190 +27,196 @@
namespace boost {
namespace fibers {
namespace spin {
+namespace detail {
template< typename T >
-class unbounded_channel
+class unbounded_channel_base : private noncopyable
{
public:
typedef optional< T > value_type;
private:
- class impl : private noncopyable
+ struct node
{
- private:
- struct node
- {
- typedef intrusive_ptr< node > ptr;
+ typedef intrusive_ptr< node > ptr;
+
+ atomic< std::size_t > use_count;
+ value_type va;
+ ptr next;
+
+ node() :
+ use_count( 0),
+ va(),
+ next()
+ {}
- atomic< std::size_t > use_count;
- value_type va;
- ptr next;
-
- node() :
- use_count( 0),
- va(),
- next()
- {}
-
- inline friend void intrusive_ptr_add_ref( node * p)
- { p->use_count.fetch_add( 1, memory_order_relaxed); }
-
- inline friend void intrusive_ptr_release( node * p)
+ friend void intrusive_ptr_add_ref( node * p)
+ { p->use_count.fetch_add( 1, memory_order_relaxed); }
+
+ friend void intrusive_ptr_release( node * p)
+ {
+ if ( p->use_count.fetch_sub( 1, memory_order_release) == 1)
{
- if ( p->use_count.fetch_sub( 1, memory_order_release) == 1)
- {
- atomic_thread_fence( memory_order_acquire);
- delete p;
- }
+ atomic_thread_fence( memory_order_acquire);
+ delete p;
}
- };
+ }
+ };
- enum state
- {
- ACTIVE = 0,
- DEACTIVE
- };
-
- atomic< state > state_;
- typename node::ptr head_;
- mutable mutex head_mtx_;
- typename node::ptr tail_;
- mutable mutex tail_mtx_;
- condition not_empty_cond_;
- atomic< std::size_t > use_count_;
-
- bool active_() const
- { return ACTIVE == state_.load(); }
+ enum state
+ {
+ ACTIVE = 0,
+ DEACTIVE
+ };
- void deactivate_()
- { state_.store( DEACTIVE); }
+ atomic< state > state_;
+ typename node::ptr head_;
+ mutable mutex head_mtx_;
+ typename node::ptr tail_;
+ mutable mutex tail_mtx_;
+ condition not_empty_cond_;
+ atomic< std::size_t > use_count_;
+
+ bool active_() const
+ { return ACTIVE == state_.load(); }
- bool empty_() const
- { return head_ == get_tail_(); }
+ void deactivate_()
+ { state_.store( DEACTIVE); }
- typename node::ptr get_tail_() const
- {
- mutex::scoped_lock lk( tail_mtx_);
- typename node::ptr tmp = tail_;
- return tmp;
- }
+ bool empty_() const
+ { return head_ == get_tail_(); }
- typename node::ptr pop_head_()
- {
- typename node::ptr old_head = head_;
- head_ = old_head->next;
- return old_head;
- }
+ typename node::ptr get_tail_() const
+ {
+ mutex::scoped_lock lk( tail_mtx_);
+ typename node::ptr tmp = tail_;
+ return tmp;
+ }
- public:
- impl() :
- state_( ACTIVE),
- head_( new node() ),
- head_mtx_(),
- tail_( head_),
- tail_mtx_(),
- not_empty_cond_(),
- use_count_( 0)
- {}
+ typename node::ptr pop_head_()
+ {
+ typename node::ptr old_head = head_;
+ head_ = old_head->next;
+ return old_head;
+ }
- bool active() const
- { return active_(); }
+public:
+ unbounded_channel_base() :
+ state_( ACTIVE),
+ head_( new node() ),
+ head_mtx_(),
+ tail_( head_),
+ tail_mtx_(),
+ not_empty_cond_(),
+ use_count_( 0)
+ {}
- void deactivate()
- { deactivate_(); }
+ bool active() const
+ { return active_(); }
- bool empty() const
- {
- mutex::scoped_lock lk( head_mtx_);
- return empty_();
- }
+ void deactivate()
+ { deactivate_(); }
- void put( T const& t)
+ bool empty() const
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ return empty_();
+ }
+
+ void put( T const& t)
+ {
+ typename node::ptr new_node( new node() );
{
- typename node::ptr new_node( new node() );
- {
- mutex::scoped_lock lk( tail_mtx_);
+ mutex::scoped_lock lk( tail_mtx_);
- if ( ! active_() )
- throw std::runtime_error("queue is not active");
+ if ( ! active_() )
+ throw std::runtime_error("queue is not active");
- tail_->va = t;
- tail_->next = new_node;
- tail_ = new_node;
- }
- not_empty_cond_.notify_one();
+ tail_->va = t;
+ tail_->next = new_node;
+ tail_ = new_node;
}
+ not_empty_cond_.notify_one();
+ }
- bool take( value_type & va)
+ bool take( value_type & va)
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ bool empty = empty_();
+ if ( ! active_() && empty)
+ return false;
+ if ( empty)
{
- mutex::scoped_lock lk( head_mtx_);
- bool empty = empty_();
- if ( ! active_() && empty)
- return false;
- if ( empty)
+ try
{
- try
- {
- while ( active_() && empty_() )
- not_empty_cond_.wait( lk);
- }
- catch ( fiber_interrupted const&)
- { return false; }
+ while ( active_() && empty_() )
+ not_empty_cond_.wait( lk);
}
- if ( ! active_() && empty_() )
- return false;
- swap( va, head_->va);
- pop_head_();
- return va;
+ catch ( fiber_interrupted const&)
+ { return false; }
}
+ if ( ! active_() && empty_() )
+ return false;
+ swap( va, head_->va);
+ pop_head_();
+ return va;
+ }
- bool try_take( value_type & va)
+ bool try_take( value_type & va)
+ {
+ mutex::scoped_lock lk( head_mtx_);
+ if ( empty_() )
+ return false;
+ swap( va, head_->va);
+ pop_head_();
+ return va;
+ }
+
+ friend void intrusive_ptr_add_ref( unbounded_channel_base * p)
+ { p->use_count_.fetch_add( 1, memory_order_relaxed); }
+
+ friend void intrusive_ptr_release( unbounded_channel_base * p)
+ {
+ if ( p->use_count_.fetch_sub( 1, memory_order_release) == 1)
{
- mutex::scoped_lock lk( head_mtx_);
- if ( empty_() )
- return false;
- swap( va, head_->va);
- pop_head_();
- return va;
+ atomic_thread_fence( memory_order_acquire);
+ delete p;
}
+ }
+};
- inline friend void intrusive_ptr_add_ref( impl * p)
- { p->use_count_.fetch_add( 1, memory_order_relaxed); }
-
- inline friend void intrusive_ptr_release( impl * p)
- {
- if ( p->use_count_.fetch_sub( 1, memory_order_release) == 1)
- {
- atomic_thread_fence( memory_order_acquire);
- delete p;
- }
- }
- };
+}
+
+template< typename T >
+class unbounded_channel
+{
+private:
+ typedef typename detail::unbounded_channel_base< T >::value_type value_type;
- intrusive_ptr< impl > impl_;
+ intrusive_ptr< detail::unbounded_channel_base< T > > base_;
public:
unbounded_channel() :
- impl_( new impl() )
+ base_( new detail::unbounded_channel_base< T >() )
{}
bool active() const
- { return impl_->active(); }
+ { return base_->active(); }
void deactivate()
- { impl_->deactivate(); }
+ { base_->deactivate(); }
bool empty()
- { return impl_->empty(); }
+ { return base_->empty(); }
void put( T const& t)
- { impl_->put( t); }
+ { base_->put( t); }
bool take( value_type & va)
- { return impl_->take( va); }
+ { return base_->take( va); }
bool try_take( value_type & va)
- { return impl_->try_take( va); }
+ { return base_->try_take( va); }
};
}}}
Modified: sandbox/fiber/boost/fiber/unbounded_channel.hpp
==============================================================================
--- sandbox/fiber/boost/fiber/unbounded_channel.hpp (original)
+++ sandbox/fiber/boost/fiber/unbounded_channel.hpp 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
@@ -49,10 +49,10 @@
next()
{}
- inline friend void intrusive_ptr_add_ref( node * p)
+ friend void intrusive_ptr_add_ref( node * p)
{ ++p->use_count; }
- inline friend void intrusive_ptr_release( node * p)
+ friend void intrusive_ptr_release( node * p)
{ if ( --p->use_count == 0) delete p; }
};
Modified: sandbox/fiber/libs/fiber/doc/overview.qbk
==============================================================================
--- sandbox/fiber/libs/fiber/doc/overview.qbk (original)
+++ sandbox/fiber/libs/fiber/doc/overview.qbk 2010-01-10 10:41:46 EST (Sun, 10 Jan 2010)
@@ -31,13 +31,18 @@
#include <boost/fiber.hpp>
-which includes all the other headers in turn. The classes and functions reside in namespaces
-`boost::fibers` and `boost::this_fiber`.
+which includes all the other headers in turn.
+
+Used namespaces are:
+
+ namespace boost::fibers
+ namespace boost::this_fiber
-[note __boost_fiber__ requires [*Boost Library 1.41.0] .]
[warning This library is ['not] an official Boost library]
+[note __boost_fiber__ requires [*Boost Library 1.41.0] .]
+
__boost_fiber__ depends upon __boost_atomic__, __boost_move__ and uses some code from __boost_thread__
(especially a specialisation of future).
Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk