From: Peter Dimov (pdimov_at_[hidden])
Date: 2006-10-27 08:49:18
Peter Dimov wrote:
> Howard Hinnant wrote:
>> So the fast path would check these bits and avoid the notify_all if
>> they weren't set.
>
> Thanks, I'll try this tomorrow. Later today, I mean.
Works splendidly on uniprocessor, but displays the same odd behavior I've
been seeing with the other algorithms on my Pentium D (with XP64):
0R+4W+0F: 46
0R+0W+4F: 14
0R+4W+4F: 10 (!)
The Windows scheduler doesn't like us. Or I've made a mistake somewhere.
Even if I didn't, it seems to me that the bit doesn't help in the 4W case:
there's always a writer waiting, so we hit the notify_all anyway. (The bit
is the state_cw_mask test in unlock() below.)
#ifndef BOOST_DETAIL_RW_MUTEX_HPP_INCLUDED
#define BOOST_DETAIL_RW_MUTEX_HPP_INCLUDED
// MS compatible compilers support #pragma once
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
// Copyright (c) 2005, 2006 Peter Dimov
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define RW_MUTEX_NAME "Terekhov/Hinnant, lock-free"
#include <algorithm>
#include <cassert>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/detail/interlocked.hpp>
// Unordered load; no memory ordering is implied.
template<class T> inline T atomic_load_none( T const * pt )
{
    return *static_cast<T const volatile *>( pt );
}

template<class T> inline void atomic_store_rel( T * pt, T v )
{
    // x86-specific implementation; a plain volatile store is a release
    // on x86, other architectures would need a barrier here
    *static_cast<T volatile *>( pt ) = v;
}
// Compare-and-swap on *destination. Returns true on success; on failure,
// *comparand is updated with the value actually observed, so the caller
// can recompute and retry without rereading.
inline bool atomic_compare_exchange( long * destination, long * comparand,
    long exchange )
{
    long ov = *comparand;
    long nv = BOOST_INTERLOCKED_COMPARE_EXCHANGE( destination, exchange, ov );

    if( ov == nv )
    {
        return true;
    }
    else
    {
        *comparand = nv;
        return false;
    }
}
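// An illustrative retry loop (editorial sketch, not part of the original
// header) showing the intended use of the helper on some long `value`;
// the failure path leaves the observed value in st:
//
//   long st = atomic_load_none( &value );
//
//   while( !atomic_compare_exchange( &value, &st, st + 1 ) )
//   {
//       // st has been refreshed with the observed value; retry
//   }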
class rw_mutex
{
private:

    rw_mutex( rw_mutex const & );
    rw_mutex & operator=( rw_mutex const & );

private:

    boost::mutex mx_;
    boost::condition cn_r_;
    boost::condition cn_w_;

    enum
    {
        state_wp_mask = 0x80000000ul, // writer pending/active
        state_cw_mask = 0x40000000ul, // someone waiting on cn_w_
        state_rd_mask = 0x3FFFFFFFul  // reader count
    };

    long state_;
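    // Illustrative state values (editorial, not in the original):
    //   0x00000003 - three readers hold the lock
    //   0x80000002 - a writer has set state_wp_mask and is waiting for
    //                two remaining readers to drain
    //   0xC0000001 - as above with one reader left, and at least one
    //                thread also blocked on cn_w_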
public:

    rw_mutex(): state_( 0 )
    {
    }

    ~rw_mutex()
    {
    }
    void rdlock()
    {
        long st = atomic_load_none( &state_ );

        for( ;; )
        {
            if( st & state_wp_mask ) // a writer is pending; go to sleep
            {
                boost::mutex::scoped_lock lock( mx_ );

                if( atomic_compare_exchange( &state_, &st, st | state_cw_mask ) )
                {
                    cn_w_.wait( lock );
                    st = state_;
                }
            }
            else // fast path: try to increment the reader count
            {
                if( atomic_compare_exchange( &state_, &st, st + 1 ) )
                    return; // got read lock
            }
        }
    }
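    // Editorial note: the CAS that sets state_cw_mask is performed while
    // holding mx_, and unlock() locks and unlocks mx_ before notifying,
    // so the wakeup cannot be lost between the CAS and cn_w_.wait().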
    void lock()
    {
        long st = atomic_load_none( &state_ );

        for( ;; )
        {
            if( st & state_wp_mask ) // another writer is ahead; go to sleep
            {
                boost::mutex::scoped_lock lock( mx_ );

                if( atomic_compare_exchange( &state_, &st, st | state_cw_mask ) )
                {
                    cn_w_.wait( lock );
                    st = state_;
                }
            }
            else
            {
                if( atomic_compare_exchange( &state_, &st, st | state_wp_mask ) )
                {
                    // writer bit set; wait for the readers to drain
                    if( st & state_rd_mask )
                    {
                        boost::mutex::scoped_lock lock( mx_ );

                        while( state_ & state_rd_mask )
                        {
                            cn_r_.wait( lock );
                        }
                    }

                    return;
                }
            }
        }
    }
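    // Editorial note: write acquisition is two-phase; setting
    // state_wp_mask first keeps new readers out, then the writer
    // sleeps on cn_r_ until the reader count reaches zero.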
    void rdunlock()
    {
        long st = BOOST_INTERLOCKED_EXCHANGE_ADD( &state_, -1 );

        // we were the last reader and a writer is waiting
        if( ( st & ~state_cw_mask ) == ( state_wp_mask | 1 ) )
        {
            // taking mx_ ensures that the writer has either not yet
            // tested state_ or is already waiting on cn_r_, so the
            // notification below cannot be lost
            {
                boost::mutex::scoped_lock lock( mx_ );
            }

            cn_r_.notify_one();
        }
    }
    void unlock()
    {
        long st = BOOST_INTERLOCKED_EXCHANGE( &state_, 0 );

        if( st & state_cw_mask ) // fast path: skip notify_all if nobody waits
        {
            // same handshake against a lost wakeup as in rdunlock
            {
                boost::mutex::scoped_lock lock( mx_ );
            }

            cn_w_.notify_all();
        }
    }
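    // Editorial note: this state_cw_mask test is the fast path referred
    // to above; as observed in the 4W runs, a writer is almost always
    // waiting, so the notify_all is rarely skipped there.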
public:

    //long retries( int ix )
    //{
    //    return InterlockedExchange( &retries_[ ix ], 0 );
    //}
public:

    // lock classes

    class scoped_read_lock
    {
    private:

        rw_mutex & mx_;

        scoped_read_lock( scoped_read_lock const & );
        scoped_read_lock & operator=( scoped_read_lock const & );

    public:

        scoped_read_lock( rw_mutex & mx ): mx_( mx )
        {
            mx_.rdlock();
        }

        ~scoped_read_lock()
        {
            mx_.rdunlock();
        }
    };

    class scoped_write_lock
    {
    private:

        rw_mutex & mx_;

        scoped_write_lock( scoped_write_lock const & );
        scoped_write_lock & operator=( scoped_write_lock const & );

    public:

        scoped_write_lock( rw_mutex & mx ): mx_( mx )
        {
            mx_.lock();
        }

        ~scoped_write_lock()
        {
            mx_.unlock();
        }
    };
};
#endif // #ifndef BOOST_DETAIL_RW_MUTEX_HPP_INCLUDED
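For reference, a minimal usage sketch of the two lock classes (editorial;
the shared counter and the thread functions are hypothetical):

    void reader( rw_mutex & m, long const & shared_value )
    {
        rw_mutex::scoped_read_lock lock( m );
        long v = shared_value; // any number of readers may be here at once
        (void)v;
    }

    void writer( rw_mutex & m, long & shared_value )
    {
        rw_mutex::scoped_write_lock lock( m );
        ++shared_value; // exclusive access
    }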