Boost logo

Boost-Commit :

Subject: [Boost-commit] svn:boost r81969 - in trunk: boost/atomic boost/atomic/detail libs/atomic/src
From: andrey.semashev_at_[hidden]
Date: 2012-12-15 08:24:03


Author: andysem
Date: 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
New Revision: 81969
URL: http://svn.boost.org/trac/boost/changeset/81969

Log:
Code cleanup. Implemented optimized atomic operations for Windows.
Added:
   trunk/boost/atomic/detail/type-classification.hpp
      - copied, changed from r81948, /trunk/boost/atomic/detail/type-classifier.hpp
   trunk/boost/atomic/detail/windows.hpp (contents, props changed)
Removed:
   trunk/boost/atomic/detail/type-classifier.hpp
Text files modified:
   trunk/boost/atomic/atomic.hpp | 48 ++----
   trunk/boost/atomic/detail/base.hpp | 32 ++-
   trunk/boost/atomic/detail/cas32strong.hpp | 39 ++--
   trunk/boost/atomic/detail/cas32weak.hpp | 38 ++--
   trunk/boost/atomic/detail/cas64strong.hpp | 17 +-
   trunk/boost/atomic/detail/gcc-alpha.hpp | 4
   trunk/boost/atomic/detail/gcc-armv6plus.hpp | 20 +-
   trunk/boost/atomic/detail/gcc-cas.hpp | 14 +
   trunk/boost/atomic/detail/gcc-ppc.hpp | 74 +++++----
   trunk/boost/atomic/detail/gcc-sparcv9.hpp | 58 +++---
   trunk/boost/atomic/detail/gcc-x86.hpp | 94 ++++++-----
   trunk/boost/atomic/detail/generic-cas.hpp | 22 +-
   trunk/boost/atomic/detail/interlocked.hpp | 315 ++++++++++++++++++++++-----------------
   trunk/boost/atomic/detail/linux-arm.hpp | 20 +-
   trunk/boost/atomic/detail/lockpool.hpp | 51 +++---
   trunk/boost/atomic/detail/platform.hpp | 4
   trunk/boost/atomic/detail/type-classification.hpp | 76 ++-------
   trunk/libs/atomic/src/lockpool.cpp | 9 +
   18 files changed, 475 insertions(+), 460 deletions(-)

Modified: trunk/boost/atomic/atomic.hpp
==============================================================================
--- trunk/boost/atomic/atomic.hpp (original)
+++ trunk/boost/atomic/atomic.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -14,7 +14,7 @@
 
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/platform.hpp>
-#include <boost/atomic/detail/type-classifier.hpp>
+#include <boost/atomic/detail/type-classification.hpp>
 #include <boost/type_traits/is_signed.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -67,26 +67,26 @@
 
 #ifndef BOOST_ATOMIC_THREAD_FENCE
 #define BOOST_ATOMIC_THREAD_FENCE 0
-static inline void
-atomic_thread_fence(memory_order)
+inline void atomic_thread_fence(memory_order)
 {
 }
 #endif
 
 #ifndef BOOST_ATOMIC_SIGNAL_FENCE
 #define BOOST_ATOMIC_SIGNAL_FENCE 0
-static inline void
-atomic_signal_fence(memory_order order)
+inline void atomic_signal_fence(memory_order order)
 {
     atomic_thread_fence(order);
 }
 #endif
 
 template<typename T>
-class atomic : public atomics::detail::base_atomic<T, typename atomics::detail::type_classifier<T>::test, sizeof(T), boost::is_signed<T>::value > {
+class atomic :
+ public atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value >
+{
 private:
     typedef T value_type;
- typedef atomics::detail::base_atomic<T, typename atomics::detail::type_classifier<T>::test, sizeof(T), boost::is_signed<T>::value > super;
+ typedef atomics::detail::base_atomic<T, typename atomics::detail::classify<T>::type, atomics::detail::storage_size_of<T>::value, boost::is_signed<T>::value > super;
 public:
     atomic(void) : super() {}
     explicit atomic(const value_type & v) : super(v) {}
@@ -124,10 +124,18 @@
 #endif
 typedef atomic<void*> atomic_address;
 typedef atomic<bool> atomic_bool;
+typedef atomic<wchar_t> atomic_wchar_t;
+#if !defined(BOOST_NO_CXX11_CHAR16_T)
+typedef atomic<char16_t> atomic_char16_t;
+#endif
+#if !defined(BOOST_NO_CXX11_CHAR32_T)
+typedef atomic<char32_t> atomic_char32_t;
+#endif
 
 #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
 #define BOOST_ATOMIC_FLAG_LOCK_FREE 0
-class atomic_flag {
+class atomic_flag
+{
 public:
     atomic_flag(void) : v_(false) {}
 
@@ -149,30 +157,6 @@
 };
 #endif
 
-typedef atomic<char> atomic_char;
-typedef atomic<unsigned char> atomic_uchar;
-typedef atomic<signed char> atomic_schar;
-typedef atomic<uint8_t> atomic_uint8_t;
-typedef atomic<int8_t> atomic_int8_t;
-typedef atomic<unsigned short> atomic_ushort;
-typedef atomic<short> atomic_short;
-typedef atomic<uint16_t> atomic_uint16_t;
-typedef atomic<int16_t> atomic_int16_t;
-typedef atomic<unsigned int> atomic_uint;
-typedef atomic<int> atomic_int;
-typedef atomic<uint32_t> atomic_uint32_t;
-typedef atomic<int32_t> atomic_int32_t;
-typedef atomic<unsigned long> atomic_ulong;
-typedef atomic<long> atomic_long;
-typedef atomic<uint64_t> atomic_uint64_t;
-typedef atomic<int64_t> atomic_int64_t;
-#ifdef BOOST_HAS_LONG_LONG
-typedef atomic<boost::ulong_long_type> atomic_ullong;
-typedef atomic<boost::long_long_type> atomic_llong;
-#endif
-typedef atomic<void*> atomic_address;
-typedef atomic<bool> atomic_bool;
-
 }
 
 #endif

Modified: trunk/boost/atomic/detail/base.hpp
==============================================================================
--- trunk/boost/atomic/detail/base.hpp (original)
+++ trunk/boost/atomic/detail/base.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -13,6 +13,8 @@
 
 #include <string.h>
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/lockpool.hpp>
 
@@ -121,7 +123,7 @@
 namespace atomics {
 namespace detail {
 
-static inline memory_order
+inline memory_order
 calculate_failure_order(memory_order order)
 {
     switch(order) {
@@ -134,7 +136,7 @@
     }
 }
 
-template<typename T, typename C , unsigned int Size, bool Sign>
+template<typename T, typename C, unsigned int Size, bool Sign>
 class base_atomic {
 private:
     typedef base_atomic this_type;
@@ -145,15 +147,15 @@
 
     explicit base_atomic(const value_type & v)
     {
- memcpy(&v_, &v, Size);
+ memcpy(&v_, &v, sizeof(value_type));
     }
 
     void
- store(value_type v, memory_order /*order*/ = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order /*order*/ = memory_order_seq_cst) volatile
     {
         guard_type guard(const_cast<char *>(v_));
 
- memcpy(const_cast<char *>(v_), &v, Size);
+ memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
     }
 
     value_type
@@ -162,24 +164,24 @@
         guard_type guard(const_cast<const char *>(v_));
 
         value_type v;
- memcpy(&v, const_cast<const char *>(v_), Size);
+ memcpy(&v, const_cast<const char *>(v_), sizeof(value_type));
         return v;
     }
 
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order /*success_order*/,
         memory_order /*failure_order*/) volatile
     {
         guard_type guard(const_cast<char *>(v_));
 
- if (memcmp(const_cast<char *>(v_), &expected, Size) == 0) {
- memcpy(const_cast<char *>(v_), &desired, Size);
+ if (memcmp(const_cast<char *>(v_), &expected, sizeof(value_type)) == 0) {
+ memcpy(const_cast<char *>(v_), &desired, sizeof(value_type));
             return true;
         } else {
- memcpy(&expected, const_cast<char *>(v_), Size);
+ memcpy(&expected, const_cast<char *>(v_), sizeof(value_type));
             return false;
         }
     }
@@ -187,7 +189,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -195,14 +197,14 @@
     }
 
     value_type
- exchange(value_type v, memory_order /*order*/=memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order /*order*/=memory_order_seq_cst) volatile
     {
         guard_type guard(const_cast<char *>(v_));
 
         value_type tmp;
- memcpy(&tmp, const_cast<char *>(v_), Size);
+ memcpy(&tmp, const_cast<char *>(v_), sizeof(value_type));
 
- memcpy(const_cast<char *>(v_), &v, Size);
+ memcpy(const_cast<char *>(v_), &v, sizeof(value_type));
         return tmp;
     }
 
@@ -217,7 +219,7 @@
     base_atomic(const base_atomic &) /* = delete */ ;
     void operator=(const base_atomic &) /* = delete */ ;
 
- char v_[Size];
+ char v_[sizeof(value_type)];
 };
 
 template<typename T, unsigned int Size, bool Sign>

Modified: trunk/boost/atomic/detail/cas32strong.hpp
==============================================================================
--- trunk/boost/atomic/detail/cas32strong.hpp (original)
+++ trunk/boost/atomic/detail/cas32strong.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -10,6 +10,8 @@
 // Build 8-, 16- and 32-bit atomic operations from
 // a platform_cmpxchg32_strong primitive.
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/memory_order.hpp>
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/base.hpp>
@@ -600,14 +602,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -628,7 +630,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -639,7 +641,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -649,11 +651,10 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
-
         storage_type expected_s = 0, desired_s = 0;
         memcpy(&expected_s, &expected, sizeof(value_type));
         memcpy(&desired_s, &desired, sizeof(value_type));
@@ -690,14 +691,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -718,7 +719,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -729,7 +730,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -739,7 +740,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -780,14 +781,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -808,7 +809,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -819,7 +820,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -829,7 +830,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {

Modified: trunk/boost/atomic/detail/cas32weak.hpp
==============================================================================
--- trunk/boost/atomic/detail/cas32weak.hpp (original)
+++ trunk/boost/atomic/detail/cas32weak.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -7,6 +7,8 @@
 //
 // Copyright (c) 2011 Helge Bahmann
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/memory_order.hpp>
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/base.hpp>
@@ -619,14 +621,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -647,7 +649,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -658,7 +660,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -683,7 +685,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -717,14 +719,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -745,7 +747,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -756,7 +758,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -781,7 +783,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -815,14 +817,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -843,7 +845,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -854,7 +856,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -879,7 +881,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {

Modified: trunk/boost/atomic/detail/cas64strong.hpp
==============================================================================
--- trunk/boost/atomic/detail/cas64strong.hpp (original)
+++ trunk/boost/atomic/detail/cas64strong.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -11,6 +11,8 @@
 // primitive. It is assumed that 64-bit loads/stores are not
 // atomic, so they are funnelled through cmpxchg as well.
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/memory_order.hpp>
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/base.hpp>
@@ -347,16 +349,16 @@
     typedef T value_type;
     typedef uint64_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type value, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& value, memory_order order = memory_order_seq_cst) volatile
     {
- storage_type value_s;
+ storage_type value_s = 0;
         memcpy(&value_s, &value, sizeof(value_s));
         platform_fence_before_store(order);
         platform_store64(value_s, &v_);
@@ -374,7 +376,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type original = load(memory_order_relaxed);
         do {
@@ -385,7 +387,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -395,11 +397,10 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
-
         storage_type expected_s = 0, desired_s = 0;
         memcpy(&expected_s, &expected, sizeof(value_type));
         memcpy(&desired_s, &desired, sizeof(value_type));

Modified: trunk/boost/atomic/detail/gcc-alpha.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-alpha.hpp (original)
+++ trunk/boost/atomic/detail/gcc-alpha.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -49,7 +49,7 @@
 namespace atomics {
 namespace detail {
 
-static inline void fence_before(memory_order order)
+inline void fence_before(memory_order order)
 {
     switch(order) {
         case memory_order_consume:
@@ -61,7 +61,7 @@
     }
 }
 
-static inline void fence_after(memory_order order)
+inline void fence_after(memory_order order)
 {
     switch(order) {
         case memory_order_acquire:

Modified: trunk/boost/atomic/detail/gcc-armv6plus.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-armv6plus.hpp (original)
+++ trunk/boost/atomic/detail/gcc-armv6plus.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -9,6 +9,8 @@
 // Copyright (c) 2009 Phil Endecott
 // ARM Code by Phil Endecott, based on other architectures.
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -82,7 +84,7 @@
 #define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
 #endif
 
-static inline void
+inline void
 arm_barrier(void)
 {
     int brtmp;
@@ -94,7 +96,7 @@
     );
 }
 
-static inline void
+inline void
 platform_fence_before(memory_order order)
 {
     switch(order) {
@@ -107,7 +109,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after(memory_order order)
 {
     switch(order) {
@@ -119,27 +121,27 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_before_store(memory_order order)
 {
     platform_fence_before(order);
 }
 
-static inline void
+inline void
 platform_fence_after_store(memory_order order)
 {
     if (order == memory_order_seq_cst)
         arm_barrier();
 }
 
-static inline void
+inline void
 platform_fence_after_load(memory_order order)
 {
     platform_fence_after(order);
 }
 
 template<typename T>
-bool
+inline bool
 platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
 {
     int success;
@@ -169,7 +171,7 @@
 }
 
 #define BOOST_ATOMIC_THREAD_FENCE 2
-static inline void
+inline void
 atomic_thread_fence(memory_order order)
 {
     switch(order) {
@@ -183,7 +185,7 @@
 }
 
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
-static inline void
+inline void
 atomic_signal_fence(memory_order)
 {
     __asm__ __volatile__ ("" ::: "memory");

Modified: trunk/boost/atomic/detail/gcc-cas.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-cas.hpp (original)
+++ trunk/boost/atomic/detail/gcc-cas.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -10,6 +10,8 @@
 #ifndef BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
 #define BOOST_ATOMIC_DETAIL_GENERIC_CAS_HPP
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -38,19 +40,19 @@
 namespace atomics {
 namespace detail {
 
-static inline void
+inline void
 platform_fence_before(memory_order)
 {
     /* empty, as compare_and_swap is synchronizing already */
 }
 
-static inline void
+inline void
 platform_fence_after(memory_order)
 {
     /* empty, as compare_and_swap is synchronizing already */
 }
 
-static inline void
+inline void
 platform_fence_before_store(memory_order order)
 {
     switch(order) {
@@ -66,14 +68,14 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after_store(memory_order order)
 {
     if (order == memory_order_seq_cst)
         __sync_synchronize();
 }
 
-static inline void
+inline void
 platform_fence_after_load(memory_order order)
 {
     switch(order) {
@@ -90,7 +92,7 @@
 }
 
 template<typename T>
-bool
+inline bool
 platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
 {
     T found = __sync_val_compare_and_swap(ptr, expected, desired);

Modified: trunk/boost/atomic/detail/gcc-ppc.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-ppc.hpp (original)
+++ trunk/boost/atomic/detail/gcc-ppc.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -7,6 +7,8 @@
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -59,7 +61,7 @@
 namespace atomics {
 namespace detail {
 
-static inline void
+inline void
 ppc_fence_before(memory_order order)
 {
     switch(order) {
@@ -75,7 +77,7 @@
     }
 }
 
-static inline void
+inline void
 ppc_fence_after(memory_order order)
 {
     switch(order) {
@@ -89,7 +91,7 @@
     }
 }
 
-static inline void
+inline void
 ppc_fence_after_store(memory_order order)
 {
     switch(order) {
@@ -2113,14 +2115,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2154,7 +2156,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0, original;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2169,14 +2171,15 @@
             : "cr0"
         );
         ppc_fence_after(order);
- memcpy(&v, &original, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &original, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2211,7 +2214,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2262,14 +2265,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2303,7 +2306,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0, original;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2318,14 +2321,15 @@
             : "cr0"
         );
         ppc_fence_after(order);
- memcpy(&v, &original, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &original, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2360,7 +2364,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2411,14 +2415,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v) : v_(0)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2452,7 +2456,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0, original;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2467,14 +2471,15 @@
             : "cr0"
         );
         ppc_fence_after(order);
- memcpy(&v, &original, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &original, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2509,7 +2514,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2562,14 +2567,14 @@
     typedef T value_type;
     typedef uint64_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2603,7 +2608,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0, original;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -2618,14 +2623,15 @@
             : "cr0"
         );
         ppc_fence_after(order);
- memcpy(&v, &original, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &original, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -2660,7 +2666,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {

Modified: trunk/boost/atomic/detail/gcc-sparcv9.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-sparcv9.hpp (original)
+++ trunk/boost/atomic/detail/gcc-sparcv9.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -7,6 +7,8 @@
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -17,7 +19,7 @@
 namespace atomics {
 namespace detail {
 
-static inline void
+inline void
 platform_fence_before(memory_order order)
 {
     switch(order) {
@@ -37,7 +39,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after(memory_order order)
 {
     switch(order) {
@@ -60,7 +62,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after_store(memory_order order)
 {
     switch(order) {
@@ -71,7 +73,7 @@
 }
 
 
-static inline void
+inline void
 platform_fence_after_load(memory_order order)
 {
     platform_fence_after(order);
@@ -134,7 +136,7 @@
 namespace boost {
 
 #define BOOST_ATOMIC_THREAD_FENCE 2
-static inline void
+inline void
 atomic_thread_fence(memory_order order)
 {
     switch(order) {
@@ -159,7 +161,7 @@
 }
 
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
-static inline void
+inline void
 atomic_signal_fence(memory_order)
 {
     __asm__ __volatile__ ("" ::: "memory");
@@ -178,7 +180,7 @@
     typedef int32_t storage_type;
 public:
     explicit base_atomic(value_type v) : v_(v) {}
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
     store(value_type v, memory_order order = memory_order_seq_cst) volatile
@@ -300,7 +302,7 @@
     typedef uint32_t storage_type;
 public:
     explicit base_atomic(value_type v) : v_(v) {}
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
     store(value_type v, memory_order order = memory_order_seq_cst) volatile
@@ -422,7 +424,7 @@
     typedef int32_t storage_type;
 public:
     explicit base_atomic(value_type v) : v_(v) {}
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
     store(value_type v, memory_order order = memory_order_seq_cst) volatile
@@ -544,7 +546,7 @@
     typedef uint32_t storage_type;
 public:
     explicit base_atomic(value_type v) : v_(v) {}
- base_atomic(void) : v_(0) {}
+ base_atomic(void) {}
 
     void
     store(value_type v, memory_order order = memory_order_seq_cst) volatile
@@ -957,15 +959,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
- v_ = 0;
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -985,7 +986,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type tmp = load(memory_order_relaxed);
         do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed));
@@ -995,7 +996,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1021,7 +1022,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1047,15 +1048,14 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
- v_ = 0;
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -1075,7 +1075,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type tmp = load(memory_order_relaxed);
         do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed));
@@ -1085,7 +1085,7 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1111,7 +1111,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1137,16 +1137,16 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
- storage_type tmp;
+ storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
         platform_fence_before(order);
         const_cast<volatile storage_type &>(v_) = tmp;
@@ -1164,7 +1164,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         value_type tmp = load(memory_order_relaxed);
         do {} while(!compare_exchange_weak(tmp, v, order, memory_order_relaxed));
@@ -1174,11 +1174,11 @@
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
- storage_type expected_s, desired_s;
+ storage_type expected_s = 0, desired_s = 0;
         memcpy(&expected_s, &expected, sizeof(value_type));
         memcpy(&desired_s, &desired, sizeof(value_type));
         platform_fence_before(success_order);
@@ -1200,7 +1200,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {

Modified: trunk/boost/atomic/detail/gcc-x86.hpp
==============================================================================
--- trunk/boost/atomic/detail/gcc-x86.hpp (original)
+++ trunk/boost/atomic/detail/gcc-x86.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -8,6 +8,8 @@
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
@@ -24,7 +26,7 @@
 # define BOOST_ATOMIC_X86_FENCE_INSTR "lock ; addl $0, (%%esp)\n"
 #endif
 
-static inline void
+inline void
 platform_fence_before(memory_order order)
 {
     switch(order) {
@@ -44,7 +46,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after(memory_order order)
 {
     switch(order) {
@@ -67,7 +69,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after_load(memory_order order)
 {
     switch(order) {
@@ -87,7 +89,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_before_store(memory_order order)
 {
     switch(order) {
@@ -107,7 +109,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after_store(memory_order order)
 {
     switch(order) {
@@ -198,7 +200,7 @@
 namespace boost {
 
 #define BOOST_ATOMIC_THREAD_FENCE 2
-static inline void
+inline void
 atomic_thread_fence(memory_order order)
 {
     switch(order) {
@@ -223,7 +225,7 @@
 }
 
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
-static inline void
+inline void
 atomic_signal_fence(memory_order)
 {
     __asm__ __volatile__ ("" ::: "memory");
@@ -1125,14 +1127,14 @@
     typedef T value_type;
     typedef uint8_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         if (order != memory_order_seq_cst) {
             storage_type tmp;
@@ -1155,7 +1157,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -1165,14 +1167,15 @@
             : "+q" (tmp), "+m" (v_)
         );
         platform_fence_after(order);
- memcpy(&v, &tmp, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1198,7 +1201,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1224,14 +1227,14 @@
     typedef T value_type;
     typedef uint16_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         if (order != memory_order_seq_cst) {
             storage_type tmp;
@@ -1254,7 +1257,7 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         storage_type tmp;
         memcpy(&tmp, &v, sizeof(value_type));
@@ -1264,14 +1267,15 @@
             : "+q" (tmp), "+m" (v_)
         );
         platform_fence_after(order);
- memcpy(&v, &tmp, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1297,7 +1301,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1323,17 +1327,17 @@
     typedef T value_type;
     typedef uint32_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         if (order != memory_order_seq_cst) {
- storage_type tmp;
+ storage_type tmp = 0;
             memcpy(&tmp, &v, sizeof(value_type));
             platform_fence_before(order);
             const_cast<volatile storage_type &>(v_) = tmp;
@@ -1353,9 +1357,9 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
- storage_type tmp;
+ storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
         platform_fence_before(order);
         __asm__ (
@@ -1363,18 +1367,19 @@
             : "+q" (tmp), "+m" (v_)
         );
         platform_fence_after(order);
- memcpy(&v, &tmp, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
- storage_type expected_s, desired_s;
+ storage_type expected_s = 0, desired_s = 0;
         memcpy(&expected_s, &expected, sizeof(value_type));
         memcpy(&desired_s, &desired, sizeof(value_type));
         storage_type previous_s = expected_s;
@@ -1396,7 +1401,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1423,17 +1428,17 @@
     typedef T value_type;
     typedef uint64_t storage_type;
 public:
- explicit base_atomic(value_type v)
+ explicit base_atomic(value_type const& v) : v_(0)
     {
         memcpy(&v_, &v, sizeof(value_type));
     }
     base_atomic(void) {}
 
     void
- store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
         if (order != memory_order_seq_cst) {
- storage_type tmp;
+ storage_type tmp = 0;
             memcpy(&tmp, &v, sizeof(value_type));
             platform_fence_before(order);
             const_cast<volatile storage_type &>(v_) = tmp;
@@ -1453,9 +1458,9 @@
     }
 
     value_type
- exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
     {
- storage_type tmp;
+ storage_type tmp = 0;
         memcpy(&tmp, &v, sizeof(value_type));
         platform_fence_before(order);
         __asm__ (
@@ -1463,18 +1468,19 @@
             : "+q" (tmp), "+m" (v_)
         );
         platform_fence_after(order);
- memcpy(&v, &tmp, sizeof(value_type));
- return v;
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
     }
 
     bool
     compare_exchange_strong(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
- storage_type expected_s, desired_s;
+ storage_type expected_s = 0, desired_s = 0;
         memcpy(&expected_s, &expected, sizeof(value_type));
         memcpy(&desired_s, &desired, sizeof(value_type));
         storage_type previous_s = expected_s;
@@ -1496,7 +1502,7 @@
     bool
     compare_exchange_weak(
         value_type & expected,
- value_type desired,
+ value_type const& desired,
         memory_order success_order,
         memory_order failure_order) volatile
     {
@@ -1520,7 +1526,7 @@
 #if !defined(__x86_64__) && (defined(__i686__) || defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8))
 
 template<typename T>
-bool
+inline bool
 platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr)
 {
 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
@@ -1560,7 +1566,7 @@
 }
 
 template<typename T>
-void
+inline void
 platform_store64(T value, volatile T * ptr)
 {
     T expected = *ptr;
@@ -1569,7 +1575,7 @@
 }
 
 template<typename T>
-T
+inline T
 platform_load64(const volatile T * ptr)
 {
     T expected = *ptr;

Modified: trunk/boost/atomic/detail/generic-cas.hpp
==============================================================================
--- trunk/boost/atomic/detail/generic-cas.hpp (original)
+++ trunk/boost/atomic/detail/generic-cas.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -7,8 +7,8 @@
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
-#include <stdint.h>
-
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/memory_order.hpp>
 #include <boost/atomic/detail/config.hpp>
 #include <boost/atomic/detail/base.hpp>
@@ -25,7 +25,7 @@
 
 #if defined(__GNUC__)
     namespace boost { namespace atomics { namespace detail {
- static inline int32_t
+ inline int32_t
     fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired)
     {
         return __sync_val_compare_and_swap_4(ptr, expected, desired);
@@ -33,7 +33,7 @@
     #define BOOST_ATOMIC_HAVE_CAS32 1
 
     #if defined(__amd64__) || defined(__i686__)
- static inline int64_t
+ inline int64_t
     fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
     {
         return __sync_val_compare_and_swap_8(ptr, expected, desired);
@@ -50,14 +50,14 @@
     #endif
 
     namespace boost { namespace atomics { namespace detail {
- static inline int32_t
+ inline int32_t
     fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
     {
         return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
     }
     #define BOOST_ATOMIC_HAVE_CAS32 1
     #if defined(_WIN64)
- static inline int64_t
+ inline int64_t
     fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
     {
         return _InterlockedCompareExchange64(ptr, desired, expected);
@@ -68,21 +68,21 @@
 
 #elif (defined(__ICC) || defined(__ECC))
     namespace boost { namespace atomics { namespace detail {
- static inline int32_t
+ inline int32_t
     fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
     {
         return _InterlockedCompareExchange((void*)ptr, desired, expected);
     }
     #define BOOST_ATOMIC_HAVE_CAS32 1
     #if defined(__x86_64)
- static inline int64_t
+ inline int64_t
     fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
     {
         return cas64<int>(ptr, expected, desired);
     }
     #define BOOST_ATOMIC_HAVE_CAS64 1
     #elif defined(__ECC) //IA-64 version
- static inline int64_t
+ inline int64_t
     fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
     {
         return _InterlockedCompareExchange64((void*)ptr, desired, expected);
@@ -94,7 +94,7 @@
 #elif (defined(__SUNPRO_CC) && defined(__sparc))
     #include <sys/atomic.h>
     namespace boost { namespace atomics { namespace detail {
- static inline int32_t
+ inline int32_t
     fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
     {
         return atomic_cas_32((volatile unsigned int*)ptr, expected, desired);
@@ -102,7 +102,7 @@
     #define BOOST_ATOMIC_HAVE_CAS32 1
 
     /* FIXME: check for 64 bit mode */
- static inline int64_t
+ inline int64_t
     fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
     {
         return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired);

Modified: trunk/boost/atomic/detail/interlocked.hpp
==============================================================================
--- trunk/boost/atomic/detail/interlocked.hpp (original)
+++ trunk/boost/atomic/detail/interlocked.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -2,162 +2,205 @@
 #define BOOST_ATOMIC_DETAIL_INTERLOCKED_HPP
 
 // Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2012 Andrey Semashev
 //
 // Distributed under the Boost Software License, Version 1.0.
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
-#include <boost/detail/interlocked.hpp>
 #include <boost/atomic/detail/config.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
 #pragma once
 #endif
 
-namespace boost {
-namespace atomics {
-namespace detail {
+#if defined(_WIN32_WCE)
 
-static inline void
-x86_full_fence(void)
-{
- long tmp;
- BOOST_INTERLOCKED_EXCHANGE(&tmp, 0);
-}
-
-static inline void
-platform_fence_before(memory_order)
-{
-}
-
-static inline void
-platform_fence_after(memory_order)
-{
-}
-
-static inline void
-platform_fence_before_store(memory_order)
-{
-}
-
-static inline void
-platform_fence_after_store(memory_order order)
-{
- if (order == memory_order_seq_cst)
- x86_full_fence();
-}
-
-static inline void
-platform_fence_after_load(memory_order order)
-{
- if (order == memory_order_seq_cst) {
- x86_full_fence();
- }
-}
-
-template<typename T>
-bool
-platform_cmpxchg32_strong(T & expected, T desired, volatile T * ptr)
-{
- T prev = expected;
- expected = (T)BOOST_INTERLOCKED_COMPARE_EXCHANGE((long *)(ptr), (long)desired, (long)expected);
- bool success = (prev==expected);
- return success;
-}
+#include <boost/detail/interlocked.hpp>
 
-#if defined(_WIN64)
-template<typename T>
-bool
-platform_cmpxchg64_strong(T & expected, T desired, volatile T * ptr)
-{
- T prev = expected;
- expected = (T) _InterlockedCompareExchange64((long long *)(ptr), (long long)desired, (long long)expected);
- bool success = (prev==expected);
- return success;
-}
-
-template<typename T>
-void
-platform_store64(T value, volatile T * ptr)
-{
- *ptr = value;
-}
-
-template<typename T>
-T
-platform_load64(volatile T * ptr)
-{
- return *ptr;
-}
-#endif
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) BOOST_INTERLOCKED_EXCHANGE(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) BOOST_INTERLOCKED_EXCHANGE_ADD(dest, addend)
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) BOOST_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) BOOST_INTERLOCKED_EXCHANGE_POINTER(dest, newval)
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
+
+#elif defined(_MSC_VER)
+
+#include <intrin.h>
+
+#pragma intrinsic(_InterlockedCompareExchange)
+#pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedExchange)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) _InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) _InterlockedExchangeAdd((long*)(dest), (long)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) _InterlockedExchange((long*)(dest), (long)(newval))
+
+#if _MSC_VER >= 1400
+
+#pragma intrinsic(_InterlockedAnd)
+#pragma intrinsic(_InterlockedOr)
+#pragma intrinsic(_InterlockedXor)
+
+#define BOOST_ATOMIC_INTERLOCKED_AND(dest, arg) _InterlockedAnd((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR(dest, arg) _InterlockedOr((long*)(dest), (long)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR(dest, arg) _InterlockedXor((long*)(dest), (long)(arg))
+
+#endif // _MSC_VER >= 1400
+
+#if _MSC_VER >= 1600
+
+// MSVC 2010 and later provide intrinsics for 8 and 16 bit integers.
+// Note that for each bit count these macros must be either all defined or all not defined.
+// Otherwise atomic<> operations will be implemented inconsistently.
+
+#pragma intrinsic(_InterlockedCompareExchange8)
+#pragma intrinsic(_InterlockedExchangeAdd8)
+#pragma intrinsic(_InterlockedExchange8)
+#pragma intrinsic(_InterlockedAnd8)
+#pragma intrinsic(_InterlockedOr8)
+#pragma intrinsic(_InterlockedXor8)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(dest, exchange, compare) _InterlockedCompareExchange8((char*)(dest), (char)(exchange), (char)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(dest, addend) _InterlockedExchangeAdd8((char*)(dest), (char)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(dest, newval) _InterlockedExchange8((char*)(dest), (char)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND8(dest, arg) _InterlockedAnd8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR8(dest, arg) _InterlockedOr8((char*)(dest), (char)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR8(dest, arg) _InterlockedXor8((char*)(dest), (char)(arg))
+
+#pragma intrinsic(_InterlockedCompareExchange16)
+#pragma intrinsic(_InterlockedExchangeAdd16)
+#pragma intrinsic(_InterlockedExchange16)
+#pragma intrinsic(_InterlockedAnd16)
+#pragma intrinsic(_InterlockedOr16)
+#pragma intrinsic(_InterlockedXor16)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(dest, exchange, compare) _InterlockedCompareExchange16((short*)(dest), (short)(exchange), (short)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(dest, addend) _InterlockedExchangeAdd16((short*)(dest), (short)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(dest, newval) _InterlockedExchange16((short*)(dest), (short)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND16(dest, arg) _InterlockedAnd16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR16(dest, arg) _InterlockedOr16((short*)(dest), (short)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR16(dest, arg) _InterlockedXor16((short*)(dest), (short)(arg))
+
+#endif // _MSC_VER >= 1600
+
+#if defined(_M_AMD64) || defined(_M_IA64)
+
+#pragma intrinsic(_InterlockedCompareExchange64)
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#pragma intrinsic(_InterlockedExchange64)
+#pragma intrinsic(_InterlockedAnd64)
+#pragma intrinsic(_InterlockedOr64)
+#pragma intrinsic(_InterlockedXor64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) _InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) _InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) _InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_AND64(dest, arg) _InterlockedAnd64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_OR64(dest, arg) _InterlockedOr64((__int64*)(dest), (__int64)(arg))
+#define BOOST_ATOMIC_INTERLOCKED_XOR64(dest, arg) _InterlockedXor64((__int64*)(dest), (__int64)(arg))
+
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#pragma intrinsic(_InterlockedExchangePointer)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) _InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) _InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64((long*)(dest), byte_offset))
+
+#else // defined(_M_AMD64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)_InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)_InterlockedExchange((long*)(dest), (long)(newval)))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD((long*)(dest), byte_offset))
 
-}
-}
+#endif // defined(_M_AMD64)
+
+#else // defined(_MSC_VER)
+
+#if defined(BOOST_USE_WINDOWS_H)
+
+#include <windows.h>
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) InterlockedExchangeAdd((long*)(dest), (long)(addend))
 
-#define BOOST_ATOMIC_THREAD_FENCE 2
-static inline void
-atomic_thread_fence(memory_order order)
-{
- if (order == memory_order_seq_cst) {
- atomics::detail::x86_full_fence();
- }
-}
-
-class atomic_flag {
-private:
- atomic_flag(const atomic_flag &) /* = delete */ ;
- atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
- uint32_t v_;
-public:
- atomic_flag(void) : v_(false) {}
-
- void
- clear(memory_order order = memory_order_seq_cst) volatile
- {
- atomics::detail::platform_fence_before_store(order);
- const_cast<volatile uint32_t &>(v_) = 0;
- atomics::detail::platform_fence_after_store(order);
- }
-
- bool
- test_and_set(memory_order order = memory_order_seq_cst) volatile
- {
- atomics::detail::platform_fence_before(order);
- uint32_t expected = v_;
- do {
- if (expected == 1)
- break;
- } while (!atomics::detail::platform_cmpxchg32_strong(expected, (uint32_t)1, &v_));
- atomics::detail::platform_fence_after(order);
- return expected != 0;
- }
-};
-
-}
-
-#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
-
-#include <boost/atomic/detail/base.hpp>
-
-#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
-
-#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
-#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
-#define BOOST_ATOMIC_INT_LOCK_FREE 2
-#define BOOST_ATOMIC_LONG_LOCK_FREE 2
 #if defined(_WIN64)
-#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+#else // defined(BOOST_USE_WINDOWS_H)
+
+#if defined(__MINGW64__)
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT
 #else
-#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#define BOOST_ATOMIC_INTERLOCKED_IMPORT __declspec(dllimport)
 #endif
-#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
-#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
 
-#include <boost/atomic/detail/cas32strong.hpp>
+namespace boost {
+namespace atomics {
+namespace detail {
+
+extern "C" {
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedCompareExchange(long volatile*, long, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchange(long volatile*, long);
+BOOST_ATOMIC_INTERLOCKED_IMPORT long __stdcall InterlockedExchangeAdd(long volatile*, long);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange((long*)(dest), (long)(exchange), (long)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval) boost::atomics::detail::InterlockedExchange((long*)(dest), (long)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, addend) boost::atomics::detail::InterlockedExchangeAdd((long*)(dest), (long)(addend))
+
 #if defined(_WIN64)
-#include <boost/atomic/detail/cas64strong.hpp>
-#endif
 
-#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedCompareExchange64(__int64 volatile*, __int64, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchange64(__int64 volatile*, __int64);
+BOOST_ATOMIC_INTERLOCKED_IMPORT __int64 __stdcall InterlockedExchangeAdd64(__int64 volatile*, __int64);
+
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedCompareExchangePointer(void* volatile *, void*, void*);
+BOOST_ATOMIC_INTERLOCKED_IMPORT void* __stdcall InterlockedExchangePointer(void* volatile *, void*);
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchange64((__int64*)(dest), (__int64)(exchange), (__int64)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(dest, newval) boost::atomics::detail::InterlockedExchange64((__int64*)(dest), (__int64)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, addend) boost::atomics::detail::InterlockedExchangeAdd64((__int64*)(dest), (__int64)(addend))
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) boost::atomics::detail::InterlockedCompareExchangePointer((void**)(dest), (void*)(exchange), (void*)(compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) boost::atomics::detail::InterlockedExchangePointer((void**)(dest), (void*)(newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(dest, byte_offset))
+
+#else // defined(_WIN64)
+
+#define BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(dest, exchange, compare) ((void*)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(dest, exchange, compare))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(dest, newval) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE(dest, newval))
+#define BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(dest, byte_offset) ((void*)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(dest, byte_offset))
+
+#endif // defined(_WIN64)
+
+} // extern "C"
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#undef BOOST_ATOMIC_INTERLOCKED_IMPORT
+
+#endif // defined(BOOST_USE_WINDOWS_H)
+
+#endif // defined(_MSC_VER)
 
 #endif

Modified: trunk/boost/atomic/detail/linux-arm.hpp
==============================================================================
--- trunk/boost/atomic/detail/linux-arm.hpp (original)
+++ trunk/boost/atomic/detail/linux-arm.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -30,6 +30,8 @@
 // emulated CAS is only good enough to provide compare_exchange_weak
 // semantics.
 
+#include <cstddef>
+#include <boost/cstdint.hpp>
 #include <boost/memory_order.hpp>
 #include <boost/atomic/detail/config.hpp>
 
@@ -41,14 +43,14 @@
 namespace atomics {
 namespace detail {
 
-static inline void
+inline void
 arm_barrier(void)
 {
     void (*kernel_dmb)(void) = (void (*)(void)) 0xffff0fa0;
     kernel_dmb();
 }
 
-static inline void
+inline void
 platform_fence_before(memory_order order)
 {
     switch(order) {
@@ -61,7 +63,7 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_after(memory_order order)
 {
     switch(order) {
@@ -73,27 +75,27 @@
     }
 }
 
-static inline void
+inline void
 platform_fence_before_store(memory_order order)
 {
     platform_fence_before(order);
 }
 
-static inline void
+inline void
 platform_fence_after_store(memory_order order)
 {
     if (order == memory_order_seq_cst)
         arm_barrier();
 }
 
-static inline void
+inline void
 platform_fence_after_load(memory_order order)
 {
     platform_fence_after(order);
 }
 
 template<typename T>
-bool
+inline bool
 platform_cmpxchg32(T & expected, T desired, volatile T * ptr)
 {
     typedef T (*kernel_cmpxchg32_t)(T oldval, T newval, volatile T * ptr);
@@ -110,7 +112,7 @@
 }
 
 #define BOOST_ATOMIC_THREAD_FENCE 2
-static inline void
+inline void
 atomic_thread_fence(memory_order order)
 {
     switch(order) {
@@ -124,7 +126,7 @@
 }
 
 #define BOOST_ATOMIC_SIGNAL_FENCE 2
-static inline void
+inline void
 atomic_signal_fence(memory_order)
 {
     __asm__ __volatile__ ("" ::: "memory");

Modified: trunk/boost/atomic/detail/lockpool.hpp
==============================================================================
--- trunk/boost/atomic/detail/lockpool.hpp (original)
+++ trunk/boost/atomic/detail/lockpool.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -7,7 +7,6 @@
 // See accompanying file LICENSE_1_0.txt or copy at
 // http://www.boost.org/LICENSE_1_0.txt)
 
-
 #include <boost/atomic/detail/config.hpp>
 #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
 #include <boost/thread/mutex.hpp>
@@ -23,45 +22,52 @@
 
 #ifndef BOOST_ATOMIC_FLAG_LOCK_FREE
 
-class lockpool {
+class lockpool
+{
 public:
     typedef mutex lock_type;
- class scoped_lock {
+ class scoped_lock
+ {
     private:
- mutex::scoped_lock guard;
+ lock_type& mtx_;
+
+ scoped_lock(scoped_lock const&) /* = delete */;
+ scoped_lock& operator=(scoped_lock const&) /* = delete */;
+
     public:
         explicit
- scoped_lock(const volatile void * addr) : guard( lock_for(addr) )
+ scoped_lock(const volatile void * addr) : mtx_(get_lock_for(addr))
+ {
+ mtx_.lock();
+ }
+ ~scoped_lock()
         {
+ mtx_.unlock();
         }
     };
-private:
- static BOOST_ATOMIC_DECL mutex pool_[41];
 
- static mutex &
- lock_for(const volatile void * addr)
- {
- std::size_t index = reinterpret_cast<std::size_t>(addr) % 41;
- return pool_[index];
- }
+private:
+ static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
 };
 
 #else
 
-class lockpool {
+class lockpool
+{
 public:
     typedef atomic_flag lock_type;
 
- class scoped_lock {
+ class scoped_lock
+ {
     private:
- atomic_flag & flag_;
+ atomic_flag& flag_;
 
         scoped_lock(const scoped_lock &) /* = delete */;
- void operator=(const scoped_lock &) /* = delete */;
+ scoped_lock& operator=(const scoped_lock &) /* = delete */;
 
     public:
         explicit
- scoped_lock(const volatile void * addr) : flag_( lock_for(addr) )
+ scoped_lock(const volatile void * addr) : flag_(get_lock_for(addr))
         {
             do {
             } while (flag_.test_and_set(memory_order_acquire));
@@ -74,14 +80,7 @@
     };
 
 private:
- static BOOST_ATOMIC_DECL atomic_flag pool_[41];
-
- static lock_type &
- lock_for(const volatile void * addr)
- {
- std::size_t index = reinterpret_cast<std::size_t>(addr) % 41;
- return pool_[index];
- }
+ static BOOST_ATOMIC_DECL lock_type& get_lock_for(const volatile void * addr);
 };
 
 #endif

Modified: trunk/boost/atomic/detail/platform.hpp
==============================================================================
--- trunk/boost/atomic/detail/platform.hpp (original)
+++ trunk/boost/atomic/detail/platform.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -44,9 +44,9 @@
 
     #include <boost/atomic/detail/gcc-sparcv9.hpp>
 
-#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
+#elif defined(BOOST_WINDOWS) || defined(_WIN32_CE)
 
- #include <boost/atomic/detail/interlocked.hpp>
+ #include <boost/atomic/detail/windows.hpp>
 
 #elif 0 && defined(__GNUC__) /* currently does not work correctly */
 

Copied: trunk/boost/atomic/detail/type-classification.hpp (from r81948, /trunk/boost/atomic/detail/type-classifier.hpp)
==============================================================================
--- /trunk/boost/atomic/detail/type-classifier.hpp (original)
+++ trunk/boost/atomic/detail/type-classification.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -1,5 +1,5 @@
-#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP
-#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP
+#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
+#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFICATION_HPP
 
 // Copyright (c) 2011 Helge Bahmann
 //
@@ -8,6 +8,7 @@
 // http://www.boost.org/LICENSE_1_0.txt)
 
 #include <boost/atomic/detail/config.hpp>
+#include <boost/type_traits/is_integral.hpp>
 
 #ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
 #pragma once
@@ -17,71 +18,28 @@
 namespace atomics {
 namespace detail {
 
-template<typename T>
-struct type_classifier {
- typedef void test;
+template<typename T, bool IsInt = boost::is_integral<T>::value>
+struct classify
+{
+ typedef void type;
 };
 
-template<>
-struct type_classifier<char> {typedef int test;};
-template<>
-struct type_classifier<unsigned char> {typedef int test;};
-template<>
-struct type_classifier<signed char> {typedef int test;};
-template<>
-struct type_classifier<unsigned short> {typedef int test;};
-template<>
-struct type_classifier<signed short> {typedef int test;};
-template<>
-struct type_classifier<unsigned int> {typedef int test;};
-template<>
-struct type_classifier<signed int> {typedef int test;};
-template<>
-struct type_classifier<unsigned long> {typedef int test;};
-template<>
-struct type_classifier<long> {typedef int test;};
-#ifdef BOOST_HAS_LONG_LONG
-template<> struct type_classifier<unsigned long long>
-{typedef int test;};
-template<> struct type_classifier<signed long long>
-{typedef int test;};
-#endif
+template<typename T>
+struct classify<T, true> {typedef int type;};
 
 template<typename T>
-struct type_classifier<T *> {typedef void * test;};
+struct classify<T*, false> {typedef void* type;};
 
 template<typename T>
-struct sign_trait {
- typedef void test;
+struct storage_size_of
+{
+ enum _
+ {
+ size = sizeof(T),
+ value = (size == 3 ? 4 : (size == 5 || size == 6 || size == 7 ? 8 : size))
+ };
 };
 
-template<>
-struct sign_trait<char> {typedef int test;};
-template<>
-struct sign_trait<unsigned char> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed char> {typedef int test;};
-template<>
-struct sign_trait<unsigned short> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed short> {typedef int test;};
-template<>
-struct sign_trait<unsigned int> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed int> {typedef int test;};
-template<>
-struct sign_trait<unsigned long> {typedef unsigned int test;};
-template<>
-struct sign_trait<long> {typedef int test;};
-#ifdef BOOST_HAS_LONG_LONG
-template<> struct sign_trait<unsigned long long>
-{typedef unsigned int test;};
-template<> struct sign_trait<signed long long>
-{typedef int test;};
-#endif
-
-
-
 }}}
 
 #endif

Deleted: trunk/boost/atomic/detail/type-classifier.hpp
==============================================================================
--- trunk/boost/atomic/detail/type-classifier.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
+++ (empty file)
@@ -1,87 +0,0 @@
-#ifndef BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP
-#define BOOST_ATOMIC_DETAIL_TYPE_CLASSIFIER_HPP
-
-// Copyright (c) 2011 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/atomic/detail/config.hpp>
-
-#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
-#pragma once
-#endif
-
-namespace boost {
-namespace atomics {
-namespace detail {
-
-template<typename T>
-struct type_classifier {
- typedef void test;
-};
-
-template<>
-struct type_classifier<char> {typedef int test;};
-template<>
-struct type_classifier<unsigned char> {typedef int test;};
-template<>
-struct type_classifier<signed char> {typedef int test;};
-template<>
-struct type_classifier<unsigned short> {typedef int test;};
-template<>
-struct type_classifier<signed short> {typedef int test;};
-template<>
-struct type_classifier<unsigned int> {typedef int test;};
-template<>
-struct type_classifier<signed int> {typedef int test;};
-template<>
-struct type_classifier<unsigned long> {typedef int test;};
-template<>
-struct type_classifier<long> {typedef int test;};
-#ifdef BOOST_HAS_LONG_LONG
-template<> struct type_classifier<unsigned long long>
-{typedef int test;};
-template<> struct type_classifier<signed long long>
-{typedef int test;};
-#endif
-
-template<typename T>
-struct type_classifier<T *> {typedef void * test;};
-
-template<typename T>
-struct sign_trait {
- typedef void test;
-};
-
-template<>
-struct sign_trait<char> {typedef int test;};
-template<>
-struct sign_trait<unsigned char> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed char> {typedef int test;};
-template<>
-struct sign_trait<unsigned short> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed short> {typedef int test;};
-template<>
-struct sign_trait<unsigned int> {typedef unsigned int test;};
-template<>
-struct sign_trait<signed int> {typedef int test;};
-template<>
-struct sign_trait<unsigned long> {typedef unsigned int test;};
-template<>
-struct sign_trait<long> {typedef int test;};
-#ifdef BOOST_HAS_LONG_LONG
-template<> struct sign_trait<unsigned long long>
-{typedef unsigned int test;};
-template<> struct sign_trait<signed long long>
-{typedef int test;};
-#endif
-
-
-
-}}}
-
-#endif

Added: trunk/boost/atomic/detail/windows.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/atomic/detail/windows.hpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -0,0 +1,1585 @@
+#ifndef BOOST_ATOMIC_DETAIL_WINDOWS_HPP
+#define BOOST_ATOMIC_DETAIL_WINDOWS_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2012 Andrey Semashev
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <cstddef>
+#include <boost/cstdint.hpp>
+#include <boost/type_traits/make_signed.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/interlocked.hpp>
+
+#ifdef BOOST_ATOMIC_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+// 'order' : unreferenced formal parameter
+#pragma warning(disable: 4100)
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+// Define hardware barriers
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+extern "C" void _mm_mfence(void);
+#pragma intrinsic(_mm_mfence)
+#endif
+
+// Issues a full hardware memory fence. When the compiler guarantees SSE2
+// (x64, or 32-bit x86 built with /arch:SSE2 or higher) the mfence intrinsic
+// is used; otherwise a locked exchange on a dummy local serves as the fence.
+BOOST_FORCEINLINE void x86_full_fence(void)
+{
+#if defined(_MSC_VER) && (defined(_M_AMD64) || (defined(_M_IX86) && defined(_M_IX86_FP) && _M_IX86_FP >= 2))
+ // Use mfence only if SSE2 is available
+ _mm_mfence();
+#else
+ long tmp;
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&tmp, 0);
+#endif
+}
+
+// Define compiler barriers
+#if defined(_MSC_VER) && _MSC_VER >= 1310
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+
+#define BOOST_ATOMIC_READ_WRITE_BARRIER() _ReadWriteBarrier()
+
+#if _MSC_VER >= 1400
+
+extern "C" void _ReadBarrier();
+#pragma intrinsic(_ReadBarrier)
+extern "C" void _WriteBarrier();
+#pragma intrinsic(_WriteBarrier)
+
+#define BOOST_ATOMIC_READ_BARRIER() _ReadBarrier()
+#define BOOST_ATOMIC_WRITE_BARRIER() _WriteBarrier()
+
+#endif
+#endif
+
+#ifndef BOOST_ATOMIC_READ_WRITE_BARRIER
+#define BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+#ifndef BOOST_ATOMIC_READ_BARRIER
+#define BOOST_ATOMIC_READ_BARRIER() BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+#ifndef BOOST_ATOMIC_WRITE_BARRIER
+#define BOOST_ATOMIC_WRITE_BARRIER() BOOST_ATOMIC_READ_WRITE_BARRIER()
+#endif
+
+// MSVC (up to 2012, inclusively) optimizer generates a very poor code for switch-case in fence functions.
+// Issuing unconditional compiler barriers generates better code. We may re-enable the main branch if MSVC optimizer improves.
+#ifdef BOOST_MSVC
+#define BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+#endif
+
+// Compiler barrier emitted before an atomic operation. Only compiler
+// reordering needs to be restrained here; the Interlocked operation that
+// follows provides the hardware-level ordering.
+BOOST_FORCEINLINE void
+platform_fence_before(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ // See note above: an unconditional barrier compiles better on MSVC.
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+
+#else
+
+ switch(order)
+ {
+ case memory_order_relaxed:
+ case memory_order_consume:
+ case memory_order_acquire:
+ break;
+ case memory_order_release:
+ case memory_order_acq_rel:
+ BOOST_ATOMIC_WRITE_BARRIER();
+ /* release */
+ break;
+ case memory_order_seq_cst:
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+ /* seq */
+ break;
+ }
+
+#endif
+}
+
+// Compiler barrier emitted after an atomic operation; prevents subsequent
+// reads from being hoisted above the operation for acquire-class orderings.
+BOOST_FORCEINLINE void
+platform_fence_after(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+
+#else
+
+ switch(order)
+ {
+ case memory_order_relaxed:
+ case memory_order_release:
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ case memory_order_acq_rel:
+ BOOST_ATOMIC_READ_BARRIER();
+ break;
+ case memory_order_seq_cst:
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+ /* seq */
+ break;
+ }
+
+#endif
+}
+
+// Compiler barrier emitted before a plain (non-interlocked) store; keeps
+// earlier writes from sinking below the store for release-class orderings.
+BOOST_FORCEINLINE void
+platform_fence_before_store(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_WRITE_BARRIER();
+
+#else
+
+ switch(order)
+ {
+ case memory_order_relaxed:
+ case memory_order_acquire:
+ case memory_order_consume:
+ break;
+ case memory_order_acq_rel:
+ case memory_order_release:
+ case memory_order_seq_cst:
+ BOOST_ATOMIC_WRITE_BARRIER();
+ break;
+ }
+
+#endif
+}
+
+// Fence emitted after a plain store. Only seq_cst requires a hardware
+// fence (x86_full_fence); weaker orderings need at most a compiler barrier.
+BOOST_FORCEINLINE void
+platform_fence_after_store(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_WRITE_BARRIER();
+ if (order == memory_order_seq_cst)
+ x86_full_fence();
+
+#else
+
+ switch(order)
+ {
+ case memory_order_relaxed:
+ case memory_order_acquire:
+ case memory_order_consume:
+ break;
+ case memory_order_acq_rel:
+ case memory_order_release:
+ BOOST_ATOMIC_WRITE_BARRIER();
+ break;
+ case memory_order_seq_cst:
+ x86_full_fence();
+ break;
+ }
+
+#endif
+}
+
+// Fence emitted after a plain load. Acquire-class orderings only need a
+// compiler read barrier; seq_cst additionally issues a full hardware fence.
+BOOST_FORCEINLINE void
+platform_fence_after_load(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_READ_BARRIER();
+ if (order == memory_order_seq_cst)
+ x86_full_fence();
+
+#else
+
+ switch(order)
+ {
+ case memory_order_relaxed:
+ case memory_order_consume:
+ break;
+ case memory_order_acquire:
+ case memory_order_acq_rel:
+ BOOST_ATOMIC_READ_BARRIER();
+ break;
+ case memory_order_release:
+ break;
+ case memory_order_seq_cst:
+ x86_full_fence();
+ break;
+ }
+
+#endif
+}
+
+} // namespace detail
+} // namespace atomics
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+// Standalone thread fence (boost::atomic_thread_fence). Compiler barriers
+// cover all orderings; seq_cst additionally requires a hardware fence.
+BOOST_FORCEINLINE void
+atomic_thread_fence(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+ if (order == memory_order_seq_cst)
+ atomics::detail::x86_full_fence();
+
+#else
+
+ switch (order)
+ {
+ case memory_order_relaxed:
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ BOOST_ATOMIC_READ_BARRIER();
+ break;
+ case memory_order_release:
+ BOOST_ATOMIC_WRITE_BARRIER();
+ break;
+ case memory_order_acq_rel:
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+ break;
+ case memory_order_seq_cst:
+ atomics::detail::x86_full_fence();
+ break;
+ }
+
+#endif
+}
+
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+// Standalone signal fence (boost::atomic_signal_fence). Only compiler
+// reordering must be prevented, so no hardware fence is ever issued.
+BOOST_FORCEINLINE void
+atomic_signal_fence(memory_order order)
+{
+#ifdef BOOST_ATOMIC_DETAIL_BAD_SWITCH_CASE_OPTIMIZER
+
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+
+#else
+
+ switch (order)
+ {
+ case memory_order_relaxed:
+ break;
+ case memory_order_consume:
+ case memory_order_acquire:
+ BOOST_ATOMIC_READ_BARRIER();
+ break;
+ case memory_order_release:
+ BOOST_ATOMIC_WRITE_BARRIER();
+ break;
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ BOOST_ATOMIC_READ_WRITE_BARRIER();
+ break;
+ }
+
+#endif
+}
+
+#undef BOOST_ATOMIC_READ_WRITE_BARRIER
+#undef BOOST_ATOMIC_READ_BARRIER
+#undef BOOST_ATOMIC_WRITE_BARRIER
+
+// Lock-free atomic flag implemented with Windows Interlocked exchanges.
+// Uses an 8-bit cell when InterlockedExchange8 is available, otherwise a
+// 32-bit long cell operated on with the plain InterlockedExchange.
+class atomic_flag
+{
+private:
+ atomic_flag(const atomic_flag &) /* = delete */ ;
+ atomic_flag & operator=(const atomic_flag &) /* = delete */ ;
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8
+ char v_;
+#else
+ long v_;
+#endif
+public:
+ atomic_flag(void) : v_(0) {}
+
+ // Resets the flag to false. Implemented as an exchange rather than a plain
+ // store so the interlocked operation itself carries the ordering.
+ void
+ clear(memory_order order = memory_order_seq_cst) volatile
+ {
+ atomics::detail::platform_fence_before_store(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, 0);
+#else
+ BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 0);
+#endif
+ atomics::detail::platform_fence_after_store(order);
+ }
+
+ // Atomically sets the flag and returns its previous state.
+ bool
+ test_and_set(memory_order order = memory_order_seq_cst) volatile
+ {
+ atomics::detail::platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8
+ const char old = BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, 1);
+#else
+ const long old = BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, 1);
+#endif
+ atomics::detail::platform_fence_after(order);
+ return old != 0;
+ }
+};
+
+} // namespace boost
+
+#define BOOST_ATOMIC_FLAG_LOCK_FREE 2
+
+#include <boost/atomic/detail/base.hpp>
+
+#if !defined(BOOST_ATOMIC_FORCE_FALLBACK)
+
+#define BOOST_ATOMIC_CHAR_LOCK_FREE 2
+#define BOOST_ATOMIC_SHORT_LOCK_FREE 2
+#define BOOST_ATOMIC_INT_LOCK_FREE 2
+#define BOOST_ATOMIC_LONG_LOCK_FREE 2
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 2
+#else
+#define BOOST_ATOMIC_LLONG_LOCK_FREE 0
+#endif
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+#define BOOST_ATOMIC_BOOL_LOCK_FREE 2
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+// 'char' : forcing value to bool 'true' or 'false' (performance warning)
+#pragma warning(disable: 4800)
+#endif
+
+// Lock-free specialization for 1-byte integral types. If 8-bit interlocked
+// intrinsics are unavailable, the value is kept in a 32-bit cell so the
+// plain 32-bit Interlocked operations can be applied to it.
+template<typename T, bool Sign>
+class base_atomic<T, int, 1, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8
+ typedef value_type storage_type;
+#else
+ typedef uint32_t storage_type;
+#endif
+ typedef T difference_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ // Non-seq_cst stores are plain writes framed by compiler fences;
+ // seq_cst delegates to exchange() so a locked instruction is used.
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ v_ = static_cast< storage_type >(v);
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = static_cast< value_type >(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ value_type
+ fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD8(&v_, v));
+#else
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v));
+#endif
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Subtraction is expressed as addition of the negated value; the cast
+ // through the signed type avoids relying on unsigned negation semantics.
+ value_type
+ fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ typedef typename make_signed< value_type >::type signed_value_type;
+ return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order);
+ }
+
+ value_type
+ exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, v));
+#else
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v));
+#endif
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Maps directly onto InterlockedCompareExchange; since the hardware CAS
+ // cannot fail spuriously, compare_exchange_weak simply forwards here.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired, previous));
+#else
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous));
+#endif
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ // Bitwise ops prefer a direct interlocked intrinsic when one exists and
+ // otherwise fall back to a CAS loop built on compare_exchange_weak.
+ value_type
+ fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_AND8
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND8(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ value_type
+ fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_OR8
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR8(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ value_type
+ fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_XOR8
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR8(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
+private:
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+// Lock-free specialization for 2-byte integral types. If 16-bit interlocked
+// intrinsics are unavailable, the value is kept in a 32-bit cell so the
+// plain 32-bit Interlocked operations can be applied to it.
+template<typename T, bool Sign>
+class base_atomic<T, int, 2, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16
+ typedef value_type storage_type;
+#else
+ typedef uint32_t storage_type;
+#endif
+ typedef T difference_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ // Non-seq_cst stores are plain writes framed by compiler fences;
+ // seq_cst delegates to exchange() so a locked instruction is used.
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ v_ = static_cast< storage_type >(v);
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = static_cast< value_type >(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ value_type
+ fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD16(&v_, v));
+#else
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v));
+#endif
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Subtraction is addition of the negated value, cast via the signed type.
+ value_type
+ fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ typedef typename make_signed< value_type >::type signed_value_type;
+ return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order);
+ }
+
+ value_type
+ exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, v));
+#else
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v));
+#endif
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Maps directly onto InterlockedCompareExchange; no spurious failures,
+ // hence compare_exchange_weak simply forwards here.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired, previous));
+#else
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous));
+#endif
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ // Bitwise ops prefer a direct interlocked intrinsic when one exists and
+ // otherwise fall back to a CAS loop built on compare_exchange_weak.
+ value_type
+ fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_AND16
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND16(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ value_type
+ fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_OR16
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR16(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ value_type
+ fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#ifdef BOOST_ATOMIC_INTERLOCKED_XOR16
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR16(&v_, v));
+ platform_fence_after(order);
+ return v;
+#elif defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
+private:
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+template<typename T, bool Sign>
+class base_atomic<T, int, 4, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+ typedef value_type storage_type;
+ typedef T difference_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ v_ = static_cast< storage_type >(v);
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = static_cast< value_type >(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ value_type
+ fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD(&v_, v));
+ platform_fence_after(order);
+ return v;
+ }
+
+ value_type
+ fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ typedef typename make_signed< value_type >::type signed_value_type;
+ return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order);
+ }
+
+ value_type
+ exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, v));
+ platform_fence_after(order);
+ return v;
+ }
+
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired, previous));
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ value_type
+ fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ value_type
+ fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ // Atomic bitwise XOR; returns the value held before the operation.
+ value_type
+ fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ // No native intrinsic available: emulate with a CAS loop; 'tmp' tracks
+ // the currently observed value and ends up holding the pre-XOR value.
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ // Always lock-free: every operation above maps to an interlocked
+ // intrinsic or a CAS loop, never to the fallback lock pool.
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+// Specialization for 8-byte integral types, compiled only when a 64-bit
+// interlocked compare-exchange is available. All RMW operations map to the
+// *64 interlocked intrinsics and are lock-free.
+// NOTE(review): load() and the non-seq_cst store() perform plain 64-bit
+// accesses and assume they are atomic; on 32-bit hosts that still define
+// BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64 such accesses may tear --
+// TODO confirm the platform guards exclude that configuration.
+template<typename T, bool Sign>
+class base_atomic<T, int, 8, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+ typedef value_type storage_type;
+ typedef T difference_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ // seq_cst stores are routed through exchange() so the store itself acts
+ // as a full barrier; weaker orders use a plain store with a fence before.
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ v_ = static_cast< storage_type >(v);
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = static_cast< value_type >(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ // Atomic add; returns the value held before the addition.
+ value_type
+ fetch_add(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD64(&v_, v));
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Subtraction is expressed as addition of the negated operand; the
+ // negation is done via the signed representation.
+ value_type
+ fetch_sub(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ typedef typename make_signed< value_type >::type signed_value_type;
+ return fetch_add(static_cast< value_type >(-static_cast< signed_value_type >(v)), order);
+ }
+
+ // Atomic swap; returns the previous stored value.
+ value_type
+ exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, v));
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Strong CAS: stores 'desired' iff the current value equals 'expected';
+ // on failure 'expected' receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+ value_type oldval = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired, previous));
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ // Atomic bitwise AND; falls back to a CAS loop without a native intrinsic.
+ value_type
+ fetch_and(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_AND64)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_AND64(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp & v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ // Atomic bitwise OR; falls back to a CAS loop without a native intrinsic.
+ value_type
+ fetch_or(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_OR64)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_OR64(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp | v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ // Atomic bitwise XOR; falls back to a CAS loop without a native intrinsic.
+ value_type
+ fetch_xor(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+#if defined(BOOST_ATOMIC_INTERLOCKED_XOR64)
+ platform_fence_before(order);
+ v = static_cast< value_type >(BOOST_ATOMIC_INTERLOCKED_XOR64(&v_, v));
+ platform_fence_after(order);
+ return v;
+#else
+ value_type tmp = load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(tmp, tmp ^ v, order, memory_order_relaxed));
+ return tmp;
+#endif
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_INTEGRAL_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+// MSVC 2012 fails to recognize sizeof(T) as a constant expression in template specializations
+enum msvc_sizeof_pointer_workaround { sizeof_pointer = sizeof(void*) };
+
+// Specialization for void* (no pointer arithmetic, hence no fetch_add/sub).
+// Uses the *_POINTER interlocked macros, which dispatch to the 32- or
+// 64-bit primitives according to the pointer size.
+template<bool Sign>
+class base_atomic<void*, void*, sizeof_pointer, Sign>
+{
+ typedef base_atomic this_type;
+ typedef void* value_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ // seq_cst stores are routed through exchange() so the store itself acts
+ // as a full barrier; weaker orders use a plain volatile store.
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ const_cast<volatile value_type &>(v_) = v;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = const_cast<const volatile value_type &>(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ // Atomic swap; returns the previous stored pointer.
+ value_type exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v);
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Strong CAS: stores 'desired' iff the current value equals 'expected';
+ // on failure 'expected' receives the observed value.
+ bool compare_exchange_strong(value_type & expected, value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+ value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous);
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool compare_exchange_weak(value_type & expected, value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ value_type v_;
+};
+
+// Specialization for object pointers T*. Adds fetch_add/fetch_sub with
+// C++ pointer-arithmetic semantics: the ptrdiff_t operand is scaled by
+// sizeof(T) before the interlocked addition.
+template<typename T, bool Sign>
+class base_atomic<T*, void*, sizeof_pointer, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T* value_type;
+ typedef ptrdiff_t difference_type;
+public:
+ explicit base_atomic(value_type v) : v_(v) {}
+ base_atomic(void) {}
+
+ // seq_cst stores are routed through exchange() so the store itself acts
+ // as a full barrier; weaker orders use a plain volatile store.
+ void
+ store(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ platform_fence_before(order);
+ const_cast<volatile value_type &>(v_) = v;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ value_type v = const_cast<const volatile value_type &>(v_);
+ platform_fence_after_load(order);
+ return v;
+ }
+
+ // Atomic swap; returns the previous stored pointer.
+ value_type
+ exchange(value_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ platform_fence_before(order);
+ v = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_POINTER(&v_, v);
+ platform_fence_after(order);
+ return v;
+ }
+
+ // Strong CAS: stores 'desired' iff the current value equals 'expected';
+ // on failure 'expected' receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ value_type previous = expected;
+ platform_fence_before(success_order);
+ value_type oldval = (value_type)BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&v_, desired, previous);
+ bool success = (previous == oldval);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ expected = oldval;
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ // Atomic pointer addition; returns the pointer held before the addition.
+ value_type
+ fetch_add(difference_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ // Scale the element count to a byte offset (pointer arithmetic).
+ v = v * sizeof(*v_);
+ platform_fence_before(order);
+ value_type res = (value_type)BOOST_ATOMIC_INTERLOCKED_EXCHANGE_ADD_POINTER(&v_, v);
+ platform_fence_after(order);
+ return res;
+ }
+
+ // Subtraction is addition of the negated (unscaled) element count.
+ value_type
+ fetch_sub(difference_type v, memory_order order = memory_order_seq_cst) volatile
+ {
+ return fetch_add(-v, order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_POINTER_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ value_type v_;
+};
+
+
+// Generic specialization for arbitrary 1-byte types (e.g. small PODs).
+// The value is bit-copied via memcpy into an integral storage cell. When
+// no 8-bit interlocked CAS exists, a zero-padded 32-bit cell is used; the
+// padding is kept at zero by every write path so that whole-cell
+// comparison in compare_exchange works correctly.
+template<typename T, bool Sign>
+class base_atomic<T, void, 1, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8
+ typedef uint8_t storage_type;
+#else
+ typedef uint32_t storage_type;
+#endif
+public:
+ explicit base_atomic(value_type const& v) : v_(0)
+ {
+ memcpy(&v_, &v, sizeof(value_type));
+ }
+ base_atomic(void) {}
+
+ // seq_cst stores go through exchange() for the full barrier; weaker
+ // orders store a freshly zero-padded cell directly.
+ void
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ const_cast<volatile storage_type &>(v_) = tmp;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ storage_type tmp = const_cast<volatile storage_type &>(v_);
+ platform_fence_after_load(order);
+ value_type v;
+ memcpy(&v, &tmp, sizeof(value_type));
+ return v;
+ }
+
+ // Atomic swap; returns the previously stored value.
+ value_type
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE8
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE8(&v_, tmp));
+#else
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp));
+#endif
+ platform_fence_after(order);
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
+ }
+
+ // Strong CAS on the zero-padded storage representation; on failure
+ // 'expected' receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ storage_type expected_s = 0, desired_s = 0;
+ memcpy(&expected_s, &expected, sizeof(value_type));
+ memcpy(&desired_s, &desired, sizeof(value_type));
+ platform_fence_before(success_order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE8(&v_, desired_s, expected_s));
+#else
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s));
+#endif
+ bool success = (oldval == expected_s);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ memcpy(&expected, &oldval, sizeof(value_type));
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+// Generic specialization for arbitrary 2-byte types. The value is
+// bit-copied via memcpy into an integral storage cell. When no 16-bit
+// interlocked CAS exists, a zero-padded 32-bit cell is used; the padding
+// is kept at zero by every write path so that whole-cell comparison in
+// compare_exchange works correctly.
+template<typename T, bool Sign>
+class base_atomic<T, void, 2, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16
+ typedef uint16_t storage_type;
+#else
+ typedef uint32_t storage_type;
+#endif
+public:
+ explicit base_atomic(value_type const& v) : v_(0)
+ {
+ memcpy(&v_, &v, sizeof(value_type));
+ }
+ base_atomic(void) {}
+
+ // seq_cst stores go through exchange() for the full barrier; weaker
+ // orders store a freshly zero-padded cell directly.
+ void
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ const_cast<volatile storage_type &>(v_) = tmp;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ storage_type tmp = const_cast<volatile storage_type &>(v_);
+ platform_fence_after_load(order);
+ value_type v;
+ memcpy(&v, &tmp, sizeof(value_type));
+ return v;
+ }
+
+ // Atomic swap; returns the previously stored value.
+ value_type
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_EXCHANGE16
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE16(&v_, tmp));
+#else
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp));
+#endif
+ platform_fence_after(order);
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
+ }
+
+ // Strong CAS on the zero-padded storage representation; on failure
+ // 'expected' receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ storage_type expected_s = 0, desired_s = 0;
+ memcpy(&expected_s, &expected, sizeof(value_type));
+ memcpy(&desired_s, &desired, sizeof(value_type));
+ platform_fence_before(success_order);
+#ifdef BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE16(&v_, desired_s, expected_s));
+#else
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s));
+#endif
+ bool success = (oldval == expected_s);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ memcpy(&expected, &oldval, sizeof(value_type));
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+// Generic specialization for arbitrary 4-byte types. The value is
+// bit-copied via memcpy into a uint32_t cell and the plain 32-bit
+// interlocked primitives are used directly.
+template<typename T, bool Sign>
+class base_atomic<T, void, 4, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+ typedef uint32_t storage_type;
+public:
+ explicit base_atomic(value_type const& v) : v_(0)
+ {
+ memcpy(&v_, &v, sizeof(value_type));
+ }
+ base_atomic(void) {}
+
+ // seq_cst stores go through exchange() for the full barrier; weaker
+ // orders store the converted cell directly.
+ void
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ const_cast<volatile storage_type &>(v_) = tmp;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ storage_type tmp = const_cast<volatile storage_type &>(v_);
+ platform_fence_after_load(order);
+ value_type v;
+ memcpy(&v, &tmp, sizeof(value_type));
+ return v;
+ }
+
+ // Atomic swap; returns the previously stored value.
+ value_type
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE(&v_, tmp));
+ platform_fence_after(order);
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
+ }
+
+ // Strong CAS on the storage representation; on failure 'expected'
+ // receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ storage_type expected_s = 0, desired_s = 0;
+ memcpy(&expected_s, &expected, sizeof(value_type));
+ memcpy(&desired_s, &desired, sizeof(value_type));
+ platform_fence_before(success_order);
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE(&v_, desired_s, expected_s));
+ bool success = (oldval == expected_s);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ memcpy(&expected, &oldval, sizeof(value_type));
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+#if defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+// Generic specialization for arbitrary 8-byte types, compiled only when a
+// 64-bit interlocked CAS exists. The value is bit-copied via memcpy into
+// a uint64_t cell.
+// NOTE(review): load() and the non-seq_cst store() perform plain 64-bit
+// accesses; on 32-bit hosts that still define
+// BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64 such accesses may tear --
+// TODO confirm the platform guards exclude that configuration.
+template<typename T, bool Sign>
+class base_atomic<T, void, 8, Sign>
+{
+ typedef base_atomic this_type;
+ typedef T value_type;
+ typedef uint64_t storage_type;
+public:
+ explicit base_atomic(value_type const& v) : v_(0)
+ {
+ memcpy(&v_, &v, sizeof(value_type));
+ }
+ base_atomic(void) {}
+
+ // seq_cst stores go through exchange() for the full barrier; weaker
+ // orders store the converted cell directly.
+ void
+ store(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ if (order != memory_order_seq_cst) {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ const_cast<volatile storage_type &>(v_) = tmp;
+ } else {
+ exchange(v, order);
+ }
+ }
+
+ value_type
+ load(memory_order order = memory_order_seq_cst) const volatile
+ {
+ storage_type tmp = const_cast<volatile storage_type &>(v_);
+ platform_fence_after_load(order);
+ value_type v;
+ memcpy(&v, &tmp, sizeof(value_type));
+ return v;
+ }
+
+ // Atomic swap; returns the previously stored value.
+ value_type
+ exchange(value_type const& v, memory_order order = memory_order_seq_cst) volatile
+ {
+ storage_type tmp = 0;
+ memcpy(&tmp, &v, sizeof(value_type));
+ platform_fence_before(order);
+ tmp = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_EXCHANGE64(&v_, tmp));
+ platform_fence_after(order);
+ value_type res;
+ memcpy(&res, &tmp, sizeof(value_type));
+ return res;
+ }
+
+ // Strong CAS on the storage representation; on failure 'expected'
+ // receives the observed value.
+ bool
+ compare_exchange_strong(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ storage_type expected_s = 0, desired_s = 0;
+ memcpy(&expected_s, &expected, sizeof(value_type));
+ memcpy(&desired_s, &desired, sizeof(value_type));
+ platform_fence_before(success_order);
+ storage_type oldval = static_cast< storage_type >(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64(&v_, desired_s, expected_s));
+ bool success = (oldval == expected_s);
+ if (success)
+ platform_fence_after(success_order);
+ else
+ platform_fence_after(failure_order);
+ memcpy(&expected, &oldval, sizeof(value_type));
+ return success;
+ }
+
+ // The interlocked CAS never fails spuriously, so weak == strong here.
+ bool
+ compare_exchange_weak(
+ value_type & expected,
+ value_type const& desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+
+ bool
+ is_lock_free(void) const volatile
+ {
+ return true;
+ }
+
+ BOOST_ATOMIC_DECLARE_BASE_OPERATORS
+private:
+ // Non-copyable: declared but not defined (pre-C++11 emulation of = delete).
+ base_atomic(const base_atomic &) /* = delete */ ;
+ void operator=(const base_atomic &) /* = delete */ ;
+ storage_type v_;
+};
+
+#endif // defined(BOOST_ATOMIC_INTERLOCKED_COMPARE_EXCHANGE64)
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#endif /* !defined(BOOST_ATOMIC_FORCE_FALLBACK) */
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif

Modified: trunk/libs/atomic/src/lockpool.cpp
==============================================================================
--- trunk/libs/atomic/src/lockpool.cpp (original)
+++ trunk/libs/atomic/src/lockpool.cpp 2012-12-15 08:24:01 EST (Sat, 15 Dec 2012)
@@ -10,7 +10,14 @@
 namespace atomics {
 namespace detail {
 
-lockpool::lock_type lockpool::pool_[41];
+// File-local pool of 41 locks backing non-lock-free atomics; presumably
+// sized prime so that addresses spread evenly across buckets -- TODO confirm.
+static lockpool::lock_type lock_pool_[41];
+
+// Maps an object address to its lock by hashing the address modulo the pool size.
+// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for modulus operation which results in crashes.
+BOOST_ATOMIC_DECL lockpool::lock_type& lockpool::get_lock_for(const volatile void* addr)
+{
+ std::size_t index = reinterpret_cast<std::size_t>(addr) % (sizeof(lock_pool_) / sizeof(*lock_pool_));
+ return lock_pool_[index];
+}
 
 }
 }


Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk