diff -Naur boost_1.36_old/boost/detail/atomic_count_gcc_armv6.hpp boost_1.36_new/boost/detail/atomic_count_gcc_armv6.hpp
--- boost_1.36_old/boost/detail/atomic_count_gcc_armv6.hpp	1970-01-01 01:00:00.000000000 +0100
+++ boost_1.36_new/boost/detail/atomic_count_gcc_armv6.hpp	2008-09-26 08:55:17.000000000 +0200
@@ -0,0 +1,98 @@
+#ifndef BOOST_DETAIL_ATOMIC_COUNT_GCC_ARM_HPP_INCLUDED
+#define BOOST_DETAIL_ATOMIC_COUNT_GCC_ARM_HPP_INCLUDED
+
+//
+// boost/detail/atomic_count_gcc_armv6.hpp
+//
+// Copyright (c) 2008 by HALE electronic, GmbH., Austria. All rights reserved.
+// Written by Rudolf Dittrich, rudolf.dittrich@hale.at.
+//
+
+//
+// ARMv6-or-higher implementation of the atomic count, using the
+// ldrex/strex exclusive-access instructions, for toolchains where
+// the gcc __sync_* builtins are not usable.
+//
+// NOTE(review): these operations provide atomicity only; no dmb
+// memory barriers are issued -- confirm this is acceptable here.
+//
+
+namespace boost
+{
+
+namespace detail
+{
+
+class atomic_count
+{
+public:
+
+    explicit atomic_count( long v ) : value_( v ) {}
+
+    // Atomic pre-increment (__sync_add_and_fetch equivalent).
+    void operator++()
+    {
+        int tmp;    // updated value
+        int status; // strex result, 0 == store succeeded
+
+        __asm__ __volatile__(
+            "1: ldrex %0, [%2]\n"
+            "   add %0, %0, #1\n"
+            "   strex %1, %0, [%2]\n"
+            "   cmp %1, #0\n"
+            "   bne 1b"
+            : "=&r"(tmp), "=&r"(status) // outputs are locals only:
+            : "r"(&value_)              //   value_ is written by strex
+            : "memory", "cc" );
+    }
+
+    // Atomic pre-decrement; returns the decremented value.
+    long operator--()
+    {
+        int tmp;
+        int status;
+
+        __asm__ __volatile__(
+            "1: ldrex %0, [%2]\n"
+            "   sub %0, %0, #1\n"
+            "   strex %1, %0, [%2]\n"
+            "   cmp %1, #0\n"
+            "   bne 1b"
+            : "=&r"(tmp), "=&r"(status)
+            : "r"(&value_)
+            : "memory", "cc" );
+
+        // Return the value this thread produced; re-reading value_
+        // here would race with concurrent modifications.
+        return tmp;
+    }
+
+    // Atomic read of the current value.
+    operator long() const
+    {
+        long v;
+
+        // A single aligned word load is atomic on ARM; the "memory"
+        // clobber prevents use of a stale cached copy.
+        __asm__ __volatile__(
+            "ldr %0, [%1]"
+            : "=r"(v)
+            : "r"(&value_)
+            : "memory" );
+
+        return v;
+    }
+
+private:
+
+    atomic_count(atomic_count const &);
+    atomic_count & operator=(atomic_count const &);
+
+    long value_;
+};
+
+} // namespace detail
+
+} // namespace boost
+
+#endif
diff -Naur boost_1.36_old/boost/detail/atomic_count.hpp boost_1.36_new/boost/detail/atomic_count.hpp
--- boost_1.36_old/boost/detail/atomic_count.hpp	2008-09-26 09:28:06.000000000 +0200
+++ boost_1.36_new/boost/detail/atomic_count.hpp	2008-09-26 08:55:58.000000000 +0200
@@ -100,3 +100,6 @@
 
+#elif defined( __GNUC__ ) && ( __GNUC__ * 100 + __GNUC_MINOR__ >= 401 ) && ( defined( __ARM_ARCH_6J__ ) || defined( __ARM_ARCH_7A__ ) )
+# include <boost/detail/atomic_count_gcc_armv6.hpp>
+
 #elif defined( __GNUC__ ) && ( __GNUC__ * 100 + __GNUC_MINOR__ >= 401 )
 
diff -Naur boost_1.36_old/boost/detail/sp_counted_base_gcc_armv6.hpp boost_1.36_new/boost/detail/sp_counted_base_gcc_armv6.hpp
--- boost_1.36_old/boost/detail/sp_counted_base_gcc_armv6.hpp	1970-01-01 01:00:00.000000000 +0100
+++ boost_1.36_new/boost/detail/sp_counted_base_gcc_armv6.hpp	2008-09-26 08:55:17.000000000 +0200
@@ -0,0 +1,163 @@
+#ifndef BOOST_DETAIL_SP_COUNTED_BASE_ARM_HPP_INCLUDED
+#define BOOST_DETAIL_SP_COUNTED_BASE_ARM_HPP_INCLUDED
+
+//
+// boost/detail/sp_counted_base_gcc_armv6.hpp
+//
+// Copyright (c) 2008 by HALE electronic, GmbH., Austria. All rights reserved.
+// Written by Rudolf Dittrich, rudolf.dittrich@hale.at.
+//
+
+#include <boost/detail/sp_typeinfo.hpp>
+
+//
+// ldrex/strex based atomic operations for ARMv6 and higher, used by
+// shared_ptr for reference counting.  See
+// boost/detail/atomic_count_gcc_armv6.hpp for implementation notes.
+//
+// NOTE(review): atomicity only; no dmb memory barriers are issued --
+// confirm this is acceptable for the intended use.
+//
+
+namespace boost
+{
+
+namespace detail
+{
+
+// Atomically performs ++*pw.
+inline void atomic_increment( int * pw )
+{
+    int tmp;    // updated value
+    int status; // strex result, 0 == store succeeded
+
+    __asm__ __volatile__(
+        "1: ldrex %0, [%2]\n"
+        "   add %0, %0, #1\n"
+        "   strex %1, %0, [%2]\n"
+        "   cmp %1, #0\n"
+        "   bne 1b"
+        : "=&r"(tmp), "=&r"(status) // outputs are locals only:
+        : "r"(pw)                   //   *pw is written by the strex
+        : "memory", "cc" );
+}
+
+// Atomically performs --*pw and returns the new value.
+inline int atomic_decrement( int * pw )
+{
+    int tmp;
+    int status;
+
+    __asm__ __volatile__(
+        "1: ldrex %0, [%2]\n"
+        "   sub %0, %0, #1\n"
+        "   strex %1, %0, [%2]\n"
+        "   cmp %1, #0\n"
+        "   bne 1b"
+        : "=&r"(tmp), "=&r"(status)
+        : "r"(pw)
+        : "memory", "cc" );
+
+    // Return the value this thread produced; re-reading *pw here
+    // would race with concurrent modifications.
+    return tmp;
+}
+
+// Atomically performs: if( *pw != 0 ) ++*pw; returns the new value.
+inline int atomic_conditional_increment( int * pw )
+{
+    int tmp;
+    int status;
+
+    __asm__ __volatile__(
+        "1: ldrex %0, [%2]\n"
+        "   cmp %0, #0\n"
+        "   beq 2f\n"            // zero: store it back unchanged
+        "   add %0, %0, #1\n"
+        "2: strex %1, %0, [%2]\n"
+        "   cmp %1, #0\n"
+        "   bne 1b"
+        : "=&r"(tmp), "=&r"(status)
+        : "r"(pw)
+        : "memory", "cc" );
+
+    return tmp;
+}
+
+class sp_counted_base
+{
+private:
+
+    sp_counted_base( sp_counted_base const & );
+    sp_counted_base & operator= ( sp_counted_base const & );
+
+    int use_count_;   // #shared
+    int weak_count_;  // #weak + (#shared != 0)
+
+public:
+
+    sp_counted_base(): use_count_( 1 ), weak_count_( 1 )
+    {
+    }
+
+    virtual ~sp_counted_base() // nothrow
+    {
+    }
+
+    // dispose() is called when use_count_ drops to zero, to release
+    // the resources managed by *this.
+
+    virtual void dispose() = 0; // nothrow
+
+    // destroy() is called when weak_count_ drops to zero.
+
+    virtual void destroy() // nothrow
+    {
+        delete this;
+    }
+
+    virtual void * get_deleter( sp_typeinfo const & ti ) = 0;
+
+    void add_ref_copy()
+    {
+        atomic_increment( &use_count_ );
+    }
+
+    bool add_ref_lock() // true on success
+    {
+        return atomic_conditional_increment( &use_count_ ) != 0;
+    }
+
+    void release() // nothrow
+    {
+        if( atomic_decrement( &use_count_ ) == 0 )
+        {
+            dispose();
+            weak_release();
+        }
+    }
+
+    void weak_add_ref() // nothrow
+    {
+        atomic_increment( &weak_count_ );
+    }
+
+    void weak_release() // nothrow
+    {
+        if( atomic_decrement( &weak_count_ ) == 0 )
+        {
+            destroy();
+        }
+    }
+
+    long use_count() const // nothrow
+    {
+        return static_cast< long >( use_count_ );
+    }
+};
+
+} // namespace detail
+
+} // namespace boost
+
+#endif
diff -Naur boost_1.36_old/boost/detail/sp_counted_base.hpp boost_1.36_new/boost/detail/sp_counted_base.hpp
--- boost_1.36_old/boost/detail/sp_counted_base.hpp	2008-09-26 09:28:28.000000000 +0200
+++ boost_1.36_new/boost/detail/sp_counted_base.hpp	2008-09-26 08:56:17.000000000 +0200
@@ -50,3 +50,6 @@
 
+#elif defined(__GNUC__) && ( defined( __ARM_ARCH_6J__ ) || defined( __ARM_ARCH_7A__ ) )
+# include <boost/detail/sp_counted_base_gcc_armv6.hpp>
+
 #elif defined(__GNUC__) && ( defined( __sparcv8 ) || defined( __sparcv9 ) )
 