Subject: [Boost-commit] svn:boost r85553 - in branches/release: boost/mpi boost/mpi/collectives boost/mpi/detail libs/mpi libs/mpi/doc libs/mpi/example libs/mpi/src libs/mpi/src/python libs/mpi/test
From: troyer_at_[hidden]
Date: 2013-09-03 15:31:55
Author: troyer
Date: 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013)
New Revision: 85553
URL: http://svn.boost.org/trac/boost/changeset/85553
Log:
Merged Boost.MPI patches to release
Added:
branches/release/boost/mpi/inplace.hpp
- copied unchanged from r85527, trunk/boost/mpi/inplace.hpp
branches/release/libs/mpi/example/global_min.cpp
- copied unchanged from r85527, trunk/libs/mpi/example/global_min.cpp
branches/release/libs/mpi/example/hello_world_groups.cpp
- copied unchanged from r84739, trunk/libs/mpi/example/hello_world_groups.cpp
branches/release/libs/mpi/example/in_place_global_min.cpp
- copied unchanged from r85527, trunk/libs/mpi/example/in_place_global_min.cpp
branches/release/libs/mpi/test/groups_test.cpp
- copied unchanged from r84739, trunk/libs/mpi/test/groups_test.cpp
branches/release/libs/mpi/test/mt_init_test.cpp
- copied unchanged from r84739, trunk/libs/mpi/test/mt_init_test.cpp
branches/release/libs/mpi/test/mt_level_test.cpp
- copied unchanged from r84739, trunk/libs/mpi/test/mt_level_test.cpp
Properties modified:
branches/release/boost/mpi/ (props changed)
branches/release/libs/mpi/ (props changed)
Text files modified:
branches/release/boost/mpi/collectives.hpp | 30 +++++-
branches/release/boost/mpi/collectives/all_reduce.hpp | 31 ++++++
branches/release/boost/mpi/collectives/gather.hpp | 7 +
branches/release/boost/mpi/collectives/reduce.hpp | 19 ++++
branches/release/boost/mpi/communicator.hpp | 3
branches/release/boost/mpi/config.hpp | 7 +
branches/release/boost/mpi/detail/binary_buffer_iprimitive.hpp | 4
branches/release/boost/mpi/detail/forward_skeleton_iarchive.hpp | 1
branches/release/boost/mpi/detail/ignore_iprimitive.hpp | 2
branches/release/boost/mpi/detail/ignore_oprimitive.hpp | 2
branches/release/boost/mpi/detail/mpi_datatype_primitive.hpp | 27 +++++-
branches/release/boost/mpi/detail/packed_iprimitive.hpp | 4
branches/release/boost/mpi/detail/packed_oprimitive.hpp | 3
branches/release/boost/mpi/environment.hpp | 82 ++++++++++++++++++
branches/release/boost/mpi/inplace.hpp | 63 ++++++++++++++
branches/release/boost/mpi/packed_iarchive.hpp | 44 ++++++---
branches/release/boost/mpi/packed_oarchive.hpp | 39 +++++--
branches/release/libs/mpi/doc/Jamfile.v2 | 1
branches/release/libs/mpi/doc/mpi.qbk | 175 +++++++++++++++++++++++++++++++++------
branches/release/libs/mpi/example/global_min.cpp | 31 +++++++
branches/release/libs/mpi/example/hello_world_groups.cpp | 46 ++++++++++
branches/release/libs/mpi/example/in_place_global_min.cpp | 29 ++++++
branches/release/libs/mpi/src/broadcast.cpp | 1
branches/release/libs/mpi/src/communicator.cpp | 5
branches/release/libs/mpi/src/environment.cpp | 94 +++++++++++++++++++++
branches/release/libs/mpi/src/python/datatypes.cpp | 2
branches/release/libs/mpi/src/python/py_environment.cpp | 22 +++-
branches/release/libs/mpi/src/python/py_nonblocking.cpp | 4
branches/release/libs/mpi/test/Jamfile.v2 | 6 +
branches/release/libs/mpi/test/all_reduce_test.cpp | 102 +++++++++++++++++++++-
branches/release/libs/mpi/test/broadcast_test.cpp | 1
branches/release/libs/mpi/test/groups_test.cpp | 59 +++++++++++++
branches/release/libs/mpi/test/mt_init_test.cpp | 27 ++++++
branches/release/libs/mpi/test/mt_level_test.cpp | 107 ++++++++++++++++++++++++
branches/release/libs/mpi/test/nonblocking_test.cpp | 2
35 files changed, 984 insertions(+), 98 deletions(-)
Modified: branches/release/boost/mpi/collectives.hpp
==============================================================================
--- branches/release/boost/mpi/collectives.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/collectives.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -19,10 +19,10 @@
#define BOOST_MPI_COLLECTIVES_HPP
#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/inplace.hpp>
#include <vector>
namespace boost { namespace mpi {
-
/**
* @brief Gather the values stored at every process into vectors of
* values from each process.
@@ -94,12 +94,15 @@
*
* @param comm The communicator over which the reduction will
* occur.
- *
- * @param in_value The local value to be combined with the local
+ * @param value The local value to be combined with the local
* values of every other process. For reducing arrays, @c in_values
* is a pointer to the local values to be reduced and @c n is the
* number of values to reduce. See @c reduce for more information.
*
+ * If wrapped in an @c inplace_t object, the parameter acts as both the
+ * input and @c out_value, and the local value will be overwritten
+ * (a convenience function @c inplace is provided for the wrapping).
+ *
* @param out_value Will receive the result of the reduction
* operation. If this parameter is omitted, the outgoing value will
* instead be returned.
@@ -116,26 +119,39 @@
* gives the implementation additional lattitude to optimize the
* reduction operation.
*
+ * @param n Indicates the size of the buffers when reducing arrays.
* @returns If no @p out_value parameter is supplied, returns the
* result of the reduction operation.
*/
template<typename T, typename Op>
void
-all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op);
-
+all_reduce(const communicator& comm, const T* value, int n, T* out_value,
+ Op op);
+/**
+ * \overload
+ */
+template<typename T, typename Op>
+void
+all_reduce(const communicator& comm, const T& value, T& out_value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
-T all_reduce(const communicator& comm, const T& in_value, Op op);
+T all_reduce(const communicator& comm, const T& value, Op op);
/**
* \overload
*/
template<typename T, typename Op>
void
-all_reduce(const communicator& comm, const T* in_values, int n, T* out_values,
+all_reduce(const communicator& comm, inplace_t<T*> value, int n,
Op op);
+/**
+ * \overload
+ */
+template<typename T, typename Op>
+void
+all_reduce(const communicator& comm, inplace_t<T> value, Op op);
/**
* @brief Send data from every process to every other process.
Modified: branches/release/boost/mpi/collectives/all_reduce.hpp
==============================================================================
--- branches/release/boost/mpi/collectives/all_reduce.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/collectives/all_reduce.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -12,12 +12,15 @@
#ifndef BOOST_MPI_ALL_REDUCE_HPP
#define BOOST_MPI_ALL_REDUCE_HPP
+#include <vector>
+
+#include <boost/mpi/inplace.hpp>
+
// All-reduce falls back to reduce() + broadcast() in some cases.
#include <boost/mpi/collectives/broadcast.hpp>
#include <boost/mpi/collectives/reduce.hpp>
namespace boost { namespace mpi {
-
namespace detail {
/**********************************************************************
* Simple reduction with MPI_Allreduce *
@@ -67,7 +70,17 @@
T* out_values, Op op, mpl::false_ /*is_mpi_op*/,
mpl::false_ /*is_mpi_datatype*/)
{
- reduce(comm, in_values, n, out_values, op, 0);
+ if (in_values == MPI_IN_PLACE) {
+ // if in_values matches the in place tag, then the output
+ // buffer actually contains the input data.
+ // But we can just go back to the out of place
+ // implementation in this case.
+ // it's not clear how/if we can avoid the copy.
+ std::vector<T> tmp_in( out_values, out_values + n);
+ reduce(comm, &(tmp_in[0]), n, out_values, op, 0);
+ } else {
+ reduce(comm, in_values, n, out_values, op, 0);
+ }
broadcast(comm, out_values, n, 0);
}
} // end namespace detail
@@ -83,6 +96,20 @@
template<typename T, typename Op>
inline void
+all_reduce(const communicator& comm, inplace_t<T*> inout_values, int n, Op op)
+{
+ all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), n, inout_values.buffer, op);
+}
+
+template<typename T, typename Op>
+inline void
+all_reduce(const communicator& comm, inplace_t<T> inout_values, Op op)
+{
+ all_reduce(comm, static_cast<const T*>(MPI_IN_PLACE), 1, &(inout_values.buffer), op);
+}
+
+template<typename T, typename Op>
+inline void
all_reduce(const communicator& comm, const T& in_value, T& out_value, Op op)
{
detail::all_reduce_impl(comm, &in_value, 1, &out_value, op,
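[Editor's note: a minimal sketch of how the new in-place overloads above are meant to be called, assuming the usual Boost.MPI headers and a run with several processes; illustrative only, not part of the patch.]

    #include <boost/mpi.hpp>
    #include <vector>
    #include <functional>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      // Scalar form: the local value is overwritten with the reduced result.
      int value = world.rank();
      mpi::all_reduce(world, mpi::inplace(value), mpi::minimum<int>());

      // Array form: the buffer is both input and output; the count is passed separately.
      std::vector<int> buf(10, world.rank());
      mpi::all_reduce(world, mpi::inplace(&buf[0]), int(buf.size()), std::plus<int>());
      return 0;
    }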
Modified: branches/release/boost/mpi/collectives/gather.hpp
==============================================================================
--- branches/release/boost/mpi/collectives/gather.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/collectives/gather.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -131,7 +131,12 @@
gather(const communicator& comm, const T* in_values, int n,
std::vector<T>& out_values, int root)
{
- ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
+ if (comm.rank() == root) {
+ out_values.resize(comm.size() * n);
+ ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
+ }
+ else
+ ::boost::mpi::gather(comm, in_values, n, root);
}
template<typename T>
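[Editor's note: for context, a small sketch of the vector gather that this change guards. Only the root's vector is resized and filled; non-root ranks merely contribute their values. Assumes at least two processes; not part of the patch.]

    #include <boost/mpi.hpp>
    #include <vector>
    #include <iostream>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      int local[2] = { world.rank(), world.rank() * 10 };
      std::vector<int> all;                  // left untouched on non-root ranks
      mpi::gather(world, local, 2, all, 0);  // root resizes 'all' to world.size() * 2

      if (world.rank() == 0)
        std::cout << "gathered " << all.size() << " values" << std::endl;
      return 0;
    }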
Modified: branches/release/boost/mpi/collectives/reduce.hpp
==============================================================================
--- branches/release/boost/mpi/collectives/reduce.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/collectives/reduce.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -330,6 +330,25 @@
is_mpi_op<Op, T>(), is_mpi_datatype<T>());
}
+template<typename T, typename Op>
+void
+reduce(const communicator & comm, std::vector<T> const & in_values, Op op,
+ int root)
+{
+ reduce(comm, &in_values.front(), in_values.size(), op, root);
+}
+
+template<typename T, typename Op>
+void
+reduce(const communicator & comm, std::vector<T> const & in_values,
+ std::vector<T> & out_values, Op op, int root)
+{
+ out_values.resize(in_values.size());
+ reduce(comm, &in_values.front(), in_values.size(), &out_values.front(), op,
+ root);
+}
+
+
template<typename T, typename Op>
void
reduce(const communicator& comm, const T& in_value, T& out_value, Op op,
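[Editor's note: a usage sketch for the two std::vector overloads added above; the root supplies an output vector, non-root ranks use the overload without one. A sketch under the usual Boost.MPI setup, not part of the patch.]

    #include <boost/mpi.hpp>
    #include <vector>
    #include <functional>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      std::vector<int> in(5, world.rank());
      if (world.rank() == 0) {
        std::vector<int> out;                          // resized inside reduce()
        mpi::reduce(world, in, out, std::plus<int>(), 0);
      } else {
        mpi::reduce(world, in, std::plus<int>(), 0);   // non-root ranks give no output vector
      }
      return 0;
    }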
Modified: branches/release/boost/mpi/communicator.hpp
==============================================================================
--- branches/release/boost/mpi/communicator.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/communicator.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -13,6 +13,7 @@
#ifndef BOOST_MPI_COMMUNICATOR_HPP
#define BOOST_MPI_COMMUNICATOR_HPP
+#include <boost/assert.hpp>
#include <boost/mpi/config.hpp>
#include <boost/mpi/exception.hpp>
#include <boost/optional.hpp>
@@ -869,6 +870,8 @@
{
void operator()(MPI_Comm* comm) const
{
+ BOOST_ASSERT( comm != 0 );
+ BOOST_ASSERT(*comm != MPI_COMM_NULL);
int finalized;
BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&finalized));
if (!finalized)
Modified: branches/release/boost/mpi/config.hpp
==============================================================================
--- branches/release/boost/mpi/config.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/config.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -27,7 +27,7 @@
// If this is an MPI-2 implementation, define configuration macros for
// the features we are interested in.
-#if defined(MPI_VERSION) && MPI_VERSION == 2
+#if defined(MPI_VERSION) && MPI_VERSION >= 2
/** @brief Determine if the MPI implementation has support for memory
* allocation.
*
@@ -48,6 +48,11 @@
* environment class will provide a default constructor. This macro is
* always defined for MPI-2 implementations. */
# define BOOST_MPI_HAS_NOARG_INITIALIZATION
+#else
+// If this is an MPI-1.x implementation, no-arg initialization of the
+// MPI environment may still be available, but it is not mandatory.
+// Undef this if no arg init is available:
+//# define BOOST_MPI_HAS_NOARG_INITIALIZATION
#endif
#if defined(MPIAPI)
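[Editor's note: a hedged sketch of how this macro is typically used to stay portable across MPI-1.x and MPI-2 implementations; illustrative only.]

    #include <boost/mpi/environment.hpp>
    #include <boost/mpi/config.hpp>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
    #ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
      mpi::environment env;              // MPI may be initialized without argc/argv
      (void)argc; (void)argv;
    #else
      mpi::environment env(argc, argv);  // fall back to passing the command line through
    #endif
      return 0;
    }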
Modified: branches/release/boost/mpi/detail/binary_buffer_iprimitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/binary_buffer_iprimitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/binary_buffer_iprimitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -21,6 +21,7 @@
#include <vector>
#include <boost/mpi/allocator.hpp>
#include <cstring> // for memcpy
+#include <cassert>
namespace boost { namespace mpi {
@@ -107,7 +108,8 @@
void load_impl(void * p, int l)
{
assert(position+l<=static_cast<int>(buffer_.size()));
- std::memcpy(p,&buffer_[position],l);
+ if (l)
+ std::memcpy(p,&buffer_[position],l);
position += l;
}
Modified: branches/release/boost/mpi/detail/forward_skeleton_iarchive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/forward_skeleton_iarchive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/forward_skeleton_iarchive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -61,6 +61,7 @@
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_id_reference_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_id_type)
+BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::object_reference_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::tracking_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(archive::class_name_type)
BOOST_ARCHIVE_FORWARD_IMPLEMENTATION(serialization::collection_size_type)
Modified: branches/release/boost/mpi/detail/ignore_iprimitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/ignore_iprimitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/ignore_iprimitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -44,7 +44,7 @@
/// don't do anything when loading primitive types
template<class T>
- void load(T & t)
+ void load(T &)
{
}
};
Modified: branches/release/boost/mpi/detail/ignore_oprimitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/ignore_oprimitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/ignore_oprimitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -52,7 +52,7 @@
/// don't do anything when saving primitive types
template<class T>
- void save(const T & t)
+ void save(const T &)
{
}
};
Modified: branches/release/boost/mpi/detail/mpi_datatype_primitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/mpi_datatype_primitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/mpi_datatype_primitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -49,7 +49,11 @@
: is_committed(false),
origin()
{
+#if defined(MPI_VERSION) && MPI_VERSION >= 2
+ BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(orig), &origin));
+#else
BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(orig), &origin));
+#endif
}
void save_binary(void const *address, std::size_t count)
@@ -72,7 +76,8 @@
{
if (!is_committed)
{
- BOOST_MPI_CHECK_RESULT(MPI_Type_struct,
+#if defined(MPI_VERSION) && MPI_VERSION >= 2
+ BOOST_MPI_CHECK_RESULT(MPI_Type_create_struct,
(
addresses.size(),
boost::serialization::detail::get_data(lengths),
@@ -80,9 +85,18 @@
boost::serialization::detail::get_data(types),
&datatype_
));
-
+#else
+ BOOST_MPI_CHECK_RESULT(MPI_Type_struct,
+ (
+ addresses.size(),
+ boost::serialization::detail::get_data(lengths),
+ boost::serialization::detail::get_data(addresses),
+ boost::serialization::detail::get_data(types),
+ &datatype_
+ ));
+#endif
BOOST_MPI_CHECK_RESULT(MPI_Type_commit,(&datatype_));
-
+
is_committed = true;
}
@@ -105,8 +119,11 @@
// store address, type and length
MPI_Aint a;
- BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(p), &a));
-
+#if defined(MPI_VERSION) && MPI_VERSION >= 2
+ BOOST_MPI_CHECK_RESULT(MPI_Get_address,(const_cast<void*>(p), &a));
+#else
+ BOOST_MPI_CHECK_RESULT(MPI_Address,(const_cast<void*>(p), &a));
+#endif
addresses.push_back(a-origin);
types.push_back(t);
lengths.push_back(l);
Modified: branches/release/boost/mpi/detail/packed_iprimitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/packed_iprimitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/packed_iprimitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -94,7 +94,9 @@
load(l);
s.resize(l);
// note breaking a rule here - could be a problem on some platform
- load_impl(const_cast<CharType *>(s.data()),get_mpi_datatype(CharType()),l);
+ if (l)
+ load_impl(const_cast<CharType *>(s.data()),
+ get_mpi_datatype(CharType()),l);
}
private:
Modified: branches/release/boost/mpi/detail/packed_oprimitive.hpp
==============================================================================
--- branches/release/boost/mpi/detail/packed_oprimitive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/detail/packed_oprimitive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -81,7 +81,8 @@
{
unsigned int l = static_cast<unsigned int>(s.size());
save(l);
- save_impl(s.data(),get_mpi_datatype(CharType()),s.size());
+ if (l)
+ save_impl(s.data(),get_mpi_datatype(CharType()),s.size());
}
private:
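[Editor's note: the zero-length guards added to the packed and binary primitives above matter when an empty string or container is serialized; a minimal round-trip sketch, assuming two processes, not part of the patch.]

    #include <boost/mpi.hpp>
    #include <boost/serialization/string.hpp>
    #include <string>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::environment env(argc, argv);
      mpi::communicator world;

      if (world.rank() == 0)
        world.send(1, 0, std::string());   // zero-length payload: the pack/memcpy is now skipped
      else if (world.rank() == 1) {
        std::string s;
        world.recv(0, 0, s);
      }
      return 0;
    }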
Modified: branches/release/boost/mpi/environment.hpp
==============================================================================
--- branches/release/boost/mpi/environment.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/environment.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -17,9 +17,46 @@
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <string>
+#include <iosfwd>
namespace boost { namespace mpi {
+namespace threading {
+/** @brief specify the supported threading level.
+ *
+ * Based on MPI 2 standard/8.7.3
+ */
+enum level {
+ /** Only one thread will execute.
+ */
+ single = MPI_THREAD_SINGLE,
+ /** Only the main thread will make MPI calls.
+ *
+ * The process may be multi-threaded, but only the main
+ * thread will make MPI calls (all MPI calls are ``funneled''
+ * to the main thread).
+ */
+ funneled = MPI_THREAD_FUNNELED,
+ /** Only one thread at a time will make MPI calls.
+ *
+ * The process may be multi-threaded, and multiple
+ * threads may make MPI calls, but only one at a time:
+ * MPI calls are not made concurrently from two distinct
+ * threads (all MPI calls are ``serialized'').
+ */
+ serialized = MPI_THREAD_SERIALIZED,
+ /** Multiple threads may make MPI calls.
+ *
+ * Multiple threads may call MPI, with no restrictions.
+ */
+ multiple = MPI_THREAD_MULTIPLE
+};
+
+/** Formatted output for threading level. */
+std::ostream& operator<<(std::ostream& out, level l);
+/** Formatted input for threading level. */
+std::istream& operator>>(std::istream& in, level& l);
+} // namespace threading
/** @brief Initialize, finalize, and query the MPI environment.
*
* The @c environment class is used to initialize, finalize, and
@@ -62,6 +99,22 @@
* program if it is destructed due to an uncaught exception.
*/
explicit environment(bool abort_on_exception = true);
+ /** Initialize the MPI environment.
+ *
+ * If the MPI environment has not already been initialized,
+ * initializes MPI with a call to @c MPI_Init_thread. Since this
+ * constructor does not take command-line arguments (@c argc and @c
+ * argv), it is only available when the underlying MPI
+ * implementation supports calling @c MPI_Init with @c NULL
+ * arguments, indicated by the macro @c
+ * BOOST_MPI_HAS_NOARG_INITIALIZATION.
+ *
+ * @param mt_level the required level of threading support.
+ *
+ * @param abort_on_exception When true, this object will abort the
+ * program if it is destructed due to an uncaught exception.
+ */
+ explicit environment(threading::level mt_level, bool abort_on_exception = true);
#endif
/** Initialize the MPI environment.
@@ -80,6 +133,25 @@
*/
environment(int& argc, char** &argv, bool abort_on_exception = true);
+ /** Initialize the MPI environment.
+ *
+ * If the MPI environment has not already been initialized,
+ * initializes MPI with a call to @c MPI_Init_thread.
+ *
+ * @param argc The number of arguments provided in @p argv, as
+ * passed into the program's @c main function.
+ *
+ * @param argv The array of argument strings passed to the program
+ * via @c main.
+ *
+ * @param mt_level the required level of threading support
+ *
+ * @param abort_on_exception When true, this object will abort the
+ * program if it is destructed due to an uncaught exception.
+ */
+ environment(int& argc, char** &argv, threading::level mt_level,
+ bool abort_on_exception = true);
+
/** Shuts down the MPI environment.
*
* If this @c environment object was used to initialize the MPI
@@ -185,13 +257,21 @@
*/
static std::string processor_name();
+ /** Query the current level of thread support.
+ */
+ static threading::level thread_level();
+
+ /** Are we in the main thread?
+ */
+ static bool is_main_thread();
+
private:
/// Whether this environment object called MPI_Init
bool i_initialized;
/// Whether we should abort if the destructor is
bool abort_on_exception;
-
+
/// The number of reserved tags.
static const int num_reserved_tags = 1;
};
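[Editor's note: a sketch combining the new constructor and static queries, assuming argc/argv are available (the documentation change below shows the no-argument form); not part of the patch.]

    #include <boost/mpi/environment.hpp>
    #include <iostream>
    namespace mpi = boost::mpi;
    namespace mt = mpi::threading;

    int main(int argc, char* argv[])
    {
      // Request MPI_THREAD_SERIALIZED via MPI_Init_thread.
      mpi::environment env(argc, argv, mt::serialized);
      std::cout << "granted level: " << mpi::environment::thread_level()
                << ", main thread: " << mpi::environment::is_main_thread()
                << std::endl;
      return 0;
    }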
Copied: branches/release/boost/mpi/inplace.hpp (from r85527, trunk/boost/mpi/inplace.hpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/boost/mpi/inplace.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r85527, trunk/boost/mpi/inplace.hpp)
@@ -0,0 +1,63 @@
+// Copyright (C) 2005-2006 Alain Miniussi <alain.miniussi -at- oca.eu>.
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Message Passing Interface 1.1 -- Section 4. MPI Collectives
+
+/** @file inplace.hpp
+ *
+ * This header provides helpers to indicate to MPI collective operations
+ * that a buffer can be used both as input and output.
+ */
+#ifndef BOOST_MPI_INPLACE_HPP
+#define BOOST_MPI_INPLACE_HPP
+
+#include <boost/mpi/communicator.hpp>
+#include <vector>
+
+namespace boost { namespace mpi {
+
+/**
+ * @brief Wrapper type to explicitly indicate that an input buffer
+ * can be overwritten with the output value.
+ */
+template <typename T>
+struct inplace_t {
+ inplace_t(T& inout) : buffer(inout) {}
+ T& buffer;
+};
+
+template <typename T>
+struct inplace_t<T*> {
+ inplace_t(T* inout) : buffer(inout) {}
+ T* buffer;
+};
+
+
+/**
+ * @brief Wrap an input value to indicate that it can be overwritten
+ * with an output value.
+ * @param inout the contributing input value; it will be overwritten
+ * with the output value where one is expected. If it is a pointer,
+ * the number of elements will be provided separately.
+ * @returns The wrapped value or pointer.
+ */
+template<typename T>
+inplace_t<T>
+inplace(T& inout) {
+ return inplace_t<T>(inout);
+}
+/**
+ * \overload
+ */
+template<typename T>
+inplace_t<T*>
+inplace(T* inout) {
+ return inplace_t<T*>(inout);
+}
+} } // end namespace boost::mpi
+
+#endif // BOOST_MPI_INPLACE_HPP
+
Modified: branches/release/boost/mpi/packed_iarchive.hpp
==============================================================================
--- branches/release/boost/mpi/packed_iarchive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/packed_iarchive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -22,6 +22,7 @@
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/common_iarchive.hpp>
#include <boost/archive/shared_ptr_helper.hpp>
+#include <boost/archive/basic_archive.hpp>
#include <boost/mpi/detail/packed_iprimitive.hpp>
#include <boost/mpi/detail/binary_buffer_iprimitive.hpp>
#include <boost/serialization/string.hpp>
@@ -37,14 +38,16 @@
typedef packed_iprimitive iprimitive;
#endif
-/** @brief An archive that packs binary data into an MPI buffer.
+
+/** @brief An archive that unpacks binary data from an MPI buffer.
*
- * The @c packed_iarchive class is an Archiver (as in the
- * Boost.Serialization library) that packs binary data into a buffer
- * for transmission via MPI. It can operate on any Serializable data
- * type and will use the @c MPI_Pack function of the underlying MPI
- * implementation to perform serialization.
+ * The @c packed_oarchive class is an Archiver (as in the
+ * Boost.Serialization library) that unpacks binary data from a
+ * buffer received via MPI. It can operate on any Serializable data
+ * type and will use the @c MPI_Unpack function of the underlying MPI
+ * implementation to perform deserialization.
*/
+
class BOOST_MPI_DECL packed_iarchive
: public iprimitive
, public archive::detail::common_iarchive<packed_iarchive>
@@ -52,40 +55,37 @@
{
public:
/**
- * Construct a @c packed_iarchive for transmission over the given
+ * Construct a @c packed_iarchive to receive data over the given
* MPI communicator and with an initial buffer.
*
* @param comm The communicator over which this archive will be
- * sent.
+ * received.
*
- * @param b A user-defined buffer that will be filled with the
- * binary representation of serialized objects.
+ * @param b A user-defined buffer that contains the binary
+ * representation of serialized objects.
*
* @param flags Control the serialization of the data types. Refer
* to the Boost.Serialization documentation before changing the
* default flags.
- *
- * @param position Set the offset into buffer @p b at which
- * deserialization will begin.
*/
+
packed_iarchive(MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header, int position = 0)
: iprimitive(b,comm,position),
archive::detail::common_iarchive<packed_iarchive>(flags)
{}
/**
- * Construct a @c packed_iarchive for transmission over the given
+ * Construct a @c packed_iarchive to receive data over the given
* MPI communicator.
*
* @param comm The communicator over which this archive will be
- * sent.
- *
- * @param s The size of the buffer to be received.
+ * received.
*
* @param flags Control the serialization of the data types. Refer
* to the Boost.Serialization documentation before changing the
* default flags.
*/
+
packed_iarchive
( MPI_Comm const & comm , std::size_t s=0,
unsigned int flags = boost::archive::no_header)
@@ -121,6 +121,16 @@
// input archives need to ignore the optional information
void load_override(archive::class_id_optional_type & /*t*/, int){}
+ void load_override(archive::class_id_type & t, int version){
+ int_least16_t x=0;
+ * this->This() >> x;
+ t = boost::archive::class_id_type(x);
+ }
+
+ void load_override(archive::class_id_reference_type & t, int version){
+ load_override(static_cast<archive::class_id_type &>(t), version);
+ }
+
void load_override(archive::class_name_type & t, int)
{
std::string cn;
Modified: branches/release/boost/mpi/packed_oarchive.hpp
==============================================================================
--- branches/release/boost/mpi/packed_oarchive.hpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/boost/mpi/packed_oarchive.hpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -19,6 +19,7 @@
#define BOOST_MPI_PACKED_OARCHIVE_HPP
#include <boost/mpi/datatype.hpp>
+#include <boost/archive/basic_archive.hpp>
#include <boost/archive/detail/auto_link_archive.hpp>
#include <boost/archive/detail/common_oarchive.hpp>
#include <boost/archive/shared_ptr_helper.hpp>
@@ -36,13 +37,13 @@
typedef packed_oprimitive oprimitive;
#endif
-/** @brief An archive that unpacks binary data from an MPI buffer.
+/** @brief An archive that packs binary data into an MPI buffer.
*
- * The @c packed_oarchive class is an Archiver (as in the
- * Boost.Serialization library) that unpacks binary data from a
- * buffer received via MPI. It can operate on any Serializable data
- * type and will use the @c MPI_Unpack function of the underlying MPI
- * implementation to perform deserialization.
+ * The @c packed_iarchive class is an Archiver (as in the
+ * Boost.Serialization library) that packs binary data into a buffer
+ * for transmission via MPI. It can operate on any Serializable data
+ * type and will use the @c MPI_Pack function of the underlying MPI
+ * implementation to perform serialization.
*/
class BOOST_MPI_DECL packed_oarchive
@@ -52,35 +53,42 @@
{
public:
/**
- * Construct a @c packed_oarchive to receive data over the given
+ * Construct a @c packed_oarchive for transmission over the given
* MPI communicator and with an initial buffer.
*
* @param comm The communicator over which this archive will be
- * received.
+ * sent.
*
- * @param b A user-defined buffer that contains the binary
- * representation of serialized objects.
+ * @param b A user-defined buffer that will be filled with the
+ * binary representation of serialized objects.
*
* @param flags Control the serialization of the data types. Refer
* to the Boost.Serialization documentation before changing the
* default flags.
+ *
+ * @param position Set the offset into buffer @p b at which
+ * deserialization will begin.
*/
+
packed_oarchive( MPI_Comm const & comm, buffer_type & b, unsigned int flags = boost::archive::no_header)
: oprimitive(b,comm),
archive::detail::common_oarchive<packed_oarchive>(flags)
{}
/**
- * Construct a @c packed_oarchive to receive data over the given
+ * Construct a @c packed_oarchive for transmission over the given
* MPI communicator.
*
* @param comm The communicator over which this archive will be
- * received.
+ * sent.
+ *
+ * @param s The size of the buffer to be received.
*
* @param flags Control the serialization of the data types. Refer
* to the Boost.Serialization documentation before changing the
* default flags.
*/
+
packed_oarchive ( MPI_Comm const & comm, unsigned int flags = boost::archive::no_header)
: oprimitive(internal_buffer_,comm),
archive::detail::common_oarchive<packed_oarchive>(flags)
@@ -93,7 +101,7 @@
archive::detail::common_oarchive<packed_oarchive>::save_override(x,version);
}
- // Save it directly using the primnivites
+ // Save it directly using the primitives
template<class T>
void save_override(T const& x, int /*version*/, mpl::true_)
{
@@ -117,6 +125,11 @@
* this->This() << s;
}
+ void save_override(archive::class_id_type & t, int version){
+ const boost::int_least16_t x = t;
+ * this->This() << x;
+ }
+
private:
/// An internal buffer to be used when the user does not supply his
/// own buffer.
Modified: branches/release/libs/mpi/doc/Jamfile.v2
==============================================================================
--- branches/release/libs/mpi/doc/Jamfile.v2 Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/doc/Jamfile.v2 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -31,6 +31,7 @@
../../../boost/mpi/status.hpp
../../../boost/mpi/request.hpp
../../../boost/mpi/timer.hpp
+ ../../../boost/mpi/inplace.hpp
../../../boost/mpi/python.hpp
]
: <doxygen:param>MACRO_EXPANSION=YES
Modified: branches/release/libs/mpi/doc/mpi.qbk
==============================================================================
--- branches/release/libs/mpi/doc/mpi.qbk Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/doc/mpi.qbk 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -2,7 +2,7 @@
[authors [Gregor, Douglas], [Troyer, Matthias] ]
[copyright 2005 2006 2007 Douglas Gregor, Matthias Troyer, Trustees of Indiana University]
[purpose
- An generic, user-friendly interface to MPI, the Message
+ A generic, user-friendly interface to MPI, the Message
Passing Interface.
]
[id mpi]
@@ -27,7 +27,7 @@
Boost.Python]]
[def _Python_ [@http://www.python.org Python]]
[def _LAM_ [@http://www.lam-mpi.org/ LAM/MPI]]
-[def _MPICH_ [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH]]
+[def _MPICH_ [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH2]]
[def _OpenMPI_ [@http://www.open-mpi.org OpenMPI]]
[def _accumulate_ [@http://www.sgi.com/tech/stl/accumulate.html
`accumulate`]]
@@ -99,9 +99,9 @@
available. Boost.MPI should work with any of the
implementations, although it has only been tested extensively with:
-* [@http://www.open-mpi.org Open MPI 1.0.x]
-* [@http://www.lam-mpi.org LAM/MPI 7.x]
-* [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH 1.2.x]
+* [@http://www.open-mpi.org Open MPI]
+* [@http://www.lam-mpi.org LAM/MPI ]
+* [@http://www-unix.mcs.anl.gov/mpi/mpich/ MPICH2]
You can test your implementation using the following simple program,
which passes a message from one processor to another. Each processor
@@ -110,7 +110,7 @@
#include <mpi.h>
#include <iostream>
- int main(int argc, char* argv[])
+ int main()
{
MPI_Init(&argc, &argv);
@@ -304,9 +304,9 @@
#include <iostream>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::cout << "I am process " << world.rank() << " of " << world.size()
<< "." << std::endl;
@@ -332,6 +332,24 @@
writing "I am a process" before another process has finished writing
"of 7.".
+If you still use an MPI library that supports only MPI 1.1, you
+will need to pass the command-line arguments to the environment
+constructor, as shown in this example:
+
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/communicator.hpp>
+ #include <iostream>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+ std::cout << "I am process " << world.rank() << " of " << world.size()
+ << "." << std::endl;
+ return 0;
+ }
+
[section:point_to_point Point-to-Point communication]
As a message passing library, MPI's primary purpose is to routine
@@ -353,9 +371,9 @@
#include <boost/serialization/string.hpp>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
if (world.rank() == 0) {
@@ -425,9 +443,9 @@
#include <boost/serialization/string.hpp>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
if (world.rank() == 0) {
@@ -610,9 +628,9 @@
#include <boost/serialization/string.hpp>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::string value;
@@ -658,9 +676,9 @@
#include <cstdlib>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::srand(time(0) + world.rank());
@@ -712,16 +730,16 @@
function object. For instance, we can randomly generate values in each
process and the compute the minimum value over all processes via a
call to [funcref boost::mpi::reduce `reduce`]
-(`random_min.cpp`)::
+(`random_min.cpp`):
#include <boost/mpi.hpp>
#include <iostream>
#include <cstdlib>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::srand(time(0) + world.rank());
@@ -752,9 +770,9 @@
#include <boost/serialization/string.hpp>
namespace mpi = boost::mpi;
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
std::string names[10] = { "zero ", "one ", "two ", "three ",
@@ -831,6 +849,64 @@
processes. This variant is useful, for instance, in establishing
global minimum or maximum values.
+The following code (`global_min.cpp`) shows a version of the
+`random_min.cpp` example in which the result is broadcast to all processes:
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <cstdlib>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+ int minimum;
+
+ all_reduce(world, my_number, minimum, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << minimum << std::endl;
+ }
+
+ return 0;
+ }
+
+In that example we provide both input and output values, requiring
+twice as much space, which can be a problem depending on the size
+of the transmitted data.
+If there is no need to preserve the input value, the output value
+can be omitted. In that case the input value will be overwritten with
+the output value, and Boost.MPI is able, in some situations, to implement
+the operation with a more space-efficient solution (using the `MPI_IN_PLACE`
+flag of the MPI C mapping), as in the following example (`in_place_global_min.cpp`):
+
+ #include <boost/mpi.hpp>
+ #include <iostream>
+ #include <cstdlib>
+ namespace mpi = boost::mpi;
+
+ int main(int argc, char* argv[])
+ {
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+
+ all_reduce(world, my_number, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << my_number << std::endl;
+ }
+
+ return 0;
+ }
+
+
[endsect]
[endsect]
@@ -866,9 +942,9 @@
void generate_data(mpi::communicator local, mpi::communicator world);
void collect_data(mpi::communicator local, mpi::communicator world);
- int main(int argc, char* argv[])
+ int main()
{
- mpi::environment env(argc, argv);
+ mpi::environment env;
mpi::communicator world;
bool is_generator = world.rank() < 2 * world.size() / 3;
@@ -1263,7 +1339,10 @@
[[C Function/Constant] [Boost.MPI Equivalent]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node56.html#Node56
-`MPI_Address`]] [used automatically in Boost.MPI]]
+`MPI_Address`]] [used automatically in Boost.MPI for MPI version 1.x]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
+`MPI_Get_address`]] [used automatically in Boost.MPI for MPI version 2.0 and higher]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node58.html#Node58
`MPI_Type_commit`]] [used automatically in Boost.MPI]]
@@ -1293,7 +1372,10 @@
`MPI_Type_size`]] [used automatically in Boost.MPI]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node55.html#Node55
-`MPI_Type_struct`]] [user-defined classes and structs]]
+`MPI_Type_struct`]] [user-defined classes and structs with MPI 1.x]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node76.htm#Node76
+`MPI_Type_create_struct`]] [user-defined classes and structs with MPI 2.0 and higher]]
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node57.html#Node57
`MPI_Type_ub`]] [unsupported]]
@@ -1376,6 +1458,10 @@
[[[@http://www.mpi-forum.org/docs/mpi-11-html/node71.html#Node71
`MPI_Scatterv`]] [most uses supported by [funcref boost::mpi::scatter `scatter`]]]
+
+ [[[@http://www.mpi-forum.org/docs/mpi-20-html/node145.htm#Node145
+`MPI_IN_PLACE`]] [supported implicitly by [funcref boost::mpi::all_reduce
+`all_reduce` by omitting the output value]]]
]
Boost.MPI uses function objects to specify how reductions should occur
@@ -1846,7 +1932,7 @@
`skeleton_proxy` objects can be received on the other end via `recv()`,
which stores a newly-created instance of your data structure with the
-same "shape" as the sender in its `"object` attribute:
+same "shape" as the sender in its `"object"` attribute:
shape = mpi.world.recv(0, 0)
my_data_structure = shape.object
@@ -1867,7 +1953,6 @@
The skeleton/content mechanism is a structured way to exploit the
interaction between custom-built MPI datatypes and `MPI_BOTTOM`, to
eliminate extra buffer copies.
-[endsect]
[section:python_compatbility C++/Python MPI Compatibility]
Boost.MPI is a C++ library whose facilities have been exposed to Python
@@ -1950,6 +2035,42 @@
and the C MPI library.
[endsect]
+[section:threading Threads]
+
+An increasing number of hybrid parallel applications mix
+distributed and shared memory parallelism. To know how to support that model,
+one needs to know what level of threading support is guaranteed by the MPI
+implementation. There are four ordered levels of possible threading support described
+by [classref boost::mpi::threading::level mpi::threading::level].
+At the lowest level, you should not use threads at all; at the highest level, any
+thread can perform MPI calls.
+
+If you want to use multi-threading in your MPI application, you should indicate
+in the environment constructor your preferred threading support. Then probe the
+level the library actually provides, and decide what you can do with it (which could be
+nothing, in which case aborting is a valid option):
+
+ #include <boost/mpi/environment.hpp>
+ #include <boost/mpi/communicator.hpp>
+ #include <iostream>
+ namespace mpi = boost::mpi;
+ namespace mt = mpi::threading;
+
+ int main()
+ {
+ mpi::environment env(mt::funneled);
+ if (env.thread_level() < mt::funneled) {
+ env.abort(-1);
+ }
+ mpi::communicator world;
+ std::cout << "I am process " << world.rank() << " of " << world.size()
+ << "." << std::endl;
+ return 0;
+ }
+
+
+[endsect]
+
[section:performance Performance Evaluation]
Message-passing performance is crucial in high-performance distributed
Copied: branches/release/libs/mpi/example/global_min.cpp (from r85527, trunk/libs/mpi/example/global_min.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/example/global_min.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r85527, trunk/libs/mpi/example/global_min.cpp)
@@ -0,0 +1,31 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi_at_oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's all_reduce() that computes the minimum
+// of each process's value and broadcasts the result to all the processes.
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+ int minimum;
+
+ all_reduce(world, my_number, minimum, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << minimum << std::endl;
+ }
+
+ return 0;
+}
Copied: branches/release/libs/mpi/example/hello_world_groups.cpp (from r84739, trunk/libs/mpi/example/hello_world_groups.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/example/hello_world_groups.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r84739, trunk/libs/mpi/example/hello_world_groups.cpp)
@@ -0,0 +1,46 @@
+// Copyright (C) 2013 Andreas Hehn <hehn_at_[hidden]>, ETH Zurich
+// based on
+// hello_world_broadcast.cpp (C) 2006 Douglas Gregor <doug.gregor_at_[hidden]>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A simple Hello world! example
+// using boost::mpi::group and boost::mpi::broadcast()
+
+#include <stdexcept>
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/group.hpp>
+#include <boost/mpi/collectives.hpp>
+
+#include <boost/serialization/string.hpp>
+
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+ if(world.size() < 2)
+ throw std::runtime_error("Please run with at least 2 MPI processes!");
+
+ int group_a_ranks[2] = {0,1};
+
+ mpi::group world_group = world.group();
+ mpi::group group_a = world_group.include(group_a_ranks,group_a_ranks+2);
+
+ mpi::communicator comm_a(world,group_a);
+
+ std::string value("Hello world!");
+ if(comm_a)
+ {
+ if(comm_a.rank() == 0) {
+ value = "Hello group a!";
+ }
+ mpi::broadcast(comm_a, value, 0);
+ }
+ std::cout << "Process #" << world.rank() << " says " << value << std::endl;
+ return 0;
+}
Copied: branches/release/libs/mpi/example/in_place_global_min.cpp (from r85527, trunk/libs/mpi/example/in_place_global_min.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/example/in_place_global_min.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r85527, trunk/libs/mpi/example/in_place_global_min.cpp)
@@ -0,0 +1,29 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi_at_oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's all_reduce() that computes the minimum
+// of each process's value and broadcasts the result to all the processes.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+
+ all_reduce(world, my_number, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << my_number << std::endl;
+ }
+
+ return 0;
+}
Modified: branches/release/libs/mpi/src/broadcast.cpp
==============================================================================
--- branches/release/libs/mpi/src/broadcast.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/broadcast.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -10,6 +10,7 @@
#include <boost/mpi/skeleton_and_content.hpp>
#include <boost/mpi/detail/point_to_point.hpp>
#include <boost/mpi/environment.hpp>
+#include <cassert>
namespace boost { namespace mpi {
Modified: branches/release/libs/mpi/src/communicator.cpp
==============================================================================
--- branches/release/libs/mpi/src/communicator.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/communicator.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -63,7 +63,8 @@
MPI_Comm newcomm;
BOOST_MPI_CHECK_RESULT(MPI_Comm_create,
((MPI_Comm)comm, (MPI_Group)subgroup, &newcomm));
- comm_ptr.reset(new MPI_Comm(newcomm), comm_free());
+ if(newcomm != MPI_COMM_NULL)
+ comm_ptr.reset(new MPI_Comm(newcomm), comm_free());
}
int communicator::size() const
@@ -118,8 +119,6 @@
status communicator::probe(int source, int tag) const
{
- typedef optional<status> result_type;
-
status stat;
BOOST_MPI_CHECK_RESULT(MPI_Probe,
(source, tag, MPI_Comm(*this), &stat.m_status));
Modified: branches/release/libs/mpi/src/environment.cpp
==============================================================================
--- branches/release/libs/mpi/src/environment.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/environment.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -9,10 +9,57 @@
#include <boost/mpi/exception.hpp>
#include <boost/mpi/detail/mpi_datatype_cache.hpp>
#include <cassert>
+#include <string>
#include <exception>
#include <stdexcept>
+#include <ostream>
namespace boost { namespace mpi {
+namespace threading {
+std::istream& operator>>(std::istream& in, level& l)
+{
+ std::string tk;
+ in >> tk;
+ if (!in.bad()) {
+ if (tk == "single") {
+ l = single;
+ } else if (tk == "funneled") {
+ l = funneled;
+ } else if (tk == "serialized") {
+ l = serialized;
+ } else if (tk == "multiple") {
+ l = multiple;
+ } else {
+ in.setstate(std::ios::badbit);
+ }
+ }
+ return in;
+}
+
+std::ostream& operator<<(std::ostream& out, level l)
+{
+ switch(l) {
+ case single:
+ out << "single";
+ break;
+ case funneled:
+ out << "funneled";
+ break;
+ case serialized:
+ out << "serialized";
+ break;
+ case multiple:
+ out << "multiple";
+ break;
+ default:
+ out << "<level error>[" << int(l) << ']';
+ out.setstate(std::ios::badbit);
+ break;
+ }
+ return out;
+}
+
+} // namespace threading
#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
environment::environment(bool abort_on_exception)
@@ -26,6 +73,21 @@
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
}
+
+environment::environment(threading::level mt_level, bool abort_on_exception)
+ : i_initialized(false),
+ abort_on_exception(abort_on_exception)
+{
+ // It is not clear that we can pass null in MPI_Init_thread.
+ int dummy_thread_level = 0;
+ if (!initialized()) {
+ BOOST_MPI_CHECK_RESULT(MPI_Init_thread,
+ (0, 0, int(mt_level), &dummy_thread_level ));
+ i_initialized = true;
+ }
+
+ MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+}
#endif
environment::environment(int& argc, char** &argv, bool abort_on_exception)
@@ -40,6 +102,22 @@
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
}
+environment::environment(int& argc, char** &argv, threading::level mt_level,
+ bool abort_on_exception)
+ : i_initialized(false),
+ abort_on_exception(abort_on_exception)
+{
+ // It is not clear that we can pass null in MPI_Init_thread.
+ int dummy_thread_level = 0;
+ if (!initialized()) {
+ BOOST_MPI_CHECK_RESULT(MPI_Init_thread,
+ (&argc, &argv, int(mt_level), &dummy_thread_level));
+ i_initialized = true;
+ }
+
+ MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+}
+
environment::~environment()
{
if (i_initialized) {
@@ -122,4 +200,20 @@
return std::string(name, len);
}
+threading::level environment::thread_level()
+{
+ int level;
+
+ BOOST_MPI_CHECK_RESULT(MPI_Query_thread, (&level));
+ return static_cast<threading::level>(level);
+}
+
+bool environment::is_main_thread()
+{
+ int isit;
+
+ BOOST_MPI_CHECK_RESULT(MPI_Is_thread_main, (&isit));
+ return static_cast<bool>(isit);
+}
+
} } // end namespace boost::mpi
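[Editor's note: the extraction operator defined above lets the requested threading level be parsed from a command-line token, which is presumably how the mt_init_test variants in the Jamfile below receive "single", "funneled", and so on. A sketch assuming the MPI implementation supports no-argument initialization; not part of the patch.]

    #include <boost/mpi/environment.hpp>
    #include <iostream>
    #include <sstream>
    namespace mpi = boost::mpi;

    int main(int argc, char* argv[])
    {
      mpi::threading::level requested = mpi::threading::single;
      if (argc > 1) {
        std::istringstream iss(argv[1]);
        if (!(iss >> requested)) {                  // badbit is set on an unknown token
          std::cerr << "unknown threading level: " << argv[1] << std::endl;
          return 1;
        }
      }
      mpi::environment env(requested);              // needs BOOST_MPI_HAS_NOARG_INITIALIZATION
      std::cout << "granted: " << mpi::environment::thread_level() << std::endl;
      return 0;
    }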
Modified: branches/release/libs/mpi/src/python/datatypes.cpp
==============================================================================
--- branches/release/libs/mpi/src/python/datatypes.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/python/datatypes.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -17,7 +17,9 @@
void export_datatypes()
{
+#if PY_MAJOR_VERSION < 3
register_serialized(long(0), &PyInt_Type);
+#endif
register_serialized(false, &PyBool_Type);
register_serialized(double(0.0), &PyFloat_Type);
}
Modified: branches/release/libs/mpi/src/python/py_environment.cpp
==============================================================================
--- branches/release/libs/mpi/src/python/py_environment.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/python/py_environment.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -11,6 +11,9 @@
* This file reflects the Boost.MPI "environment" class into Python
* methods at module level.
*/
+
+#include <locale>
+#include <string>
#include <boost/python.hpp>
#include <boost/mpi.hpp>
@@ -30,14 +33,22 @@
* zero-initialized before it is used.
*/
static environment* env;
-
+
bool mpi_init(list python_argv, bool abort_on_exception)
{
// If MPI is already initialized, do nothing.
if (environment::initialized())
return false;
- // Convert Python argv into C-style argc/argv.
+#if PY_MAJOR_VERSION >= 3
+ #ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION
+ env = new environment(abort_on_exception);
+ #else
+ #error No argument initialization, supported from MPI 1.2 and up, is needed when using Boost.MPI with Python 3.x
+ #endif
+#else
+
+ // Convert Python argv into C-style argc/argv.
int my_argc = extract<int>(python_argv.attr("__len__")());
char** my_argv = new char*[my_argc];
for (int arg = 0; arg < my_argc; ++arg)
@@ -52,9 +63,10 @@
if (mpi_argv != my_argv)
PySys_SetArgv(mpi_argc, mpi_argv);
- for (int arg = 0; arg < my_argc; ++arg)
- free(my_argv[arg]);
- delete [] my_argv;
+ for (int arg = 0; arg < mpi_argc; ++arg)
+ free(mpi_argv[arg]);
+ delete [] mpi_argv;
+#endif
return true;
}
Modified: branches/release/libs/mpi/src/python/py_nonblocking.cpp
==============================================================================
--- branches/release/libs/mpi/src/python/py_nonblocking.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/src/python/py_nonblocking.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -118,7 +118,7 @@
pair<status, request_list::iterator> result =
wait_any(requests.begin(), requests.end());
- return make_tuple(
+ return boost::python::make_tuple(
result.second->get_value_or_none(),
result.first,
distance(requests.begin(), result.second));
@@ -134,7 +134,7 @@
test_any(requests.begin(), requests.end());
if (result)
- return make_tuple(
+ return boost::python::make_tuple(
result->second->get_value_or_none(),
result->first,
distance(requests.begin(), result->second));
Modified: branches/release/libs/mpi/test/Jamfile.v2
==============================================================================
--- branches/release/libs/mpi/test/Jamfile.v2 Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/test/Jamfile.v2 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -23,6 +23,11 @@
[ mpi-test broadcast_test : : : 2 17 ]
[ mpi-test gather_test ]
[ mpi-test is_mpi_op_test : : : 1 ]
+ [ mpi-test mt_level_test : : : 1 ]
+ [ mpi-test mt_init_test-single : mt_init_test.cpp : <testing.arg>"single" : 1 4 ]
+ [ mpi-test mt_init_test-funneled : mt_init_test.cpp : <testing.arg>"funneled" : 1 4 ]
+ [ mpi-test mt_init_test-serialized : mt_init_test.cpp : <testing.arg>"serialized" : 1 4 ]
+ [ mpi-test mt_init_test-multiple : mt_init_test.cpp : <testing.arg>"multiple" : 1 4 ]
# Note: Microsoft MPI fails nonblocking_test on 1 processor
[ mpi-test nonblocking_test ]
[ mpi-test reduce_test ]
@@ -33,5 +38,6 @@
[ mpi-test skeleton_content_test : : : 2 3 4 7 8 13 17 ]
[ mpi-test graph_topology_test : : : 2 7 13 ]
[ mpi-test pointer_test : : : 2 ]
+ [ mpi-test groups_test ]
;
}
Modified: branches/release/libs/mpi/test/all_reduce_test.cpp
==============================================================================
--- branches/release/libs/mpi/test/all_reduce_test.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/test/all_reduce_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -9,6 +9,7 @@
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/test/minimal.hpp>
+#include <vector>
#include <algorithm>
#include <boost/serialization/string.hpp>
#include <boost/iterator/counting_iterator.hpp>
@@ -58,6 +59,16 @@
return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z);
}
+// test lexical order
+bool operator<(const point& p1, const point& p2)
+{
+ return (p1.x < p2.x
+ ? true
+ : (p1.x > p2.x
+ ? false
+ : p1.y < p2.y ));
+}
+
namespace boost { namespace mpi {
template <>
@@ -67,22 +78,29 @@
template<typename Generator, typename Op>
void
-all_reduce_test(const communicator& comm, Generator generator,
- const char* type_kind, Op op, const char* op_kind,
- typename Generator::result_type init)
+all_reduce_one_test(const communicator& comm, Generator generator,
+ const char* type_kind, Op op, const char* op_kind,
+ typename Generator::result_type init, bool in_place)
{
typedef typename Generator::result_type value_type;
value_type value = generator(comm.rank());
using boost::mpi::all_reduce;
+ using boost::mpi::inplace;
if (comm.rank() == 0) {
std::cout << "Reducing to " << op_kind << " of " << type_kind << "...";
std::cout.flush();
}
- value_type result_value = all_reduce(comm, value, op);
-
+ value_type result_value;
+ if (in_place) {
+ all_reduce(comm, inplace(value), op);
+ result_value = value;
+ } else {
+ result_value = all_reduce(comm, value, op);
+ }
+
// Compute expected result
std::vector<value_type> generated_values;
for (int p = 0; p < comm.size(); ++p)
@@ -97,6 +115,69 @@
(comm.barrier)();
}
+template<typename Generator, typename Op>
+void
+all_reduce_array_test(const communicator& comm, Generator generator,
+ const char* type_kind, Op op, const char* op_kind,
+ typename Generator::result_type init, bool in_place)
+{
+ typedef typename Generator::result_type value_type;
+ value_type value = generator(comm.rank());
+ std::vector<value_type> send(10, value);
+
+ using boost::mpi::all_reduce;
+ using boost::mpi::inplace;
+
+ if (comm.rank() == 0) {
+ char const* place = in_place ? "in place" : "out of place";
+ std::cout << "Reducing (" << place << ") array to " << op_kind << " of " << type_kind << "...";
+ std::cout.flush();
+ }
+ std::vector<value_type> result;
+ if (in_place) {
+ all_reduce(comm, inplace(&(send[0])), send.size(), op);
+ result.swap(send);
+ } else {
+ std::vector<value_type> recv(10, value_type());
+ all_reduce(comm, &(send[0]), send.size(), &(recv[0]), op);
+ result.swap(recv);
+ }
+
+ // Compute expected result
+ std::vector<value_type> generated_values;
+ for (int p = 0; p < comm.size(); ++p)
+ generated_values.push_back(generator(p));
+ value_type expected_result = std::accumulate(generated_values.begin(),
+ generated_values.end(),
+ init, op);
+
+ bool got_expected_result = (std::equal_range(result.begin(), result.end(),
+ expected_result)
+ == std::make_pair(result.begin(), result.end()));
+ BOOST_CHECK(got_expected_result);
+ if (got_expected_result && comm.rank() == 0)
+ std::cout << "OK." << std::endl;
+
+ (comm.barrier)();
+}
+
+// Test the 4 families of all reduce: (value, array) X (in place, out of place)
+template<typename Generator, typename Op>
+void
+all_reduce_test(const communicator& comm, Generator generator,
+ const char* type_kind, Op op, const char* op_kind,
+ typename Generator::result_type init)
+{
+ const bool in_place = true;
+ const bool out_of_place = false;
+ all_reduce_one_test(comm, generator, type_kind, op, op_kind, init, in_place);
+ all_reduce_one_test(comm, generator, type_kind, op, op_kind, init, out_of_place);
+ all_reduce_array_test(comm, generator, type_kind, op, op_kind,
+ init, in_place);
+ all_reduce_array_test(comm, generator, type_kind, op, op_kind,
+ init, out_of_place);
+}
+
// Generates integers to test with all_reduce()
struct int_generator
{
@@ -168,6 +249,11 @@
return x.value == y.value;
}
+bool operator<(const wrapped_int& x, const wrapped_int& y)
+{
+ return x.value < y.value;
+}
+
// Generates wrapped_its to test with all_reduce()
struct wrapped_int_generator
{
@@ -196,6 +282,8 @@
environment env(argc, argv);
communicator comm;
+ const bool in_place = true;
+ const bool out_of_place = false;
// Built-in MPI datatypes with built-in MPI operations
all_reduce_test(comm, int_generator(), "integers", std::plus<int>(), "sum",
@@ -215,8 +303,8 @@
// Built-in MPI datatypes with user-defined operations
all_reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(),
"bitwise and", -1);
-
- // Arbitrary types with user-defined, commutative operations.
+
+ // Arbitrary types with user-defined, commutative operations.
all_reduce_test(comm, wrapped_int_generator(17), "wrapped integers",
std::plus<wrapped_int>(), "sum", wrapped_int(0));
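For reference, a minimal usage sketch (illustrative only, not part of this changeset) of the four all_reduce call forms the new tests exercise -- (single value, array) x (out of place, in place) -- assuming the Boost.MPI headers from this changeset and an MPI launcher are available:

#include <boost/mpi.hpp>
#include <functional>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;
  using boost::mpi::all_reduce;
  using boost::mpi::inplace;

  int value = comm.rank() + 1;
  std::vector<int> in(10, comm.rank() + 1);
  std::vector<int> out(10);
  int n = static_cast<int>(in.size());

  // Single value, out of place: the reduced value is returned.
  int sum = all_reduce(comm, value, std::plus<int>());

  // Single value, in place: 'value' is overwritten with the reduced value.
  all_reduce(comm, inplace(value), std::plus<int>());

  // Array, out of place: element-wise reduction into a separate buffer.
  all_reduce(comm, &in[0], n, &out[0], std::plus<int>());

  // Array, in place: 'in' is overwritten element-wise with the result.
  all_reduce(comm, inplace(&in[0]), n, std::plus<int>());

  return (sum == value) ? 0 : 1;
}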
Modified: branches/release/libs/mpi/test/broadcast_test.cpp
==============================================================================
--- branches/release/libs/mpi/test/broadcast_test.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/test/broadcast_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -55,7 +55,6 @@
using boost::mpi::get_content;
using boost::make_counting_iterator;
using boost::mpi::broadcast;
- using boost::mpi::get_content;
typedef std::list<int>::iterator iterator;
Copied: branches/release/libs/mpi/test/groups_test.cpp (from r84739, trunk/libs/mpi/test/groups_test.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/test/groups_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r84739, trunk/libs/mpi/test/groups_test.cpp)
@@ -0,0 +1,59 @@
+// Copyright (C) 2013 Andreas Hehn <hehn_at_[hidden]>, ETH Zurich
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A test of communicators created from groups.
+
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/group.hpp>
+#include <boost/test/minimal.hpp>
+#include <vector>
+#include <algorithm>
+
+namespace mpi = boost::mpi;
+
+
+template <typename T>
+struct iota
+{
+ iota() : state(0){};
+ T operator()()
+ {
+ return state++;
+ }
+ T state;
+};
+
+void group_test(const mpi::communicator& comm)
+{
+ std::vector<int> grp_a_ranks(comm.size() / 2);
+ std::generate(grp_a_ranks.begin(),grp_a_ranks.end(),iota<int>());
+
+ mpi::group grp_a = comm.group().include(grp_a_ranks.begin(),grp_a_ranks.end());
+ mpi::group grp_b = comm.group().exclude(grp_a_ranks.begin(),grp_a_ranks.end());
+
+ mpi::communicator part_a(comm,grp_a);
+ mpi::communicator part_b(comm,grp_b);
+
+ if(part_a)
+ {
+ std::cout << "comm rank: " << comm.rank() << " -> part_a rank:" << part_a.rank() << std::endl;
+ BOOST_CHECK(part_a.rank() == comm.rank());
+ }
+ if(part_b)
+ {
+ std::cout << "comm rank: " << comm.rank() << " -> part_b rank:" << part_b.rank() << std::endl;
+ BOOST_CHECK(part_b.rank() == comm.rank() - comm.size()/2);
+ }
+}
+
+int test_main(int argc, char* argv[])
+{
+ mpi::environment env(argc,argv);
+ mpi::communicator comm;
+ group_test(comm);
+ return 0;
+}
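A condensed sketch of the pattern the test above relies on (illustrative only): a communicator constructed from a group is valid only on the ranks that belong to that group and converts to false everywhere else.

#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/group.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // Keep only rank 0 in the sub-group; all other ranks are left out.
  int keep[] = { 0 };
  mpi::group sub_group = world.group().include(keep, keep + 1);
  mpi::communicator sub(world, sub_group);

  // Ranks outside the group receive a communicator that converts to false.
  if (sub)
    std::cout << "world rank " << world.rank()
              << " -> sub rank " << sub.rank() << std::endl;
  return 0;
}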
Copied: branches/release/libs/mpi/test/mt_init_test.cpp (from r84739, trunk/libs/mpi/test/mt_init_test.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/test/mt_init_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r84739, trunk/libs/mpi/test/mt_init_test.cpp)
@@ -0,0 +1,27 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi_at_oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// test threading::level operations
+
+#include <boost/mpi/environment.hpp>
+#include <boost/test/minimal.hpp>
+#include <iostream>
+#include <sstream>
+
+namespace mpi = boost::mpi;
+
+int
+test_main(int argc, char* argv[]) {
+ mpi::threading::level required = mpi::threading::level(-1);
+ BOOST_CHECK(argc == 2);
+ std::istringstream cmdline(argv[1]);
+ cmdline >> required;
+ BOOST_CHECK(!cmdline.bad());
+ mpi::environment env(argc,argv,required);
+ BOOST_CHECK(env.thread_level() >= mpi::threading::single);
+ BOOST_CHECK(env.thread_level() <= mpi::threading::multiple);
+ return 0;
+}
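The same initialization can also request a fixed threading level instead of one parsed from the command line as above; a minimal sketch (illustrative only -- the level reported by thread_level() is what the MPI implementation actually granted and need not equal the requested one):

#include <boost/mpi/environment.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  // Request funneled threading support at MPI initialization.
  mpi::environment env(argc, argv, mpi::threading::funneled);

  // Report the threading level actually provided.
  std::cout << "provided threading level: " << env.thread_level() << std::endl;
  return 0;
}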
Copied: branches/release/libs/mpi/test/mt_level_test.cpp (from r84739, trunk/libs/mpi/test/mt_level_test.cpp)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ branches/release/libs/mpi/test/mt_level_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553, copy of r84739, trunk/libs/mpi/test/mt_level_test.cpp)
@@ -0,0 +1,107 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi_at_oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// test threading::level operations
+
+#include <boost/mpi/environment.hpp>
+#include <boost/test/minimal.hpp>
+#include <iostream>
+#include <sstream>
+
+namespace mpi = boost::mpi;
+
+void
+test_threading_level_io(mpi::threading::level orig) {
+ std::ostringstream out;
+ namespace mt = boost::mpi::threading;
+ mt::level printed = mt::level(-1);
+
+ out << orig;
+ BOOST_CHECK(out.good());
+ std::string orig_str(out.str());
+ std::cout << "orig string:" << orig_str << '\n';
+ std::istringstream in(orig_str);
+ in >> printed;
+ BOOST_CHECK(!in.bad());
+ std::cout << "orig: " << orig << ", printed: " << printed << std::endl;
+ BOOST_CHECK(orig == printed);
+}
+
+void
+test_threading_levels_io() {
+ namespace mt = boost::mpi::threading;
+ test_threading_level_io(mt::single);
+ test_threading_level_io(mt::funneled);
+ test_threading_level_io(mt::serialized);
+ test_threading_level_io(mt::multiple);
+}
+
+void
+test_threading_level_cmp() {
+ namespace mt = boost::mpi::threading;
+ BOOST_CHECK(mt::single == mt::single);
+ BOOST_CHECK(mt::funneled == mt::funneled);
+ BOOST_CHECK(mt::serialized == mt::serialized);
+ BOOST_CHECK(mt::multiple == mt::multiple);
+
+ BOOST_CHECK(mt::single != mt::funneled);
+ BOOST_CHECK(mt::single != mt::serialized);
+ BOOST_CHECK(mt::single != mt::multiple);
+
+ BOOST_CHECK(mt::funneled != mt::single);
+ BOOST_CHECK(mt::funneled != mt::serialized);
+ BOOST_CHECK(mt::funneled != mt::multiple);
+
+ BOOST_CHECK(mt::serialized != mt::single);
+ BOOST_CHECK(mt::serialized != mt::funneled);
+ BOOST_CHECK(mt::serialized != mt::multiple);
+
+ BOOST_CHECK(mt::multiple != mt::single);
+ BOOST_CHECK(mt::multiple != mt::funneled);
+ BOOST_CHECK(mt::multiple != mt::serialized);
+
+ BOOST_CHECK(mt::single < mt::funneled);
+ BOOST_CHECK(mt::funneled > mt::single);
+ BOOST_CHECK(mt::single < mt::serialized);
+ BOOST_CHECK(mt::serialized > mt::single);
+ BOOST_CHECK(mt::single < mt::multiple);
+ BOOST_CHECK(mt::multiple > mt::single);
+
+ BOOST_CHECK(mt::funneled < mt::serialized);
+ BOOST_CHECK(mt::serialized > mt::funneled);
+ BOOST_CHECK(mt::funneled < mt::multiple);
+ BOOST_CHECK(mt::multiple > mt::funneled);
+
+ BOOST_CHECK(mt::serialized < mt::multiple);
+ BOOST_CHECK(mt::multiple > mt::serialized);
+
+ BOOST_CHECK(mt::single <= mt::single);
+ BOOST_CHECK(mt::single <= mt::funneled);
+ BOOST_CHECK(mt::funneled >= mt::single);
+ BOOST_CHECK(mt::single <= mt::serialized);
+ BOOST_CHECK(mt::serialized >= mt::single);
+ BOOST_CHECK(mt::single <= mt::multiple);
+ BOOST_CHECK(mt::multiple >= mt::single);
+
+ BOOST_CHECK(mt::funneled <= mt::funneled);
+ BOOST_CHECK(mt::funneled <= mt::serialized);
+ BOOST_CHECK(mt::serialized >= mt::funneled);
+ BOOST_CHECK(mt::funneled <= mt::multiple);
+ BOOST_CHECK(mt::multiple >= mt::funneled);
+
+ BOOST_CHECK(mt::serialized <= mt::serialized);
+ BOOST_CHECK(mt::serialized <= mt::multiple);
+ BOOST_CHECK(mt::multiple >= mt::serialized);
+
+ BOOST_CHECK(mt::multiple <= mt::multiple);
+}
+
+int
+test_main(int argc, char* argv[]) {
+ test_threading_levels_io();
+ test_threading_level_cmp();
+ return 0;
+}
Modified: branches/release/libs/mpi/test/nonblocking_test.cpp
==============================================================================
--- branches/release/libs/mpi/test/nonblocking_test.cpp Tue Sep 3 14:45:18 2013 (r85552)
+++ branches/release/libs/mpi/test/nonblocking_test.cpp 2013-09-03 15:31:55 EDT (Tue, 03 Sep 2013) (r85553)
@@ -28,7 +28,7 @@
mk_all_except_test_all // use for serialized types
};
-static char* method_kind_names[mk_all] = {
+static const char* method_kind_names[mk_all] = {
"wait_any",
"test_any",
"wait_all",