
Subject: [Boost-commit] svn:boost r68237 - sandbox/guild/pool/boost/pool
From: pbristow_at_[hidden]
Date: 2011-01-18 13:12:30


Author: pbristow
Date: 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
New Revision: 68237
URL: http://svn.boost.org/trac/boost/changeset/68237

Log:
Added more Doxygen comments

Text files modified:
   sandbox/guild/pool/boost/pool/pool.hpp | 299 ++++++++++++++++++++++++++++-----------
   sandbox/guild/pool/boost/pool/pool_alloc.hpp | 160 ++++++++++++++++----
   sandbox/guild/pool/boost/pool/poolfwd.hpp | 6
   sandbox/guild/pool/boost/pool/simple_segregated_storage.hpp | 182 ++++++++++++++++--------
   sandbox/guild/pool/boost/pool/singleton_pool.hpp | 97 +++++++++---
   5 files changed, 542 insertions(+), 202 deletions(-)

Modified: sandbox/guild/pool/boost/pool/pool.hpp
==============================================================================
--- sandbox/guild/pool/boost/pool/pool.hpp (original)
+++ sandbox/guild/pool/boost/pool/pool.hpp 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
@@ -47,7 +47,7 @@
 /*!
   \file
   \brief Fast memory allocator.
- \details Fast memory allocator, and guarantees proper alignment of all allocated chunks.
+ \details Fast memory allocator that guarantees proper alignment of all allocated chunks.
   Provides two UserAllocator classes and a template class pool,
   which extends and generalizes the framework provided by the simple segregated storage solution.
   For information on other pool-based interfaces, see the other pool interfaces.
@@ -67,23 +67,31 @@
 
 */
 
-namespace boost {
+namespace boost
+{
 
 +//! Default user allocator that uses new[] and delete[]. Used as the default template parameter for UserAllocator.
 struct default_user_allocator_new_delete
 {
- typedef std::size_t size_type;
- typedef std::ptrdiff_t difference_type;
+ typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
+ typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
 
   static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
- { return new (std::nothrow) char[bytes]; }
 + { //! Attempts to allocate the requested number of bytes from the system. Returns 0 if out of memory.
+ return new (std::nothrow) char[bytes];
+ }
   static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
- { delete [] block; }
 + { //! Deallocates block.
+ //! \pre Block must have been previously returned from a call to UserAllocator::malloc.
+ delete [] block;
+ }
 };
 
-struct default_user_allocator_malloc_free
 + //! Default user allocator that uses std::malloc and std::free. Can be used as the UserAllocator template parameter.
+ struct default_user_allocator_malloc_free
 {
- typedef std::size_t size_type;
- typedef std::ptrdiff_t difference_type;
+ typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
+ typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
 
   static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
   { return static_cast<char *>(std::malloc(bytes)); }
@@ -91,17 +99,30 @@
   { std::free(block); }
 };
 
-namespace details {
+namespace details
 +{ //! Implementation only.
 
-// PODptr is a class that pretends to be a "pointer" to different class types
-// that don't really exist. It provides member functions to access the "data"
-// of the "object" it points to. Since these "class" types are of variable
-// size, and contains some information at the *end* of its memory (for
-// alignment reasons), PODptr must contain the size of this "class" as well as
-// the pointer to this "object".
 template <typename SizeType>
 class PODptr
-{
+{ //! PODptr is a class that pretends to be a "pointer" to different class types
+ //! that don't really exist. It provides member functions to access the "data"
+ //! of the "object" it points to. Since these "class" types are of variable
 + //! size and contain some information at the *end* of their memory
+ //! (for alignment reasons),
+ //! PODptr must contain the size of this "class" as well as the pointer to this "object".
+
+ /*! \details A PODptr holds the location and size of a memory block allocated from the system. Each memory block is split logically into three sections:\n
 +Chunk area. The size of this section may vary between memory blocks. PODptr does not care what the size of the chunks is, but it does care about (and keeps track of) the total size of the chunk area.\n
+Next pointer. This section is always the same size for a given SizeType. It holds a pointer to the location of the next memory block in the memory block list, or 0 if there is no such block.\n
+Next size. This section is always the same size for a given SizeType. It holds the size of the next memory block in the memory block list.\n
+
+The PODptr class just provides cleaner ways of dealing with raw memory blocks.
+
+A PODptr object is either valid or invalid. An invalid PODptr is analogous to a null pointer.
+The default constructor for PODptr will result in an invalid object.
+Calling the member function invalidate will result in that object becoming invalid.
+The member function valid can be used to test for validity.
+*/
   public:
     typedef SizeType size_type;
 
@@ -110,7 +131,9 @@
     size_type sz;
 
     char * ptr_next_size() const
- { return (ptr + sz - sizeof(size_type)); }
+ {
+ return (ptr + sz - sizeof(size_type));
+ }
     char * ptr_next_ptr() const
     {
       return (ptr_next_size() -
@@ -119,83 +142,143 @@
 
   public:
     PODptr(char * const nptr, const size_type nsize)
- :ptr(nptr), sz(nsize) { }
+ :ptr(nptr), sz(nsize)
+ {
+ //! A PODptr may be created to point to a memory block by passing
+ //! the address and size of that memory block into the constructor.
+ //! A PODptr constructed in this way is valid.
+ }
     PODptr()
- :ptr(0), sz(0) { }
+ : ptr(0), sz(0)
 + { //! The default constructor for PODptr results in an invalid object.
+ }
 
- bool valid() const { return (begin() != 0); }
- void invalidate() { begin() = 0; }
- char * & begin() { return ptr; }
- char * begin() const { return ptr; }
- char * end() const { return ptr_next_ptr(); }
- size_type total_size() const { return sz; }
+ bool valid() const
+ { //! A PODptr object is either valid or invalid.
+ //! An invalid PODptr is analogous to a null pointer.
+ //! \returns true if PODptr is valid, false if invalid.
+ return (begin() != 0);
+ }
+ void invalidate()
+ { //! Make object invalid.
+ begin() = 0;
+ }
+ char * & begin()
+ { //! Each PODptr keeps the address and size of its memory block.
+ //! \returns The address of its memory block.
+ return ptr;
+ }
+ char * begin() const
+ { //! Each PODptr keeps the address and size of its memory block.
+ //! \return The address of its memory block.
+ return ptr;
+ }
+ char * end() const
+ { //! \returns begin() plus element_size (a 'past the end' value).
+ return ptr_next_ptr();
+ }
+ size_type total_size() const
+ { //! Each PODptr keeps the address and size of its memory block.
 + //! The address may be read or written by the member function begin;
 + //! the size of the memory block may only be read.
+ //! \returns size of the memory block.
+ return sz;
+ }
     size_type element_size() const
- {
+ { //! \returns size of element pointer area.
       return (sz - sizeof(size_type) -
           pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
     }
 
     size_type & next_size() const
- {
 + { //! Size of the next memory block in the memory block list.
+ //! \returns next_size.
       return *(static_cast<size_type *>(static_cast<void*>((ptr_next_size()))));
     }
     char * & next_ptr() const
- { return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr()))); }
 + { //! \returns reference to the next-pointer area (the stored pointer to the next memory block).
+ return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr())));
+ }
 
     PODptr next() const
- { return PODptr<size_type>(next_ptr(), next_size()); }
+ { //! \returns next PODptr.
+ return PODptr<size_type>(next_ptr(), next_size());
+ }
     void next(const PODptr & arg) const
- {
+ { //! Sets next PODptr.
       next_ptr() = arg.begin();
       next_size() = arg.total_size();
     }
-};
+}; // class PODptr
 
 } // namespace details
 
 template <typename UserAllocator>
-class pool: protected simple_segregated_storage<
- typename UserAllocator::size_type>
 +//! \tparam UserAllocator Defines the method that the Pool will use to allocate memory from the system.
+class pool: protected simple_segregated_storage < typename UserAllocator::size_type >
 {/*! \class boost::pool::pool
- \brief A fast memory allocator, and guarantees proper alignment of all allocated chunks.
- \tparam UserAllocator Defines the method that the Pool will use to allocate memory from the system.
+ \brief A fast memory allocator that guarantees proper alignment of all allocated chunks.
+ \details Whenever an object of type pool needs memory from the system,
+ it will request it from its UserAllocator template parameter.
+ The amount requested is determined using a doubling algorithm;
+ that is, each time more system memory is allocated,
+ the amount of system memory requested is doubled.
+
+ Users may control the doubling algorithm by using the following extensions.
+
+ Users may pass an additional constructor parameter to pool.
+ This parameter is of type size_type,
+ and is the number of chunks to request from the system
+ the first time that object needs to allocate system memory.
+ The default is 32. This parameter may not be 0.
+
+
 */
   public:
- typedef UserAllocator user_allocator;
- typedef typename UserAllocator::size_type size_type;
- typedef typename UserAllocator::difference_type difference_type;
+ typedef UserAllocator user_allocator; //!< User allocator.
+ typedef typename UserAllocator::size_type size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
+ typedef typename UserAllocator::difference_type difference_type; //!< A signed integral type that can represent the difference of any two pointers.
 
   private:
     BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
         (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );
 
- // Returns 0 if out-of-memory
- // Called if malloc/ordered_malloc needs to resize the free list
+ //! \returns 0 if out-of-memory.
+ //! Called if malloc/ordered_malloc needs to resize the free list.
     void * malloc_need_resize(); //! Called if malloc needs to resize the free list.
     void * ordered_malloc_need_resize(); //! Called if ordered_malloc needs to resize the free list.
 
   protected:
- details::PODptr<size_type> list;
+ details::PODptr<size_type> list; //!< List structure holding ordered blocks.
 
- simple_segregated_storage<size_type> & store() { return *this; }
- const simple_segregated_storage<size_type> & store() const { return *this; }
+ simple_segregated_storage<size_type> & store()
 + { //! \returns a reference to the store.
+ return *this;
+ }
+ const simple_segregated_storage<size_type> & store() const
 + { //! \returns a reference to the store.
+ return *this;
+ }
     const size_type requested_size;
     size_type next_size;
     size_type start_size;
     size_type max_size;
 
- // finds which POD in the list 'chunk' was allocated from
 + //! Finds which memory block in the list 'chunk' was allocated from.
     details::PODptr<size_type> find_POD(void * const chunk) const;
 
- // is_from() tests a chunk to determine if it belongs in a block
+ // is_from() tests a chunk to determine if it belongs in a block.
     static bool is_from(void * const chunk, char * const i,
         const size_type sizeof_i)
- { //! \returns true if chunk was allocated from u or may be returned
- //! as the result of a future allocation from u.
- //!" Returns false if chunk was allocated from some other pool
 + { //! \param chunk chunk to check whether it is from this pool.
 + //! \param i address of the memory block to test against.
 + //! \param sizeof_i element size (size of the chunk area of that block, not the total size of that block).
 + //! \returns true if chunk was allocated from this block or may be returned
 + //! as the result of a future allocation from this block.
 + //! Returns false if chunk was allocated from some other pool,
       //! or may be returned as the result of a future allocation from some other pool.
- //! Otherwise, the return value is meaningless;
- //! note that this function may not be used to reliably test random pointer values.
+ //! Otherwise, the return value is meaningless.
+ //! Note that this function may not be used to reliably test random pointer values.
 
       // We use std::less_equal and std::less to test 'chunk'
       // against the array bounds because standard operators
@@ -210,22 +293,33 @@
     }
 
     size_type alloc_size() const
- { //! \returns allocated size.
+ { //! Calculated size of the memory chunks that will be allocated by this Pool.
+ //! For alignment reasons, this is defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)).
+ //! \returns allocated size.
       const unsigned min_size = min_alloc_size;
       return details::pool::lcm<size_type>(requested_size, min_size);
     }
 
- // for the sake of code readability :)
     static void * & nextof(void * const ptr)
- { return *(static_cast<void **>(ptr)); }
 + { //! \returns the dereferenced pointer (ptr interpreted as a pointer to void*).
+ //! (Provided and used for the sake of code readability :)
+ return *(static_cast<void **>(ptr));
+ }
 
   public:
     // pre: npartition_size != 0 && nnext_size != 0
     explicit pool(const size_type nrequested_size,
         const size_type nnext_size = 32,
         const size_type nmax_size = 0)
- :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size)
+ :
+ list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size)
     { //! Constructs a new empty Pool that can be used to allocate chunks of size RequestedSize.
 + //! \param nrequested_size Requested chunk size.
 + //! \param nnext_size This parameter is of type size_type;
 + //! it is the number of chunks to request from the system
 + //! the first time that object needs to allocate system memory.
 + //! The default is 32. This parameter may not be 0.
 + //! \param nmax_size is the maximum size of ?
                 }
 
     ~pool()
@@ -242,13 +336,27 @@
     // Returns true if memory was actually deallocated
     bool purge_memory();
 
- size_type get_next_size() const { return next_size; }
- void set_next_size(const size_type nnext_size) { next_size = start_size = nnext_size; }
- size_type get_max_size() const { return max_size; }
- void set_max_size(const size_type nmax_size) { max_size = nmax_size; }
 + size_type get_next_size() const
 + { //! Number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be 0.
 + //! \returns next_size.
 + return next_size;
 + }
 + void set_next_size(const size_type nnext_size)
 + { //! Set number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be set to 0.
 + //! \param nnext_size the new next_size value.
 + next_size = start_size = nnext_size;
 + }
+ size_type get_max_size() const
+ { //! \returns max_size.
+ return max_size;
+ }
+ void set_max_size(const size_type nmax_size)
+ { //! Set max_size.
+ max_size = nmax_size;
+ }
     size_type get_requested_size() const
- { //! \returns the value passed into the constructor.
- //! This value will not change during the lifetime of a Pool object.
+ { //! \returns the requested size passed into the constructor.
+ //! (This value will not change during the lifetime of a Pool object).
                         return requested_size;
           }
 
@@ -259,8 +367,8 @@
     void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
     { //! Allocates a chunk of memory. Searches in the list of memory blocks
       //! for a block that has a free chunk, and returns that free chunk if found.
- //! Otherwise, creates a new memory block, adds its free list to t's free list,
- //! and returns a free chunk from that block.
 + //! Otherwise, creates a new memory block, adds its free list to this pool's free list,
 + //! and returns a free chunk from that block.
       //! If a new memory block cannot be allocated, returns 0. Amortized O(1).
 
       // Look for a non-empty storage
@@ -270,7 +378,10 @@
     }
 
     void * ordered_malloc()
- { //! Same as malloc, only merges the free lists, to preserve order. Amortized O(1).
 + { //! Same as malloc, only merges the free lists, to preserve order. Amortized O(1).
 + //! \returns a free chunk from the storage,
 + //! or 0 if a new memory block cannot be allocated.
+
       // Look for a non-empty storage
       if (!store().empty())
         return (store().malloc)();
@@ -280,13 +391,19 @@
     // Returns 0 if out-of-memory
     // Allocate a contiguous section of n chunks
     void * ordered_malloc(size_type n);
- //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes. Amortized O(n).
 + //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes. Amortized O(n).
 + //! \returns the first of the n contiguous chunks,
 + //! or 0 if a new memory block cannot be allocated.
 
     // pre: 'chunk' must have been previously
     // returned by *this.malloc().
     void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
- {//! Deallocates a chunk of memory. Note that chunk may not be 0. O(1).
+ { //! Deallocates a chunk of memory. Note that chunk may not be 0. O(1).
       //! chunk must have been previously returned by t.malloc() or t.ordered_malloc().
 + //! The chunk is simply returned to this pool's free list;
 + //! no memory is returned to the system.
 + //! \pre chunk may not be 0.
 + //! \pre chunk was returned by a previous call to t.malloc() or t.ordered_malloc().
       (store().free)(chunk);
     }
 
@@ -302,7 +419,11 @@
     // pre: 'chunk' must have been previously
     // returned by *this.malloc(n).
     void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n)
- {
 + { //! Assumes that chunks actually refers to a block of chunks
 + //! spanning n * partition_sz bytes;
 + //! chunks must have been previously returned by t.ordered_malloc(n).
 + //! Deallocates each chunk in that block.
 + //! Note that chunks may not be 0. O(n).
       const size_type partition_size = alloc_size();
       const size_type total_req_size = n * requested_size;
       const size_type num_chunks = total_req_size / partition_size +
@@ -341,8 +462,10 @@
 
 template <typename UserAllocator>
 bool pool<UserAllocator>::release_memory()
-{
- // This is the return value: it will be set to true when we actually call
+{ //! pool must be ordered. Frees every memory block that doesn't have any allocated chunks.
+ //! \returns true if at least one memory block was freed.
+
+ // ret is the return value: it will be set to true when we actually call
   // UserAllocator::free(..)
   bool ret = false;
 
@@ -464,7 +587,12 @@
 
 template <typename UserAllocator>
 bool pool<UserAllocator>::purge_memory()
-{
+{ //! pool must be ordered.
+ //! Frees every memory block.
+ //! This function invalidates any pointers previously returned
+ //! by allocation functions of t.
+ //! \returns true if at least one memory block was freed.
+
   details::PODptr<size_type> iter = list;
 
   if (!iter.valid())
@@ -491,8 +619,9 @@
 
 template <typename UserAllocator>
 void * pool<UserAllocator>::malloc_need_resize()
-{
- // No memory in any of our storages; make a new storage,
 +{ //! No memory in any of our storages; makes a new storage and
 + //! allocates a chunk from the newly allocated memory block.
 + //! \returns pointer to the chunk.
   const size_type partition_size = alloc_size();
   const size_type POD_size = next_size * partition_size +
       details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
@@ -520,8 +649,8 @@
 
 template <typename UserAllocator>
 void * pool<UserAllocator>::ordered_malloc_need_resize()
-{
- // No memory in any of our storages; make a new storage,
 +{ //! No memory in any of our storages; makes a new ordered storage.
 + //! \returns pointer to a chunk from the new storage.
   const size_type partition_size = alloc_size();
   const size_type POD_size = next_size * partition_size +
       details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
@@ -567,14 +696,16 @@
     node.next(prev.next());
     prev.next(node);
   }
-
   // and return a chunk from it.
   return (store().malloc)();
 }
 
 template <typename UserAllocator>
 void * pool<UserAllocator>::ordered_malloc(const size_type n)
-{
 +{ //! Allocates n contiguous chunks, requesting new memory from the system if needed.
 + //! \returns address of the first of the n contiguous chunks if allocation succeeded.
 + //! \returns 0 if there is not enough memory for n chunks.
+
   const size_type partition_size = alloc_size();
   const size_type total_req_size = n * requested_size;
   const size_type num_chunks = total_req_size / partition_size +
@@ -585,7 +716,7 @@
   if (ret != 0)
     return ret;
 
- // Not enougn memory in our storages; make a new storage,
+ // Not enough memory in our storages; make a new storage,
   BOOST_USING_STD_MAX();
   next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
   const size_type POD_size = next_size * partition_size +
@@ -595,7 +726,7 @@
     return 0;
   const details::PODptr<size_type> node(ptr, POD_size);
 
- // Split up block so we can use what wasn't requested
+ // Split up block so we can use what wasn't requested.
   if (next_size > num_chunks)
     store().add_ordered_block(node.begin() + num_chunks * partition_size,
         node.element_size() - num_chunks * partition_size, partition_size);
@@ -603,7 +734,7 @@
   next_size <<= 1;
 
   // insert it into the list,
- // handle border case
+ // handle border case.
   if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
   {
     node.next(list);
@@ -616,7 +747,7 @@
     while (true)
     {
       // if we're about to hit the end or
- // if we've found where "node" goes
+ // if we've found where "node" goes.
       if (prev.next_ptr() == 0
           || std::greater<void *>()(prev.next_ptr(), node.begin()))
         break;
@@ -635,8 +766,9 @@
 template <typename UserAllocator>
 details::PODptr<typename pool<UserAllocator>::size_type>
 pool<UserAllocator>::find_POD(void * const chunk) const
-{
- // We have to find which storage this chunk is from.
 +{ //! Finds which memory block (PODptr) this chunk was allocated from.
 + //! \returns the PODptr that holds this chunk.
+ // Iterate down list to find which storage this chunk is from.
   details::PODptr<size_type> iter = list;
   while (iter.valid())
   {
@@ -650,4 +782,5 @@
 
 } // namespace boost
 
-#endif
 +#endif // BOOST_POOL_HPP
+
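
As a point of reference for the pool documentation added above, here is a minimal usage sketch of the boost::pool interface; the chunk size and next_size values are illustrative only and are not part of this commit:

  #include <boost/pool/pool.hpp>
  #include <cassert>

  int main()
  {
    // A pool whose chunks are big enough to hold an int.
    // The second argument is next_size: the number of chunks requested from
    // the system the first time the pool needs memory (it may not be 0).
    boost::pool<> p(sizeof(int), 32);

    void * const chunk = (p.malloc)();       // one chunk, amortized O(1)
    assert(chunk != 0 && p.is_from(chunk));
    (p.free)(chunk);                         // back onto the pool's free list

    void * const run = p.ordered_malloc(8);  // 8 contiguous chunks, amortized O(n)
    if (run != 0)
      (p.free)(run, 8);

    return 0;                                // ~pool() releases all blocks to the system
  }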

Modified: sandbox/guild/pool/boost/pool/pool_alloc.hpp
==============================================================================
--- sandbox/guild/pool/boost/pool/pool_alloc.hpp (original)
+++ sandbox/guild/pool/boost/pool/pool_alloc.hpp 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
@@ -9,6 +9,70 @@
 #ifndef BOOST_POOL_ALLOC_HPP
 #define BOOST_POOL_ALLOC_HPP
 
+/*!
+ \file
+ \brief Standard Pool allocators.
 + \details Provides two template types that can be used for fast and efficient memory allocation.
+ These types both satisfy the Standard Allocator requirements [20.1.5]
+ and the additional requirements in [20.1.5/4],
+ so they can be used with Standard or user-supplied containers.\n
+ For information on other pool-based interfaces, see the other pool interfaces.
+
+ \n\n
+ Both of the pool allocators above satisfy all Standard Allocator requirements,
+ as laid out in the Standard [20.1.5].
+ They also both satisfy the additional requirements found in [20.1.5/4];
+ this permits their usage with any Standard-compliant container.
+
+ In addition, the fast_pool_allocator also provides an additional allocation
+ and an additional deallocation function:
+
+ Symbol Table\n
+ Symbol Meaning\n
+ PoolAlloc fast_pool_allocator<T, UserAllocator>\n
+ p value of type T *\n
+ \n
+ Additional allocation/deallocation functions (fast_pool_allocator only)\n
+ Expression Return Type Semantic Equivalence\n
+ PoolAlloc::allocate() T * PoolAlloc::allocate(1)\n
+ PoolAlloc::deallocate(p) void PoolAlloc::deallocate(p, 1)\n
+
+The typedef user_allocator publishes the value of the UserAllocator template parameter.
+
+Notes\n
+
+If the allocation functions run out of memory, they will throw std::bad_alloc.
+
+The underlying Pool type used by the allocators is accessible through the Singleton Pool Interface.
+The identifying tag used for pool_allocator is pool_allocator_tag,
+and the tag used for fast_pool_allocator is fast_pool_allocator_tag.
+All template parameters of the allocators (including implementation-specific ones)
+determine the type of the underlying Pool,
+with the exception of the first parameter T, whose size is used instead.
+
+Since the size of T is used to determine the type of the underlying Pool,
+each allocator for different types of the same size will share the same underlying pool.
+The tag class prevents pools from being shared between pool_allocator and fast_pool_allocator.
+For example, on a system where
+sizeof(int) == sizeof(void *), pool_allocator<int> and pool_allocator<void *>
+will both allocate/deallocate from/to the same pool.
+
+If there is only one thread running before main() starts and after main() ends,
+then both allocators are completely thread-safe.\n
+
+Compiler and STL Notes\n
+
 +A number of common STL libraries contain bugs in their use of allocators.
+Specifically, they pass null pointers to the deallocate function,
+which is explicitly forbidden by the Standard [20.1.5 Table 32].
+PoolAlloc will work around these libraries if it detects them;
+currently, workarounds are in place for:\n
+
+Borland C++ (Builder and command-line compiler) with default (RogueWave) library, ver. 5 and earlier\n
+STLport (with any compiler), ver. 4.0 and earlier\n
+
+*/
+
 // std::numeric_limits
 #include <boost/limits.hpp>
 // new, std::bad_alloc
@@ -30,20 +94,24 @@
 
 namespace boost {
 
-struct pool_allocator_tag { };
+ //! Tag to identify pool_allocator when used as template parameter.
+ struct pool_allocator_tag
+{
+};
 
-template <typename T,
- typename UserAllocator,
- typename Mutex,
- unsigned NextSize,
- unsigned MaxSize>
 +template <typename T, //!< The type of object to allocate/deallocate.
+ typename UserAllocator, //!< Defines the method that the underlying Pool will use to allocate memory from the system. See User Allocators for details.
+ typename Mutex, //!< Allows the user to determine the type of synchronization to be used on the underlying singleton pool. See the extensions to the public interface of singleton pool for more information.
+ unsigned NextSize, //!< The value of this parameter is passed to the underlying Pool when it is created.
+ unsigned MaxSize> //!< Limit on the maximum size used.
 class pool_allocator
-{
 +{ //! A standard allocator whose allocations are serviced by an underlying pool.
   public:
     typedef T value_type;
     typedef UserAllocator user_allocator;
- typedef Mutex mutex;
- BOOST_STATIC_CONSTANT(unsigned, next_size = NextSize);
 + typedef Mutex mutex; //!< typedef mutex publishes the value of the template parameter Mutex.
 + //! The static constant next_size publishes the value of the template parameter NextSize.
 + BOOST_STATIC_CONSTANT(unsigned, next_size = NextSize);
 
     typedef value_type * pointer;
     typedef const value_type * const_pointer;
@@ -52,33 +120,35 @@
     typedef typename pool<UserAllocator>::size_type size_type;
     typedef typename pool<UserAllocator>::difference_type difference_type;
 
- template <typename U>
 + template <typename U> //!< \tparam U the other value type.
 + //! Nested rebind struct that supplies an allocator for type U, as required of Standard Allocators.
     struct rebind
- {
- typedef pool_allocator<U, UserAllocator, Mutex, NextSize,MaxSize> other;
 + {
+ typedef pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> other;
     };
 
   public:
     pool_allocator()
- {
 + { //! Ensures construction of the underlying singleton_pool IFF an
 + //! instance of this allocator is constructed during global initialization.
       // Required to ensure construction of singleton_pool IFF an
- // instace of this allocator is constructed during global
- // initialization. See ticket #2359 for a complete explaination
+ // instance of this allocator is constructed during global
+ // initialization. See ticket #2359 for a complete explanation
       // ( http://svn.boost.org/trac/boost/ticket/2359 )
       singleton_pool<pool_allocator_tag, sizeof(T), UserAllocator, Mutex,
                      NextSize, MaxSize>::is_from(0);
     }
 
- // default copy constructor
+ // default copy constructor.
 
- // default assignment operator
+ // default assignment operator.
 
     // not explicit, mimicking std::allocator [20.4.1]
     template <typename U>
     pool_allocator(const pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> &)
- {
 + { //! Converting constructor from pool_allocator<U>; ensures construction of the underlying singleton_pool.
       // Required to ensure construction of singleton_pool IFF an
- // instace of this allocator is constructed during global
+ // instance of this allocator is constructed during global
       // initialization. See ticket #2359 for a complete explaination
       // ( http://svn.boost.org/trac/boost/ticket/2359 )
       singleton_pool<pool_allocator_tag, sizeof(T), UserAllocator, Mutex,
@@ -98,7 +168,7 @@
     static void destroy(const pointer ptr)
     {
       ptr->~T();
- (void) ptr; // avoid unused variable warning
+ (void) ptr; // avoid unused variable warning.
     }
 
     bool operator==(const pool_allocator &) const
@@ -128,6 +198,7 @@
     }
 };
 
 +//! Specialization of pool_allocator for the type void.
 template<
     typename UserAllocator,
     typename Mutex,
@@ -139,13 +210,29 @@
     typedef void* pointer;
     typedef const void* const_pointer;
     typedef void value_type;
- template <class U> struct rebind {
- typedef pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> other;
 + //! Nested rebind struct that supplies an allocator for another value type U.
 + template <class U> struct rebind
 + { //! Rebound allocator type.
+ typedef pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> other;
     };
 };
 
-struct fast_pool_allocator_tag { };
 +//! Tag to identify fast_pool_allocator when used as a template parameter.
+struct fast_pool_allocator_tag
+{
+};
+
+ /*! Fast Pool memory allocator.
 
+ pool_allocator is a more general-purpose solution, geared towards
+ efficiently servicing requests for any number of contiguous chunks.
+ fast_pool_allocator is also a general-purpose solution,
+ but is geared towards efficiently servicing requests for one chunk at a time;
+ it will work for contiguous chunks, but not as well as pool_allocator.
+ If you are seriously concerned about performance,
+ use fast_pool_allocator when dealing with containers such as std::list,
+ and use pool_allocator when dealing with containers such as std::vector.
+*/
 template <typename T,
     typename UserAllocator,
     typename Mutex,
@@ -166,6 +253,7 @@
     typedef typename pool<UserAllocator>::size_type size_type;
     typedef typename pool<UserAllocator>::difference_type difference_type;
 
 + //! Nested rebind struct that supplies an allocator for another value type U.
     template <typename U>
     struct rebind
     {
@@ -182,25 +270,25 @@
       singleton_pool<fast_pool_allocator_tag, sizeof(T),
                      UserAllocator, Mutex, NextSize, MaxSize>::is_from(0);
     }
-
- // default copy constructor
 
- // default assignment operator
+ // Default copy constructor used.
 
- // not explicit, mimicking std::allocator [20.4.1]
+ // Default assignment operator used.
+
+ // Not explicit, mimicking std::allocator [20.4.1]
     template <typename U>
     fast_pool_allocator(
         const fast_pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> &)
     {
       // Required to ensure construction of singleton_pool IFF an
- // instace of this allocator is constructed during global
+ // instance of this allocator is constructed during global
       // initialization. See ticket #2359 for a complete explaination
       // ( http://svn.boost.org/trac/boost/ticket/2359 )
       singleton_pool<fast_pool_allocator_tag, sizeof(T),
                      UserAllocator, Mutex, NextSize, MaxSize>::is_from(0);
     }
 
- // default destructor
+ // Default destructor used.
 
     static pointer address(reference r)
     { return &r; }
@@ -213,7 +301,7 @@
     void destroy(const pointer ptr)
     {
       ptr->~T();
- (void) ptr; // avoid unused variable warning
+ (void) ptr; // Avoid unused variable warning.
     }
 
     bool operator==(const fast_pool_allocator &) const
@@ -223,7 +311,7 @@
 
     static pointer allocate(const size_type n)
     {
- const pointer ret = (n == 1) ?
+ const pointer ret = (n == 1) ?
           static_cast<pointer>(
               (singleton_pool<fast_pool_allocator_tag, sizeof(T),
                   UserAllocator, Mutex, NextSize, MaxSize>::malloc)() ) :
@@ -265,17 +353,19 @@
     }
 };
 
 +//! Specialization of fast_pool_allocator for the type void.
 template<
- typename UserAllocator,
- typename Mutex,
- unsigned NextSize,
- unsigned MaxSize>
+ typename UserAllocator, //!< Defines the method that the underlying Pool will use to allocate memory from the system. See User Allocators for details.
+ typename Mutex, //!< Allows the user to determine the type of synchronization to be used on the underlying singleton pool. See the extensions to the public interface of singleton pool for more information.
+ unsigned NextSize, //!< The value of this parameter is passed to the underlying Pool when it is created.
+ unsigned MaxSize> //!< Limit on the maximum size used.
 class fast_pool_allocator<void, UserAllocator, Mutex, NextSize, MaxSize>
 {
 public:
     typedef void* pointer;
     typedef const void* const_pointer;
     typedef void value_type;
 + //! Nested rebind struct that supplies an allocator for another value type U.
     template <class U> struct rebind {
         typedef fast_pool_allocator<U, UserAllocator, Mutex, NextSize, MaxSize> other;
     };
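
The comments above describe how pool_allocator and fast_pool_allocator plug into Standard containers; a short sketch of that intended use follows (the container contents are illustrative, and the release/purge calls assume the default singleton-pool template parameters):

  #include <boost/pool/pool_alloc.hpp>
  #include <list>
  #include <vector>

  int main()
  {
    // pool_allocator: geared towards contiguous requests (e.g. std::vector).
    std::vector<int, boost::pool_allocator<int> > v(100, 42);

    // fast_pool_allocator: geared towards one chunk at a time (e.g. std::list).
    std::list<int, boost::fast_pool_allocator<int> > l(v.begin(), v.end());

    v.clear();
    l.clear();

    // Both allocators draw from singleton pools keyed by sizeof(T) and a tag;
    // memory can be handed back to the system explicitly.
    boost::singleton_pool<boost::pool_allocator_tag, sizeof(int)>::release_memory();
    boost::singleton_pool<boost::fast_pool_allocator_tag, sizeof(int)>::purge_memory();
    return 0;
  }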

Modified: sandbox/guild/pool/boost/pool/poolfwd.hpp
==============================================================================
--- sandbox/guild/pool/boost/pool/poolfwd.hpp (original)
+++ sandbox/guild/pool/boost/pool/poolfwd.hpp 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
@@ -9,6 +9,12 @@
 #ifndef BOOST_POOLFWD_HPP
 #define BOOST_POOLFWD_HPP
 
+/*!
+ \file
 + \brief Forward declarations of all public (non-implementation) classes.
+*/
+
+
 #include <boost/config.hpp> // for workarounds
 
 // std::size_t

Modified: sandbox/guild/pool/boost/pool/simple_segregated_storage.hpp
==============================================================================
--- sandbox/guild/pool/boost/pool/simple_segregated_storage.hpp (original)
+++ sandbox/guild/pool/boost/pool/simple_segregated_storage.hpp 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
@@ -9,6 +9,18 @@
 #ifndef BOOST_SIMPLE_SEGREGATED_STORAGE_HPP
 #define BOOST_SIMPLE_SEGREGATED_STORAGE_HPP
 
+/*!
+ \file
+ \brief Simple Segregated Storage.
+ \details Simple Segregated Storage Implementation.
+ Simple Segregated Storage is the basic idea behind the Boost Pool library.
+ Simple Segregated Storage is the simplest, and probably the fastest,
+ memory allocation/deallocation algorithm.
+ It begins by partitioning a memory block into fixed-size chunks.
+ Where the block comes from is not important until implementation time.
+ A Pool is some object that uses Simple Segregated Storage in this fashion.
+*/
+
 // std::greater
 #include <functional>
 
@@ -16,6 +28,12 @@
 
 namespace boost {
 
+/*!
+ Simple Segregated Storage is the simplest, and probably the fastest,
+ memory allocation/deallocation algorithm.
+ It begins by partitioning a memory block into fixed-size chunks.
+ Where the block comes from is not important until implementation time.
+*/
 template <typename SizeType>
 class simple_segregated_storage
 {
@@ -26,38 +44,61 @@
     simple_segregated_storage(const simple_segregated_storage &);
     void operator=(const simple_segregated_storage &);
 
- // pre: (n > 0), (start != 0), (nextof(start) != 0)
- // post: (start != 0)
 + //! Try to find n contiguous chunks of size partition_size in the free list, starting at start.
+ //! \pre (n > 0), (start != 0), (nextof(start) != 0)
+ //! \post (start != 0)
     static void * try_malloc_n(void * & start, size_type n,
         size_type partition_size);
 
   protected:
- void * first;
-
- // Traverses the free list referred to by "first",
- // and returns the iterator previous to where
- // "ptr" would go if it was in the free list.
- // Returns 0 if "ptr" would go at the beginning
- // of the free list (i.e., before "first")
+ void * first; /*!< This data member is the free list.
+ It points to the first chunk in the free list,
+ or is equal to 0 if the free list is empty.
+ */
+
+ //! \fn find_prev Traverses the free list referred to by "first",
+ //! and returns the iterator previous to where
+ //! "ptr" would go if it was in the free list.
+ //! \returns 0 if "ptr" would go at the beginning
+ //! of the free list (i.e., before "first").
+
+ //! Note that this function finds the location previous to where ptr would go
+ //! if it was in the free list.
+ //! It does not find the entry in the free list before ptr
+ //! (unless ptr is already in the free list).
+ //! Specifically, find_prev(0) will return 0,
+ //! not the last entry in the free list.
     void * find_prev(void * ptr);
 
     // for the sake of code readability :)
     static void * & nextof(void * const ptr)
- { return *(static_cast<void **>(ptr)); }
+ { //! The return value is just *ptr cast to the appropriate type. ptr must not be 0. (For the sake of code readability :)
+ //! As an example, let us assume that we want to truncate the free list after the first chunk.
+ //! That is, we want to set *first to 0; this will result in a free list with only one entry.
+ //! The normal way to do this is to first cast first to a pointer to a pointer to void,
+ //! and then dereference and assign (*static_cast<void **>(first) = 0;).
+ //! This can be done more easily through the use of this convenience function (nextof(first) = 0;).
+ //! \returns dereferenced pointer.
+ return *(static_cast<void **>(ptr));
+ }
 
   public:
     // Post: empty()
     simple_segregated_storage()
- :first(0) { }
-
- // pre: npartition_sz >= sizeof(void *)
- // npartition_sz = sizeof(void *) * i, for some integer i
- // nsz >= npartition_sz
- // block is properly aligned for an array of object of
- // size npartition_sz and array of void *
- // The requirements above guarantee that any pointer to a chunk
- // (which is a pointer to an element in an array of npartition_sz)
- // may be cast to void **.
+ :first(0)
+ { //! Construct empty storage area.
+ //! \post empty()
+ }
+
+ //! Segregate block into chunks.
+ //! \pre npartition_sz >= sizeof(void *)
+ //! \pre npartition_sz = sizeof(void *) * i, for some integer i
+ //! \pre nsz >= npartition_sz
+ //! \pre Block is properly aligned for an array of object of
+ //! size npartition_sz and array of void *.
+ //! The requirements above guarantee that any pointer to a chunk
+ //! (which is a pointer to an element in an array of npartition_sz)
+ //! may be cast to void **.
     static void * segregate(void * block,
         size_type nsz, size_type npartition_sz,
         void * end = 0);
@@ -66,9 +107,12 @@
     // Post: !empty()
     void add_block(void * const block,
         const size_type nsz, const size_type npartition_sz)
- {
- // Segregate this block and merge its free list into the
- // free list referred to by "first"
 + { //! Add a block.
+ //! Segregate this block and merge its free list into the
+ //! free list referred to by "first".
+ //! \pre Same as segregate.
+ //! \post !empty()
+
       first = segregate(block, nsz, npartition_sz, first);
     }
 
@@ -76,10 +120,10 @@
     // Post: !empty()
     void add_ordered_block(void * const block,
         const size_type nsz, const size_type npartition_sz)
- {
- // This (slower) version of add_block segregates the
- // block and merges its free list into our free list
- // in the proper order
 + { //! Add a block, keeping the free list ordered.
+ //! This (slower) version of add_block segregates the
+ //! block and merges its free list into our free list
+ //! in the proper order.
 
       // Find where "block" would go in the free list
       void * const loc = find_prev(block);
@@ -91,16 +135,21 @@
         nextof(loc) = segregate(block, nsz, npartition_sz, nextof(loc));
     }
 
- // default destructor
+ // default destructor.
 
- bool empty() const { return (first == 0); }
+ bool empty() const
+ { //! \returns true if simple_segregated_storage is empty.
+ return (first == 0);
+ }
 
     // pre: !empty()
     void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
- {
 + { //! Allocate a chunk by taking the first chunk from the free list.
+ //! \pre !empty()
+ //! Increment the "first" pointer to point to the next chunk.
       void * const ret = first;
 
- // Increment the "first" pointer to point to the next chunk
+ // Increment the "first" pointer to point to the next chunk.
       first = nextof(first);
       return ret;
     }
@@ -109,7 +158,9 @@
     // same free list
     // post: !empty()
     void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk)
- {
 + { //! Free a chunk.
+ //! \pre chunk was previously returned from a malloc() referring to the same free list.
+ //! \post !empty()
       nextof(chunk) = first;
       first = chunk;
     }
@@ -118,9 +169,11 @@
     // same free list
     // post: !empty()
     void ordered_free(void * const chunk)
- {
- // This (slower) implementation of 'free' places the memory
- // back in the list in its proper order.
+ { //! This (slower) implementation of 'free' places the memory
+ //! back in the list in its proper order.
+ //! \pre chunk was previously returned from a malloc() referring to the same free list
+ //! \post !empty().
+
 
       // Find where "chunk" goes in the free list
       void * const loc = find_prev(chunk);
@@ -137,26 +190,35 @@
 
     // Note: if you're allocating/deallocating n a lot, you should
     // be using an ordered pool.
- void * malloc_n(size_type n, size_type partition_size);
-
- // pre: chunks was previously allocated from *this with the same
- // values for n and partition_size
+ // pre: chunks was previously allocated from *this with the same
+ // values for n and partition_size.
     // post: !empty()
- // Note: if you're allocating/deallocating n a lot, you should
- // be using an ordered pool.
+ void * malloc_n(size_type n, size_type partition_size);
+ //! \pre chunks was previously allocated from *this with the same
+ //! values for n and partition_size.
+ //! \post !empty()
+ //! Note: if you're allocating/deallocating n a lot, you should
+ //! be using an ordered pool.
+
     void free_n(void * const chunks, const size_type n,
         const size_type partition_size)
- {
+ { //! Free N chunks.
+ //! \pre chunks was previously allocated from *this with the same
+ //! values for n and partition_size.
+
       if(n != 0)
         add_block(chunks, n * partition_size, partition_size);
     }
 
     // pre: chunks was previously allocated from *this with the same
- // values for n and partition_size
+ // values for n and partition_size.
     // post: !empty()
     void ordered_free_n(void * const chunks, const size_type n,
         const size_type partition_size)
- {
 + { //! Free n chunks back into the ordered free list.
+ //! \pre chunks was previously allocated from *this with the same
+ //! values for n and partition_size.
+
       if(n != 0)
         add_ordered_block(chunks, n * partition_size, partition_size);
     }
@@ -188,10 +250,10 @@
     const size_type partition_sz,
     void * const end)
 {
- // Get pointer to last valid chunk, preventing overflow on size calculations
- // The division followed by the multiplication just makes sure that
- // old == block + partition_sz * i, for some integer i, even if the
- // block size (sz) is not a multiple of the partition size.
+ //! Get pointer to last valid chunk, preventing overflow on size calculations
+ //! The division followed by the multiplication just makes sure that
+ //! old == block + partition_sz * i, for some integer i, even if the
+ //! block size (sz) is not a multiple of the partition size.
   char * old = static_cast<char *>(block)
       + ((sz - partition_sz) / partition_sz) * partition_sz;
 
@@ -214,18 +276,18 @@
   return block;
 }
 
-// The following function attempts to find n contiguous chunks
-// of size partition_size in the free list, starting at start.
-// If it succeds, it returns the last chunk in that contiguous
-// sequence, so that the sequence is known by [start, {retval}]
-// If it fails, it does do either because it's at the end of the
-// free list or hits a non-contiguous chunk. In either case,
-// it will return 0, and set start to the last considered
-// chunk. You are at the end of the free list if
-// nextof(start) == 0. Otherwise, start points to the last
-// chunk in the contiguous sequence, and nextof(start) points
-// to the first chunk in the next contiguous sequence (assuming
-// an ordered free list)
+//! The following function attempts to find n contiguous chunks
+//! of size partition_size in the free list, starting at start.
 +//! If it succeeds, it returns the last chunk in that contiguous
 +//! sequence, so that the sequence is known by [start, {retval}].
 +//! If it fails, it does so either because it's at the end of the
+//! free list or hits a non-contiguous chunk. In either case,
+//! it will return 0, and set start to the last considered
+//! chunk. You are at the end of the free list if
+//! nextof(start) == 0. Otherwise, start points to the last
+//! chunk in the contiguous sequence, and nextof(start) points
+//! to the first chunk in the next contiguous sequence (assuming
+//! an ordered free list).
 template <typename SizeType>
 void * simple_segregated_storage<SizeType>::try_malloc_n(
     void * & start, size_type n, const size_type partition_size)
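
To make the free-list mechanics described in these comments concrete, here is a small sketch of using simple_segregated_storage directly; the buffer and chunk sizes are illustrative, and the caller supplies the memory block, as the comments require:

  #include <boost/pool/simple_segregated_storage.hpp>
  #include <cstddef>
  #include <vector>

  int main()
  {
    // The storage never allocates memory itself; the caller supplies a block.
    std::vector<char> block(1024);
    boost::simple_segregated_storage<std::size_t> storage;

    // Partition the block into 64-byte chunks and splice them onto the free list.
    storage.add_block(&block.front(), block.size(), 64);

    void * const chunk = (storage.malloc)();  // pop the first chunk off the free list
    (storage.free)(chunk);                    // push it back on
    return 0;
  }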

Modified: sandbox/guild/pool/boost/pool/singleton_pool.hpp
==============================================================================
--- sandbox/guild/pool/boost/pool/singleton_pool.hpp (original)
+++ sandbox/guild/pool/boost/pool/singleton_pool.hpp 2011-01-18 13:12:26 EST (Tue, 18 Jan 2011)
@@ -9,6 +9,31 @@
 #ifndef BOOST_SINGLETON_POOL_HPP
 #define BOOST_SINGLETON_POOL_HPP
 
+/*!
+ \file
 + \brief The singleton_pool class allows other pool interfaces
 + for types of the same size to share the same pool.
 +
 + \details singleton_pool.hpp provides a template class singleton_pool,
+ which provides access to a pool as a singleton object.\n
+ For information on other pool-based interfaces, see the other pool interfaces.\n
+
+ Notes\n
+ The underlying pool p referenced by the static functions
+ in singleton_pool is actually declared in a way that it is:\n
+
+ 1 Thread-safe if there is only one thread running before main() begins and after main() ends
+ -- all of the static functions of singleton_pool synchronize their access to p.
+
+ 2 Guaranteed to be constructed before it is used --
+ thus, the simple static object in the synopsis above would actually be an incorrect implementation.
+ The actual implementation to guarantee this is considerably more complicated.
+
+ 3 Note too that a different underlying pool p exists
+ for each different set of template parameters,
+ including implementation-specific ones.
+*/
+
 #include <boost/pool/poolfwd.hpp>
 
 // boost::pool
@@ -20,19 +45,22 @@
 
 namespace boost {
 
-//
-// The singleton_pool class allows other pool interfaces for types of the same
-// size to share the same pool
-//
-template <typename Tag, unsigned RequestedSize,
- typename UserAllocator,
- typename Mutex,
- unsigned NextSize,
- unsigned MaxSize>
+ //! singleton pool class allows other pool interfaces
+ //! for types of the same size to share the same pool.
+ template <typename Tag, unsigned RequestedSize,
 + typename UserAllocator, //!< User allocator, default = default_user_allocator_new_delete.
 + typename Mutex, //!< The typedef mutex publishes the value of the template parameter Mutex (default details::pool::default_mutex).
 + unsigned NextSize, //!< The static constant next_size publishes the value of the template parameter NextSize (default 32).
 + unsigned MaxSize> //!< Limit on the maximum size used.
 struct singleton_pool
 {
   public:
- typedef Tag tag;
+ typedef Tag tag; /*!< The Tag template parameter allows
+ different unbounded sets of singleton pools to exist.
+ For example, the pool allocators use two tag classes to ensure that the
+ two different allocator types never share the same underlying singleton pool.
+ Tag is never actually used by singleton_pool.
+ */
     typedef Mutex mutex;
     typedef UserAllocator user_allocator;
     typedef typename pool<UserAllocator>::size_type size_type;
@@ -43,10 +71,30 @@
 
   private:
     struct pool_type: Mutex
- {
 + { /*! The Mutex template parameter is the type of mutex used to protect
+ simultaneous access to the underlying Pool.
+ It is exposed so that users may declare some singleton pools normally
+ (i.e., with synchronization),
+ but some singleton pools without synchronization
+ (by specifying details::pool::null_mutex) for efficiency reasons.
+ */
       pool<UserAllocator> p;
- pool_type():p(RequestedSize, NextSize) { }
- };
+ pool_type()
+ :
+ p(RequestedSize, NextSize)
 + { /*! Constructs the underlying Pool.
 + See the pair of functions size_type get_next_size() const; and void set_next_size(size_type);
 + which allow users to explicitly read and write the next_size value.
 + This value is the number of chunks to request from the system
 + the next time that object needs to allocate system memory.
 +
 + The value of RequestedSize is passed to the underlying Pool when it is created.
 + The value of NextSize is passed to the underlying Pool when it is created.
 +
 + \pre NextSize should never be set to 0.
 + */
+ }
+ }; // struct pool_type: Mutex
 
     typedef details::pool::singleton_default<pool_type> singleton;
 
@@ -54,66 +102,67 @@
 
   public:
     static void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION()
- {
+ { //! Equivalent to SingletonPool::p.malloc(); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return (p.p.malloc)();
     }
     static void * ordered_malloc()
- {
+ { //! Equivalent to SingletonPool::p.ordered_malloc(); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return p.p.ordered_malloc();
     }
     static void * ordered_malloc(const size_type n)
- {
+ { //! Equivalent to SingletonPool::p.ordered_malloc(n); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return p.p.ordered_malloc(n);
     }
     static bool is_from(void * const ptr)
- {
+ { //! Equivalent to SingletonPool::p.is_from(chunk); synchronized.
 + //! \returns true if chunk was allocated from the underlying pool.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return p.p.is_from(ptr);
     }
     static void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const ptr)
- {
+ { //! Equivalent to SingletonPool::p.free(chunk); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       (p.p.free)(ptr);
     }
     static void ordered_free(void * const ptr)
- {
+ { //! Equivalent to SingletonPool::p.ordered_free(chunk); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       p.p.ordered_free(ptr);
     }
     static void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const ptr, const size_type n)
- {
+ { //! Equivalent to SingletonPool::p.free(chunk, n); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       (p.p.free)(ptr, n);
     }
     static void ordered_free(void * const ptr, const size_type n)
- {
+ { //! Equivalent to SingletonPool::p.ordered_free(chunk, n); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       p.p.ordered_free(ptr, n);
     }
     static bool release_memory()
- {
+ { //! Equivalent to SingletonPool::p.release_memory(); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return p.p.release_memory();
     }
     static bool purge_memory()
- {
+ { //! Equivalent to SingletonPool::p.purge_memory(); synchronized.
       pool_type & p = singleton::instance();
       details::pool::guard<Mutex> g(p);
       return p.p.purge_memory();
     }
-};
+}; // struct singleton_pool
 
 } // namespace boost
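
The synchronized wrappers above all defer to a per-tag singleton pool; here is a small sketch of declaring and using one directly (the tag type and chunk size below are made up for illustration):

  #include <boost/pool/singleton_pool.hpp>

  // Each distinct tag type gets its own underlying pool.
  struct my_pool_tag { };

  typedef boost::singleton_pool<my_pool_tag, sizeof(double)> my_pool;

  int main()
  {
    void * const p = (my_pool::malloc)();  // synchronized access to the shared pool
    if (p != 0 && my_pool::is_from(p))
      (my_pool::free)(p);
    my_pool::purge_memory();               // frees every block held by the pool
    return 0;
  }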
 

