Boost-Commit:

From: igaztanaga_at_[hidden]
Date: 2008-05-23 18:39:22


Author: igaztanaga
Date: 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
New Revision: 45702
URL: http://svn.boost.org/trac/boost/changeset/45702

Log:
#1912: some copy edits on boost.intrusive
#1932: move semantics for shared objects
#1635: Incomplete include guard in boost/intrusive
Text files modified:
   trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp | 364 +++++++++++++++++++++------------------
   trunk/boost/interprocess/allocators/detail/allocator_common.hpp | 65 +++++--
   trunk/boost/interprocess/allocators/detail/node_pool.hpp | 106 ++++++-----
   trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp | 2
   trunk/boost/interprocess/containers/flat_map.hpp | 16
   trunk/boost/interprocess/containers/vector.hpp | 14 +
   trunk/boost/interprocess/detail/file_wrapper.hpp | 4
   7 files changed, 319 insertions(+), 252 deletions(-)

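A note on the recurring rename below: "chunk" becomes "block" throughout the
pool allocators, and the old chunk-named members are kept as thin deprecated
forwarders so existing user code keeps compiling. A minimal sketch of that
forwarding pattern, with illustrative names (not the real headers):

    #include <iostream>

    class pool {
       public:
       //New, preferred spelling
       void deallocate_free_blocks()
       {  std::cout << "deallocating free blocks\n";  }

       //Deprecated spelling kept for source compatibility:
       //a thin forwarder to the new name
       void deallocate_free_chunks()
       {  this->deallocate_free_blocks();  }
    };

    int main()
    {
       pool p;
       p.deallocate_free_blocks();   //new API
       p.deallocate_free_chunks();   //old callers still compile
       return 0;
    }
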
Modified: trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp (original)
+++ trunk/boost/interprocess/allocators/detail/adaptive_node_pool.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -71,18 +71,18 @@
       std::size_t hdr_offset;
    };
 
- struct chunk_info_t
+ struct block_info_t
       :
          public hdr_offset_holder,
          public multiset_hook_t
    {
- //An intrusive list of free node from this chunk
+ //An intrusive list of free nodes from this block
       free_nodes_t free_nodes;
- friend bool operator <(const chunk_info_t &l, const chunk_info_t &r)
+ friend bool operator <(const block_info_t &l, const block_info_t &r)
       {
 // { return l.free_nodes.size() < r.free_nodes.size(); }
          //Let's order blocks first by free nodes and then by address
- //so that highest address fully free chunks are deallocated.
+ //so that highest address fully free blocks are deallocated.
          //This improves returning memory to the OS (trimming).
          const bool is_less = l.free_nodes.size() < r.free_nodes.size();
          const bool is_equal = l.free_nodes.size() == r.free_nodes.size();
@@ -90,11 +90,11 @@
       }
    };
    typedef typename bi::make_multiset
- <chunk_info_t, bi::base_hook<multiset_hook_t> >::type chunk_multiset_t;
- typedef typename chunk_multiset_t::iterator chunk_iterator;
+ <block_info_t, bi::base_hook<multiset_hook_t> >::type block_multiset_t;
+ typedef typename block_multiset_t::iterator block_iterator;
 
    static const std::size_t MaxAlign = alignment_of<node_t>::value;
- static const std::size_t HdrSize = ((sizeof(chunk_info_t)-1)/MaxAlign+1)*MaxAlign;
+ static const std::size_t HdrSize = ((sizeof(block_info_t)-1)/MaxAlign+1)*MaxAlign;
    static const std::size_t HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign;
    static std::size_t calculate_alignment
       (std::size_t overhead_percent, std::size_t real_node_size)
@@ -102,15 +102,15 @@
       //to-do: handle real_node_size != node_size
       const std::size_t divisor = overhead_percent*real_node_size;
       const std::size_t dividend = HdrOffsetSize*100;
- std::size_t elements_per_subchunk = (dividend - 1)/divisor + 1;
+ std::size_t elements_per_subblock = (dividend - 1)/divisor + 1;
       std::size_t candidate_power_of_2 =
- upper_power_of_2(elements_per_subchunk*real_node_size + HdrOffsetSize);
+ upper_power_of_2(elements_per_subblock*real_node_size + HdrOffsetSize);
       bool overhead_satisfied = false;
- //Now calculate the wors-case overhead for a subchunk
- const std::size_t max_subchunk_overhead = HdrSize + PayloadPerAllocation;
+ //Now calculate the worst-case overhead for a subblock
+ const std::size_t max_subblock_overhead = HdrSize + PayloadPerAllocation;
       while(!overhead_satisfied){
- elements_per_subchunk = (candidate_power_of_2 - max_subchunk_overhead)/real_node_size;
- const std::size_t overhead_size = candidate_power_of_2 - elements_per_subchunk*real_node_size;
+ elements_per_subblock = (candidate_power_of_2 - max_subblock_overhead)/real_node_size;
+ const std::size_t overhead_size = candidate_power_of_2 - elements_per_subblock*real_node_size;
          if(overhead_size*100/candidate_power_of_2 < overhead_percent){
             overhead_satisfied = true;
          }
@@ -121,30 +121,30 @@
       return candidate_power_of_2;
    }
 
- static void calculate_num_subchunks
- (std::size_t alignment, std::size_t real_node_size, std::size_t elements_per_chunk
- ,std::size_t &num_subchunks, std::size_t &real_num_node, std::size_t overhead_percent)
- {
- std::size_t elements_per_subchunk = (alignment - HdrOffsetSize)/real_node_size;
- std::size_t possible_num_subchunk = (elements_per_chunk - 1)/elements_per_subchunk + 1;
- std::size_t hdr_subchunk_elements = (alignment - HdrSize - PayloadPerAllocation)/real_node_size;
- while(((possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements) < elements_per_chunk){
- ++possible_num_subchunk;
+ static void calculate_num_subblocks
+ (std::size_t alignment, std::size_t real_node_size, std::size_t elements_per_block
+ ,std::size_t &num_subblocks, std::size_t &real_num_node, std::size_t overhead_percent)
+ {
+ std::size_t elements_per_subblock = (alignment - HdrOffsetSize)/real_node_size;
+ std::size_t possible_num_subblock = (elements_per_block - 1)/elements_per_subblock + 1;
+ std::size_t hdr_subblock_elements = (alignment - HdrSize - PayloadPerAllocation)/real_node_size;
+ while(((possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements) < elements_per_block){
+ ++possible_num_subblock;
       }
- elements_per_subchunk = (alignment - HdrOffsetSize)/real_node_size;
+ elements_per_subblock = (alignment - HdrOffsetSize)/real_node_size;
       bool overhead_satisfied = false;
       while(!overhead_satisfied){
- const std::size_t total_data = (elements_per_subchunk*(possible_num_subchunk-1) + hdr_subchunk_elements)*real_node_size;
- const std::size_t total_size = alignment*possible_num_subchunk;
+ const std::size_t total_data = (elements_per_subblock*(possible_num_subblock-1) + hdr_subblock_elements)*real_node_size;
+ const std::size_t total_size = alignment*possible_num_subblock;
          if((total_size - total_data)*100/total_size < overhead_percent){
             overhead_satisfied = true;
          }
          else{
- ++possible_num_subchunk;
+ ++possible_num_subblock;
          }
       }
- num_subchunks = possible_num_subchunk;
- real_num_node = (possible_num_subchunk-1)*elements_per_subchunk + hdr_subchunk_elements;
+ num_subblocks = possible_num_subblock;
+ real_num_node = (possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements;
    }
 
    public:
@@ -154,27 +154,27 @@
    //!Constructor from a segment manager. Never throws
    private_adaptive_node_pool_impl
       ( segment_manager_base_type *segment_mngr_base, std::size_t node_size
- , std::size_t nodes_per_chunk, std::size_t max_free_chunks
+ , std::size_t nodes_per_block, std::size_t max_free_blocks
       , unsigned char overhead_percent
       )
- : m_max_free_chunks(max_free_chunks)
+ : m_max_free_blocks(max_free_blocks)
    , m_real_node_size(lcm(node_size, std::size_t(alignment_of<node_t>::value)))
    //Round the size to a power of two value.
    //This is the total memory size (including payload) that we want to
    //allocate from the general-purpose allocator
- , m_real_chunk_alignment(calculate_alignment(overhead_percent, m_real_node_size))
- //This is the real number of nodes per chunk
- , m_num_subchunks(0)
+ , m_real_block_alignment(calculate_alignment(overhead_percent, m_real_node_size))
+ //This is the real number of nodes per block
+ , m_num_subblocks(0)
    , m_real_num_node(0)
       //General purpose allocator
    , mp_segment_mngr_base(segment_mngr_base)
- , m_chunk_multiset()
- , m_totally_free_chunks(0)
+ , m_block_multiset()
+ , m_totally_free_blocks(0)
    {
- calculate_num_subchunks(m_real_chunk_alignment, m_real_node_size, nodes_per_chunk, m_num_subchunks, m_real_num_node, overhead_percent);
+ calculate_num_subblocks(m_real_block_alignment, m_real_node_size, nodes_per_block, m_num_subblocks, m_real_num_node, overhead_percent);
    }
 
- //!Destructor. Deallocates all allocated chunks. Never throws
+ //!Destructor. Deallocates all allocated blocks. Never throws
    ~private_adaptive_node_pool_impl()
    { priv_clear(); }
 
@@ -190,8 +190,8 @@
    {
       priv_invariants();
       //If there are no free nodes we allocate a new block
- if (m_chunk_multiset.empty()){
- priv_alloc_chunk(1);
+ if (m_block_multiset.empty()){
+ priv_alloc_block(1);
       }
      //We take the first free node (the multiset can't be empty)
       return priv_take_first_node();
@@ -200,11 +200,11 @@
    //!Deallocates an array pointed by ptr. Never throws
    void deallocate_node(void *pElem)
    {
- this->priv_reinsert_nodes_in_chunk
+ this->priv_reinsert_nodes_in_block
          (multiallocation_iterator::create_simple_range(pElem));
- //Update free chunk count
- if(m_totally_free_chunks > m_max_free_chunks){
- this->priv_deallocate_free_chunks(m_max_free_chunks);
+ //Update free block count
+ if(m_totally_free_blocks > m_max_free_blocks){
+ this->priv_deallocate_free_blocks(m_max_free_blocks);
       }
       priv_invariants();
    }
@@ -215,17 +215,32 @@
    {
       try{
          priv_invariants();
- for(std::size_t i = 0; i != n; ++i){
- //If there are no free nodes we allocate all needed chunks
- if (m_chunk_multiset.empty()){
- priv_alloc_chunk(((n - i) - 1)/m_real_num_node + 1);
+ std::size_t i = 0;
+ while(i != n){
+ //If there are no free nodes we allocate all needed blocks
+ if (m_block_multiset.empty()){
+ priv_alloc_block(((n - i) - 1)/m_real_num_node + 1);
             }
- nodes.push_front(priv_take_first_node());
+ free_nodes_t &free_nodes = m_block_multiset.begin()->free_nodes;
+ const std::size_t free_nodes_count_before = free_nodes.size();
+ if(free_nodes_count_before == m_real_num_node){
+ --m_totally_free_blocks;
+ }
+ const std::size_t num_elems = ((n-i) < free_nodes_count_before) ? (n-i) : free_nodes_count_before;
+ for(std::size_t j = 0; j != num_elems; ++j){
+ void *new_node = &free_nodes.front();
+ free_nodes.pop_front();
+ nodes.push_back(new_node);
+ }
+
+ if(free_nodes.empty()){
+ m_block_multiset.erase(m_block_multiset.begin());
+ }
+ i += num_elems;
          }
       }
       catch(...){
          this->deallocate_nodes(nodes, nodes.size());
- this->priv_deallocate_free_chunks(m_max_free_chunks);
          throw;
       }
       priv_invariants();
@@ -259,20 +274,20 @@
    //!Deallocates the nodes pointed by the multiallocation iterator. Never throws
    void deallocate_nodes(multiallocation_iterator it)
    {
- this->priv_reinsert_nodes_in_chunk(it);
- if(m_totally_free_chunks > m_max_free_chunks){
- this->priv_deallocate_free_chunks(m_max_free_chunks);
+ this->priv_reinsert_nodes_in_block(it);
+ if(m_totally_free_blocks > m_max_free_blocks){
+ this->priv_deallocate_free_blocks(m_max_free_blocks);
       }
    }
 
- void deallocate_free_chunks()
- { this->priv_deallocate_free_chunks(0); }
+ void deallocate_free_blocks()
+ { this->priv_deallocate_free_blocks(0); }
 
    std::size_t num_free_nodes()
    {
- typedef typename chunk_multiset_t::const_iterator citerator;
+ typedef typename block_multiset_t::const_iterator citerator;
       std::size_t count = 0;
- citerator it (m_chunk_multiset.begin()), itend(m_chunk_multiset.end());
+ citerator it (m_block_multiset.begin()), itend(m_block_multiset.end());
       for(; it != itend; ++it){
          count += it->free_nodes.size();
       }
@@ -281,76 +296,80 @@
 
    void swap(private_adaptive_node_pool_impl &other)
    {
- assert(m_max_free_chunks == other.m_max_free_chunks);
+ assert(m_max_free_blocks == other.m_max_free_blocks);
       assert(m_real_node_size == other.m_real_node_size);
- assert(m_real_chunk_alignment == other.m_real_chunk_alignment);
+ assert(m_real_block_alignment == other.m_real_block_alignment);
       assert(m_real_num_node == other.m_real_num_node);
       std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
- std::swap(m_totally_free_chunks, other.m_totally_free_chunks);
- m_chunk_multiset.swap(other.m_chunk_multiset);
+ std::swap(m_totally_free_blocks, other.m_totally_free_blocks);
+ m_block_multiset.swap(other.m_block_multiset);
    }
 
+ //Deprecated, use deallocate_free_blocks
+ void deallocate_free_chunks()
+ { this->priv_deallocate_free_blocks(0); }
+
    private:
- void priv_deallocate_free_chunks(std::size_t max_free_chunks)
+ void priv_deallocate_free_blocks(std::size_t max_free_blocks)
    {
       priv_invariants();
       //Now check if we've reached the free nodes limit
- //and check if we have free chunks. If so, deallocate as much
+ //and check if we have free blocks. If so, deallocate as much
       //as we can to stay below the limit
- for( chunk_iterator itend = m_chunk_multiset.end()
- ; m_totally_free_chunks > max_free_chunks
- ; --m_totally_free_chunks
+ for( block_iterator itend = m_block_multiset.end()
+ ; m_totally_free_blocks > max_free_blocks
+ ; --m_totally_free_blocks
          ){
- assert(!m_chunk_multiset.empty());
- chunk_iterator it = itend;
+ assert(!m_block_multiset.empty());
+ block_iterator it = itend;
          --it;
          std::size_t num_nodes = it->free_nodes.size();
          assert(num_nodes == m_real_num_node);
          (void)num_nodes;
- m_chunk_multiset.erase_and_dispose
- (it, chunk_destroyer(this));
+ m_block_multiset.erase_and_dispose
+ (it, block_destroyer(this));
       }
    }
 
- void priv_reinsert_nodes_in_chunk(multiallocation_iterator it)
+ void priv_reinsert_nodes_in_block(multiallocation_iterator it)
    {
       multiallocation_iterator itend;
- chunk_iterator chunk_it(m_chunk_multiset.end());
+ block_iterator block_it(m_block_multiset.end());
       while(it != itend){
          void *pElem = &*it;
          ++it;
          priv_invariants();
- chunk_info_t *chunk_info = this->priv_chunk_from_node(pElem);
- assert(chunk_info->free_nodes.size() < m_real_num_node);
+ block_info_t *block_info = this->priv_block_from_node(pElem);
+ assert(block_info->free_nodes.size() < m_real_num_node);
          //We put the node at the beginning of the free node list
          node_t * to_deallocate = static_cast<node_t*>(pElem);
- chunk_info->free_nodes.push_front(*to_deallocate);
+ block_info->free_nodes.push_front(*to_deallocate);
 
- chunk_iterator this_chunk(chunk_multiset_t::s_iterator_to(*chunk_info));
- chunk_iterator next_chunk(this_chunk);
- ++next_chunk;
+ block_iterator this_block(block_multiset_t::s_iterator_to(*block_info));
+ block_iterator next_block(this_block);
+ ++next_block;
 
- //Cache the free nodes from the chunk
- std::size_t this_chunk_free_nodes = this_chunk->free_nodes.size();
+ //Cache the free nodes from the block
+ std::size_t this_block_free_nodes = this_block->free_nodes.size();
 
- if(this_chunk_free_nodes == 1){
- m_chunk_multiset.insert(m_chunk_multiset.begin(), *chunk_info);
+ if(this_block_free_nodes == 1){
+ m_block_multiset.insert(m_block_multiset.begin(), *block_info);
          }
          else{
- chunk_iterator next_chunk(this_chunk);
- ++next_chunk;
- if(next_chunk != chunk_it){
- std::size_t next_free_nodes = next_chunk->free_nodes.size();
- if(this_chunk_free_nodes > next_free_nodes){
- //Now move the chunk to the new position
- m_chunk_multiset.erase(this_chunk);
- m_chunk_multiset.insert(*chunk_info);
+ block_iterator next_block(this_block);
+ ++next_block;
+ if(next_block != block_it){
+ std::size_t next_free_nodes = next_block->free_nodes.size();
+ if(this_block_free_nodes > next_free_nodes){
+ //Now move the block to the new position
+ m_block_multiset.erase(this_block);
+ m_block_multiset.insert(*block_info);
                }
             }
          }
- //Update free chunk count
- if(this_chunk_free_nodes == m_real_num_node){
- ++m_totally_free_chunks;
+ //Update free block count
+ if(this_block_free_nodes == m_real_num_node){
+ ++m_totally_free_blocks;
          }
          priv_invariants();
       }
@@ -358,40 +377,40 @@
 
    node_t *priv_take_first_node()
    {
- assert(m_chunk_multiset.begin() != m_chunk_multiset.end());
+ assert(m_block_multiset.begin() != m_block_multiset.end());
      //We take the first free node (the multiset can't be empty)
- free_nodes_t &free_nodes = m_chunk_multiset.begin()->free_nodes;
+ free_nodes_t &free_nodes = m_block_multiset.begin()->free_nodes;
       node_t *first_node = &free_nodes.front();
       const std::size_t free_nodes_count = free_nodes.size();
       assert(0 != free_nodes_count);
       free_nodes.pop_front();
       if(free_nodes_count == 1){
- m_chunk_multiset.erase(m_chunk_multiset.begin());
+ m_block_multiset.erase(m_block_multiset.begin());
       }
       else if(free_nodes_count == m_real_num_node){
- --m_totally_free_chunks;
+ --m_totally_free_blocks;
       }
       priv_invariants();
       return first_node;
    }
 
- class chunk_destroyer;
- friend class chunk_destroyer;
+ class block_destroyer;
+ friend class block_destroyer;
 
- class chunk_destroyer
+ class block_destroyer
    {
       public:
- chunk_destroyer(const private_adaptive_node_pool_impl *impl)
+ block_destroyer(const private_adaptive_node_pool_impl *impl)
          : mp_impl(impl)
       {}
 
- void operator()(typename chunk_multiset_t::pointer to_deallocate)
+ void operator()(typename block_multiset_t::pointer to_deallocate)
       {
          std::size_t free_nodes = to_deallocate->free_nodes.size();
          (void)free_nodes;
          assert(free_nodes == mp_impl->m_real_num_node);
          assert(0 == to_deallocate->hdr_offset);
- hdr_offset_holder *hdr_off_holder = mp_impl->priv_first_subchunk_from_chunk((chunk_info_t*)detail::get_pointer(to_deallocate));
+ hdr_offset_holder *hdr_off_holder = mp_impl->priv_first_subblock_from_block((block_info_t*)detail::get_pointer(to_deallocate));
          mp_impl->mp_segment_mngr_base->deallocate(hdr_off_holder);
       }
       const private_adaptive_node_pool_impl *mp_impl;
@@ -403,12 +422,12 @@
    #ifdef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
    #undef BOOST_INTERPROCESS_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
    {
- //We iterate through the chunk list to free the memory
- chunk_iterator it(m_chunk_multiset.begin()),
- itend(m_chunk_multiset.end()), to_deallocate;
+ //We iterate through the block list to free the memory
+ block_iterator it(m_block_multiset.begin()),
+ itend(m_block_multiset.end()), to_deallocate;
       if(it != itend){
          for(++it; it != itend; ++it){
- chunk_iterator prev(it);
+ block_iterator prev(it);
             --prev;
             std::size_t sp = prev->free_nodes.size(),
                         si = it->free_nodes.size();
@@ -419,35 +438,35 @@
 
       {
          //Check that the total free nodes are correct
- it = m_chunk_multiset.begin();
- itend = m_chunk_multiset.end();
+ it = m_block_multiset.begin();
+ itend = m_block_multiset.end();
          std::size_t total_free_nodes = 0;
          for(; it != itend; ++it){
             total_free_nodes += it->free_nodes.size();
          }
- assert(total_free_nodes >= m_totally_free_chunks*m_real_num_node);
+ assert(total_free_nodes >= m_totally_free_blocks*m_real_num_node);
       }
 
       {
- //Check that the total totally free chunks are correct
- it = m_chunk_multiset.begin();
- itend = m_chunk_multiset.end();
- std::size_t total_free_chunks = 0;
+ //Check that the total totally free blocks are correct
+ it = m_block_multiset.begin();
+ itend = m_block_multiset.end();
+ std::size_t total_free_blocks = 0;
          for(; it != itend; ++it){
- total_free_chunks += (it->free_nodes.size() == m_real_num_node);
+ total_free_blocks += (it->free_nodes.size() == m_real_num_node);
          }
- assert(total_free_chunks == m_totally_free_chunks);
+ assert(total_free_blocks == m_totally_free_blocks);
       }
       {
       //Check that header offsets are correct
- it = m_chunk_multiset.begin();
+ it = m_block_multiset.begin();
       for(; it != itend; ++it){
- hdr_offset_holder *hdr_off_holder = priv_first_subchunk_from_chunk(&*it);
- for(std::size_t i = 0, max = m_num_subchunks; i < max; ++i){
+ hdr_offset_holder *hdr_off_holder = priv_first_subblock_from_block(&*it);
+ for(std::size_t i = 0, max = m_num_subblocks; i < max; ++i){
             assert(hdr_off_holder->hdr_offset == std::size_t((char*)&*it- (char*)hdr_off_holder));
- assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
- assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
- hdr_off_holder = (hdr_offset_holder *)((char*)hdr_off_holder + m_real_chunk_alignment);
+ assert(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
+ assert(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
+ hdr_off_holder = (hdr_offset_holder *)((char*)hdr_off_holder + m_real_block_alignment);
          }
       }
       }
@@ -460,72 +479,72 @@
    void priv_clear()
    {
       #ifndef NDEBUG
- chunk_iterator it = m_chunk_multiset.begin();
- chunk_iterator itend = m_chunk_multiset.end();
+ block_iterator it = m_block_multiset.begin();
+ block_iterator itend = m_block_multiset.end();
       std::size_t num_free_nodes = 0;
       for(; it != itend; ++it){
          //Check for memory leak
          assert(it->free_nodes.size() == m_real_num_node);
          ++num_free_nodes;
       }
- assert(num_free_nodes == m_totally_free_chunks);
+ assert(num_free_nodes == m_totally_free_blocks);
       #endif
       priv_invariants();
- m_chunk_multiset.clear_and_dispose
- (chunk_destroyer(this));
- m_totally_free_chunks = 0;
+ m_block_multiset.clear_and_dispose
+ (block_destroyer(this));
+ m_totally_free_blocks = 0;
    }
 
- chunk_info_t *priv_chunk_from_node(void *node) const
+ block_info_t *priv_block_from_node(void *node) const
    {
       hdr_offset_holder *hdr_off_holder =
- (hdr_offset_holder*)((std::size_t)node & std::size_t(~(m_real_chunk_alignment - 1)));
- assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
- assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
- chunk_info_t *chunk = (chunk_info_t *)(((char*)hdr_off_holder) + hdr_off_holder->hdr_offset);
- assert(chunk->hdr_offset == 0);
- return chunk;
+ (hdr_offset_holder*)((std::size_t)node & std::size_t(~(m_real_block_alignment - 1)));
+ assert(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
+ assert(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
+ block_info_t *block = (block_info_t *)(((char*)hdr_off_holder) + hdr_off_holder->hdr_offset);
+ assert(block->hdr_offset == 0);
+ return block;
    }
 
- hdr_offset_holder *priv_first_subchunk_from_chunk(chunk_info_t *chunk) const
+ hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block) const
    {
       hdr_offset_holder *hdr_off_holder = (hdr_offset_holder*)
- (((char*)chunk) - (m_num_subchunks-1)*m_real_chunk_alignment);
- assert(hdr_off_holder->hdr_offset == std::size_t((char*)chunk - (char*)hdr_off_holder));
- assert(0 == ((std::size_t)hdr_off_holder & (m_real_chunk_alignment - 1)));
- assert(0 == (hdr_off_holder->hdr_offset & (m_real_chunk_alignment - 1)));
+ (((char*)block) - (m_num_subblocks-1)*m_real_block_alignment);
+ assert(hdr_off_holder->hdr_offset == std::size_t((char*)block - (char*)hdr_off_holder));
+ assert(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
+ assert(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
       return hdr_off_holder;
    }
 
- //!Allocates a several chunks of nodes. Can throw boost::interprocess::bad_alloc
- void priv_alloc_chunk(std::size_t n)
+ //!Allocates several blocks of nodes. Can throw boost::interprocess::bad_alloc
+ void priv_alloc_block(std::size_t n)
    {
- std::size_t real_chunk_size = m_real_chunk_alignment*m_num_subchunks - SegmentManagerBase::PayloadPerAllocation;
- std::size_t elements_per_subchunk = (m_real_chunk_alignment - HdrOffsetSize)/m_real_node_size;
- std::size_t hdr_subchunk_elements = (m_real_chunk_alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/m_real_node_size;
+ std::size_t real_block_size = m_real_block_alignment*m_num_subblocks - SegmentManagerBase::PayloadPerAllocation;
+ std::size_t elements_per_subblock = (m_real_block_alignment - HdrOffsetSize)/m_real_node_size;
+ std::size_t hdr_subblock_elements = (m_real_block_alignment - HdrSize - SegmentManagerBase::PayloadPerAllocation)/m_real_node_size;
 
       for(std::size_t i = 0; i != n; ++i){
         //We allocate a new NodeBlock and put it as the last
          //element of the tree
          char *mem_address = detail::char_ptr_cast
- (mp_segment_mngr_base->allocate_aligned(real_chunk_size, m_real_chunk_alignment));
+ (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment));
          if(!mem_address) throw std::bad_alloc();
- ++m_totally_free_chunks;
+ ++m_totally_free_blocks;
 
- //First initialize header information on the last subchunk
- char *hdr_addr = mem_address + m_real_chunk_alignment*(m_num_subchunks-1);
- chunk_info_t *c_info = new(hdr_addr)chunk_info_t;
+ //First initialize header information on the last subblock
+ char *hdr_addr = mem_address + m_real_block_alignment*(m_num_subblocks-1);
+ block_info_t *c_info = new(hdr_addr)block_info_t;
          //Some structural checks
          assert(static_cast<void*>(&static_cast<hdr_offset_holder*>(c_info)->hdr_offset) ==
                 static_cast<void*>(c_info));
          typename free_nodes_t::iterator prev_insert_pos = c_info->free_nodes.before_begin();
- for( std::size_t subchunk = 0, maxsubchunk = m_num_subchunks - 1
- ; subchunk < maxsubchunk
- ; ++subchunk, mem_address += m_real_chunk_alignment){
+ for( std::size_t subblock = 0, maxsubblock = m_num_subblocks - 1
+ ; subblock < maxsubblock
+ ; ++subblock, mem_address += m_real_block_alignment){
             //Initialize header offset mark
             new(mem_address) hdr_offset_holder(std::size_t(hdr_addr - mem_address));
             char *pNode = mem_address + HdrOffsetSize;
- for(std::size_t i = 0; i < elements_per_subchunk; ++i){
+ for(std::size_t i = 0; i < elements_per_subblock; ++i){
                prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
                pNode += m_real_node_size;
             }
@@ -534,13 +553,13 @@
             char *pNode = hdr_addr + HdrSize;
             //We initialize all Nodes in Node Block to insert
             //them in the free Node list
- for(std::size_t i = 0; i < hdr_subchunk_elements; ++i){
+ for(std::size_t i = 0; i < hdr_subblock_elements; ++i){
                prev_insert_pos = c_info->free_nodes.insert_after(prev_insert_pos, *new (pNode) node_t);
                pNode += m_real_node_size;
             }
          }
- //Insert the chunk after the free node list is full
- m_chunk_multiset.insert(m_chunk_multiset.end(), *c_info);
+ //Insert the block after the free node list is full
+ m_block_multiset.insert(m_block_multiset.end(), *c_info);
       }
    }
 
@@ -548,25 +567,25 @@
    typedef typename pointer_to_other
       <void_pointer, segment_manager_base_type>::type segment_mngr_base_ptr_t;
 
- const std::size_t m_max_free_chunks;
+ const std::size_t m_max_free_blocks;
    const std::size_t m_real_node_size;
    //Round the size to a power of two value.
    //This is the total memory size (including payload) that we want to
    //allocate from the general-purpose allocator
- const std::size_t m_real_chunk_alignment;
- std::size_t m_num_subchunks;
- //This is the real number of nodes per chunk
+ const std::size_t m_real_block_alignment;
+ std::size_t m_num_subblocks;
+ //This is the real number of nodes per block
    //const
    std::size_t m_real_num_node;
    segment_mngr_base_ptr_t mp_segment_mngr_base;//Segment manager
- chunk_multiset_t m_chunk_multiset; //Intrusive chunk list
- std::size_t m_totally_free_chunks; //Free chunks
+ block_multiset_t m_block_multiset; //Intrusive block list
+ std::size_t m_totally_free_blocks; //Free blocks
 };
 
 template< class SegmentManager
         , std::size_t NodeSize
- , std::size_t NodesPerChunk
- , std::size_t MaxFreeChunks
+ , std::size_t NodesPerBlock
+ , std::size_t MaxFreeBlocks
         , unsigned char OverheadPercent
>
 class private_adaptive_node_pool
@@ -583,11 +602,14 @@
    public:
    typedef SegmentManager segment_manager;
 
- static const std::size_t nodes_per_chunk = NodesPerChunk;
+ static const std::size_t nodes_per_block = NodesPerBlock;
+
+ //Deprecated, use nodes_per_block
+ static const std::size_t nodes_per_chunk = NodesPerBlock;
 
    //!Constructor from a segment manager. Never throws
    private_adaptive_node_pool(segment_manager *segment_mngr)
- : base_t(segment_mngr, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent)
+ : base_t(segment_mngr, NodeSize, NodesPerBlock, MaxFreeBlocks, OverheadPercent)
    {}
 
    //!Returns the segment manager. Never throws
@@ -598,22 +620,22 @@
 //!Pooled shared memory allocator using adaptive pool. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time
+//!nodes allocated per block (NodesPerBlock) are known at compile time
 template< class SegmentManager
         , std::size_t NodeSize
- , std::size_t NodesPerChunk
- , std::size_t MaxFreeChunks
+ , std::size_t NodesPerBlock
+ , std::size_t MaxFreeBlocks
         , unsigned char OverheadPercent
>
 class shared_adaptive_node_pool
    : public detail::shared_pool_impl
       < private_adaptive_node_pool
- <SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
+ <SegmentManager, NodeSize, NodesPerBlock, MaxFreeBlocks, OverheadPercent>
>
 {
    typedef detail::shared_pool_impl
       < private_adaptive_node_pool
- <SegmentManager, NodeSize, NodesPerChunk, MaxFreeChunks, OverheadPercent>
+ <SegmentManager, NodeSize, NodesPerBlock, MaxFreeBlocks, OverheadPercent>
> base_t;
    public:
    shared_adaptive_node_pool(SegmentManager *segment_mgnr)

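For context on priv_block_from_node above: every subblock starts on a
power-of-two boundary (m_real_block_alignment), so a node address maps to its
subblock base with a single mask, and the hdr_offset stored at that base
points to the block_info_t header in the block's last subblock. A standalone
sketch of just the masking arithmetic (the address and alignment are made up):

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    int main()
    {
       //Equivalent of the cast in priv_block_from_node:
       //(hdr_offset_holder*)((std::size_t)node & ~(m_real_block_alignment - 1))
       const std::size_t alignment = 4096;        //must be a power of two
       const std::size_t node_addr = 0x12345678;  //a node inside some subblock

       const std::size_t subblock_base = node_addr & ~(alignment - 1);

       assert(0 == (subblock_base & (alignment - 1)));  //base is aligned
       assert(node_addr - subblock_base < alignment);   //node is inside it

       std::cout << std::hex << subblock_base << std::endl;  //prints 12345000
       return 0;
    }
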
Modified: trunk/boost/interprocess/allocators/detail/allocator_common.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/allocator_common.hpp (original)
+++ trunk/boost/interprocess/allocators/detail/allocator_common.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -294,7 +294,7 @@
          (command, limit_size, preferred_size, received_size, detail::get_pointer(reuse));
    }
 
- //!Allocates many elements of size elem_size in a contiguous chunk
+ //!Allocates many elements of size elem_size in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -307,7 +307,7 @@
    }
 
    //!Allocates n_elements elements, each one of size elem_sizes[i]in a
- //!contiguous chunk
+ //!contiguous block
    //!of memory. The elements must be deallocated
    multiallocation_iterator allocate_many(const size_type *elem_sizes, size_type n_elements)
    {
@@ -315,7 +315,7 @@
          (this->derived()->get_segment_manager()->allocate_many(elem_sizes, n_elements, sizeof(T)));
    }
 
- //!Allocates many elements of size elem_size in a contiguous chunk
+ //!Allocates many elements of size elem_size in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -439,7 +439,7 @@
       return pointer(static_cast<value_type*>(pool->allocate_node()));
    }
 
- //!Allocates many elements of size == 1 in a contiguous chunk
+ //!Allocates many elements of size == 1 in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -462,7 +462,7 @@
       pool->deallocate_node(detail::get_pointer(p));
    }
 
- //!Allocates many elements of size == 1 in a contiguous chunk
+ //!Allocates many elements of size == 1 in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -471,9 +471,14 @@
    void deallocate_individual(multiallocation_iterator it)
    { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_nodes(it.base()); }
 
- //!Deallocates all free chunks of the pool
+ //!Deallocates all free blocks of the pool
+ void deallocate_free_blocks()
+ { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks(); }
+
+ //!Deprecated, use deallocate_free_blocks.
+ //!Deallocates all free chunks of the pool.
    void deallocate_free_chunks()
- { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_chunks(); }
+ { node_pool<0>::get(this->derived()->get_node_pool())->deallocate_free_blocks(); }
 };
 
 template<class T, class NodePool, unsigned int Version>
@@ -576,7 +581,7 @@
    pointer allocate_one()
    { return pointer(static_cast<value_type*>(this->m_cache.cached_allocation())); }
 
- //!Allocates many elements of size == 1 in a contiguous chunk
+ //!Allocates many elements of size == 1 in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -591,7 +596,7 @@
    void deallocate_one(const pointer &p)
    { this->m_cache.cached_deallocation(detail::get_pointer(p)); }
 
- //!Allocates many elements of size == 1 in a contiguous chunk
+ //!Allocates many elements of size == 1 in a contiguous block
    //!of memory. The minimum number to be allocated is min_elements,
    //!the preferred and maximum number is
    //!preferred_elements. The number of actually allocated elements is
@@ -600,9 +605,9 @@
    void deallocate_individual(multiallocation_iterator it)
    { m_cache.cached_deallocation(it.base()); }
 
- //!Deallocates all free chunks of the pool
- void deallocate_free_chunks()
- { m_cache.get_node_pool()->deallocate_free_chunks(); }
+ //!Deallocates all free blocks of the pool
+ void deallocate_free_blocks()
+ { m_cache.get_node_pool()->deallocate_free_blocks(); }
 
    //!Swaps allocators. Does not throw. If each allocator is placed in a
    //!different shared memory segments, the result is undefined.
@@ -616,6 +621,10 @@
    void deallocate_cache()
    { m_cache.deallocate_all_cached_nodes(); }
 
+ //!Deprecated use deallocate_free_blocks.
+ void deallocate_free_chunks()
+ { m_cache.get_node_pool()->deallocate_free_blocks(); }
+
    /// @cond
    private:
    cache_impl<node_pool_t> m_cache;
@@ -639,7 +648,7 @@
 //!Pooled shared memory allocator using adaptive pool. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time
+//!nodes allocated per block (NodesPerBlock) are known at compile time
 template<class private_node_allocator_t>
 class shared_pool_impl
    : public private_node_allocator_t
@@ -661,7 +670,7 @@
       : private_node_allocator_t(segment_mngr)
    {}
 
- //!Destructor. Deallocates all allocated chunks. Never throws
+ //!Destructor. Deallocates all allocated blocks. Never throws
    ~shared_pool_impl()
    {}
 
@@ -730,24 +739,24 @@
       private_node_allocator_t::deallocate_nodes(it);
    }
 
- //!Deallocates all the free chunks of memory. Never throws
- void deallocate_free_chunks()
+ //!Deallocates all the free blocks of memory. Never throws
+ void deallocate_free_blocks()
    {
       //-----------------------
       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
       //-----------------------
- private_node_allocator_t::deallocate_free_chunks();
+ private_node_allocator_t::deallocate_free_blocks();
    }
 
    //!Deallocates all used memory from the common pool.
    //!Precondition: all nodes allocated from this pool should
    //!already be deallocated. Otherwise, undefined behavior. Never throws
- void purge_chunks()
+ void purge_blocks()
    {
       //-----------------------
       boost::interprocess::scoped_lock<mutex_type> guard(m_header);
       //-----------------------
- private_node_allocator_t::purge_chunks();
+ private_node_allocator_t::purge_blocks();
    }
 
    //!Increments internal reference count and returns new count. Never throws
@@ -769,6 +778,24 @@
       return --m_header.m_usecount;
    }
 
+ //!Deprecated, use deallocate_free_blocks.
+ void deallocate_free_chunks()
+ {
+ //-----------------------
+ boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+ //-----------------------
+ private_node_allocator_t::deallocate_free_blocks();
+ }
+
+ //!Deprecated, use purge_blocks.
+ void purge_chunks()
+ {
+ //-----------------------
+ boost::interprocess::scoped_lock<mutex_type> guard(m_header);
+ //-----------------------
+ private_node_allocator_t::purge_blocks();
+ }
+
    private:
    //!This struct includes needed data and derives from
    //!interprocess_mutex to allow EBO when using null_mutex

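The shape of shared_pool_impl above, including the new deprecated methods, is
lock-then-forward: every public operation takes a scoped_lock on the m_header
mutex and delegates to the lock-free private allocator. A dependency-free
sketch of that wrapper (the mutex and lock below are stand-ins, not the real
interprocess types):

    #include <iostream>

    //Stand-ins for interprocess_mutex and boost::interprocess::scoped_lock
    struct mutex_type { void lock() {} void unlock() {} };
    template<class Mutex> struct scoped_lock {
       explicit scoped_lock(Mutex &m) : m_mut(m) { m_mut.lock(); }
       ~scoped_lock() { m_mut.unlock(); }
       private: Mutex &m_mut;
    };

    //The private pool does no locking of its own
    struct private_pool {
       void purge_blocks() { std::cout << "purging blocks\n"; }
    };

    struct shared_pool : private_pool {
       //Lock the header mutex, then forward to the private implementation
       void purge_blocks()
       {
          scoped_lock<mutex_type> guard(m_mutex);
          private_pool::purge_blocks();
       }
       //Deprecated spelling: same locking discipline, then forward
       void purge_chunks() { this->purge_blocks(); }
       private: mutex_type m_mutex;
    };

    int main()
    {
       shared_pool p;
       p.purge_chunks();
       return 0;
    }
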
Modified: trunk/boost/interprocess/allocators/detail/node_pool.hpp
==============================================================================
--- trunk/boost/interprocess/allocators/detail/node_pool.hpp (original)
+++ trunk/boost/interprocess/allocators/detail/node_pool.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -60,30 +60,30 @@
    typedef typename bi::make_slist
       < node_t, bi::base_hook<slist_hook_t>
       , bi::linear<true>
- , bi::constant_time_size<false> >::type chunkslist_t;
+ , bi::constant_time_size<false> >::type blockslist_t;
    public:
 
    //!Segment manager typedef
    typedef SegmentManagerBase segment_manager_base_type;
 
    //!Constructor from a segment manager. Never throws
- private_node_pool_impl(segment_manager_base_type *segment_mngr_base, std::size_t node_size, std::size_t nodes_per_chunk)
- : m_nodes_per_chunk(nodes_per_chunk)
+ private_node_pool_impl(segment_manager_base_type *segment_mngr_base, std::size_t node_size, std::size_t nodes_per_block)
+ : m_nodes_per_block(nodes_per_block)
    , m_real_node_size(detail::lcm(node_size, std::size_t(alignment_of<node_t>::value)))
       //General purpose allocator
    , mp_segment_mngr_base(segment_mngr_base)
- , m_chunklist()
+ , m_blocklist()
    , m_freelist()
       //Debug node count
    , m_allocated(0)
    {}
 
- //!Destructor. Deallocates all allocated chunks. Never throws
+ //!Destructor. Deallocates all allocated blocks. Never throws
    ~private_node_pool_impl()
- { this->purge_chunks(); }
+ { this->purge_blocks(); }
 
    std::size_t get_real_num_node() const
- { return m_nodes_per_chunk; }
+ { return m_nodes_per_block; }
 
    //!Returns the segment manager. Never throws
    segment_manager_base_type* get_segment_manager_base()const
@@ -94,7 +94,7 @@
    {
       //If there are no free nodes we allocate a new block
       if (m_freelist.empty())
- priv_alloc_chunk();
+ priv_alloc_block();
       //We take the first free node
       node_t *n = (node_t*)&m_freelist.front();
       m_freelist.pop_front();
@@ -173,36 +173,36 @@
       }
    }
 
- //!Deallocates all the free chunks of memory. Never throws
- void deallocate_free_chunks()
+ //!Deallocates all the free blocks of memory. Never throws
+ void deallocate_free_blocks()
    {
       typedef typename free_nodes_t::iterator nodelist_iterator;
- typename chunkslist_t::iterator bit(m_chunklist.before_begin()),
- it(m_chunklist.begin()),
- itend(m_chunklist.end());
+ typename blockslist_t::iterator bit(m_blocklist.before_begin()),
+ it(m_blocklist.begin()),
+ itend(m_blocklist.end());
       free_nodes_t backup_list;
       nodelist_iterator backup_list_last = backup_list.before_begin();
 
       //Execute the algorithm and get an iterator to the last value
       std::size_t blocksize = detail::get_rounded_size
- (m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
+ (m_real_node_size*m_nodes_per_block, alignment_of<node_t>::value);
 
       while(it != itend){
- //Collect all the nodes from the chunk pointed by it
+ //Collect all the nodes from the block pointed by it
          //and push them in the list
          free_nodes_t free_nodes;
          nodelist_iterator last_it = free_nodes.before_begin();
- const void *addr = get_chunk_from_hook(&*it, blocksize);
+ const void *addr = get_block_from_hook(&*it, blocksize);
 
          m_freelist.remove_and_dispose_if
             (is_between(addr, blocksize), push_in_list(free_nodes, last_it));
 
- //If the number of nodes is equal to m_nodes_per_chunk
+ //If the number of nodes is equal to m_nodes_per_block
          //this means that the block can be deallocated
- if(free_nodes.size() == m_nodes_per_chunk){
+ if(free_nodes.size() == m_nodes_per_block){
             //Unlink the nodes
             free_nodes.clear();
- it = m_chunklist.erase_after(bit);
+ it = m_blocklist.erase_after(bit);
             mp_segment_mngr_base->deallocate((void*)addr);
          }
          //Otherwise, insert them in the backup list, since the
@@ -240,19 +240,19 @@
 
    //!Deallocates all used memory. Precondition: all nodes allocated from this pool should
    //!already be deallocated. Otherwise, undefined behaviour. Never throws
- void purge_chunks()
+ void purge_blocks()
    {
       //check for memory leaks
       assert(m_allocated==0);
       std::size_t blocksize = detail::get_rounded_size
- (m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
- typename chunkslist_t::iterator
- it(m_chunklist.begin()), itend(m_chunklist.end()), aux;
+ (m_real_node_size*m_nodes_per_block, alignment_of<node_t>::value);
+ typename blockslist_t::iterator
+ it(m_blocklist.begin()), itend(m_blocklist.end()), aux;
 
      //We iterate through the NodeBlock list to free the memory
- while(!m_chunklist.empty()){
- void *addr = get_chunk_from_hook(&m_chunklist.front(), blocksize);
- m_chunklist.pop_front();
+ while(!m_blocklist.empty()){
+ void *addr = get_block_from_hook(&m_blocklist.front(), blocksize);
+ m_blocklist.pop_front();
          mp_segment_mngr_base->deallocate((void*)addr);
       }
       //Just clear free node list
@@ -262,7 +262,7 @@
    void swap(private_node_pool_impl &other)
    {
       std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base);
- m_chunklist.swap(other.m_chunklist);
+ m_blocklist.swap(other.m_blocklist);
       m_freelist.swap(other.m_freelist);
       std::swap(m_allocated, other.m_allocated);
    }
@@ -305,36 +305,44 @@
       const char * end_;
    };
 
- //!Allocates a chunk of nodes. Can throw boost::interprocess::bad_alloc
- void priv_alloc_chunk()
+ //!Allocates a block of nodes. Can throw boost::interprocess::bad_alloc
+ void priv_alloc_block()
    {
       //We allocate a new NodeBlock and put it as first
       //element in the free Node list
       std::size_t blocksize =
- detail::get_rounded_size(m_real_node_size*m_nodes_per_chunk, alignment_of<node_t>::value);
+ detail::get_rounded_size(m_real_node_size*m_nodes_per_block, alignment_of<node_t>::value);
       char *pNode = detail::char_ptr_cast
          (mp_segment_mngr_base->allocate(blocksize + sizeof(node_t)));
       if(!pNode) throw bad_alloc();
       char *pBlock = pNode;
- m_chunklist.push_front(get_chunk_hook(pBlock, blocksize));
+ m_blocklist.push_front(get_block_hook(pBlock, blocksize));
 
       //We initialize all Nodes in Node Block to insert
       //them in the free Node list
- for(std::size_t i = 0; i < m_nodes_per_chunk; ++i, pNode += m_real_node_size){
+ for(std::size_t i = 0; i < m_nodes_per_block; ++i, pNode += m_real_node_size){
          m_freelist.push_front(*new (pNode) node_t);
       }
    }
 
+ //!Deprecated, use deallocate_free_blocks
+ void deallocate_free_chunks()
+ { this->deallocate_free_blocks(); }
+
+ //!Deprecated, use purge_blocks
+ void purge_chunks()
+ { this->purge_blocks(); }
+
    private:
- //!Returns a reference to the chunk hook placed in the end of the chunk
- static inline node_t & get_chunk_hook (void *chunk, std::size_t blocksize)
+ //!Returns a reference to the block hook placed at the end of the block
+ static inline node_t & get_block_hook (void *block, std::size_t blocksize)
    {
       return *static_cast<node_t*>(
- static_cast<void*>((detail::char_ptr_cast(chunk) + blocksize)));
+ static_cast<void*>((detail::char_ptr_cast(block) + blocksize)));
    }
 
- //!Returns the starting address of the chunk reference to the chunk hook placed in the end of the chunk
- inline void *get_chunk_from_hook (node_t *hook, std::size_t blocksize)
+ //!Returns the starting address of the block referenced by the block hook placed at the end of the block
+ inline void *get_block_from_hook (node_t *hook, std::size_t blocksize)
    {
       return static_cast<void*>((detail::char_ptr_cast(hook) - blocksize));
    }
@@ -343,10 +351,10 @@
    typedef typename pointer_to_other
       <void_pointer, segment_manager_base_type>::type segment_mngr_base_ptr_t;
 
- const std::size_t m_nodes_per_chunk;
+ const std::size_t m_nodes_per_block;
    const std::size_t m_real_node_size;
    segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager
- chunkslist_t m_chunklist; //Intrusive container of chunks
+ blockslist_t m_blocklist; //Intrusive container of blocks
   free_nodes_t m_freelist; //Intrusive container of free nodes
    std::size_t m_allocated; //Used nodes for debugging
 };
@@ -355,8 +363,8 @@
 //!Pooled shared memory allocator using single segregated storage. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time
-template< class SegmentManager, std::size_t NodeSize, std::size_t NodesPerChunk >
+//!nodes allocated per block (NodesPerBlock) are known at compile time
+template< class SegmentManager, std::size_t NodeSize, std::size_t NodesPerBlock >
 class private_node_pool
    //Inherit from the implementation to avoid template bloat
    : public private_node_pool_impl<typename SegmentManager::segment_manager_base_type>
@@ -370,11 +378,13 @@
    public:
    typedef SegmentManager segment_manager;
 
- static const std::size_t nodes_per_chunk = NodesPerChunk;
+ static const std::size_t nodes_per_block = NodesPerBlock;
+ //Deprecated, use nodes_per_block
+ static const std::size_t nodes_per_chunk = NodesPerBlock;
 
    //!Constructor from a segment manager. Never throws
    private_node_pool(segment_manager *segment_mngr)
- : base_t(segment_mngr, NodeSize, NodesPerChunk)
+ : base_t(segment_mngr, NodeSize, NodesPerBlock)
    {}
 
    //!Returns the segment manager. Never throws
@@ -386,24 +396,24 @@
 //!Pooled shared memory allocator using single segregated storage. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time
+//!nodes allocated per block (NodesPerBlock) are known at compile time
 //!Pooled shared memory allocator using adaptive pool. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes. Node size (NodeSize) and the number of
-//!nodes allocated per chunk (NodesPerChunk) are known at compile time
+//!nodes allocated per block (NodesPerBlock) are known at compile time
 template< class SegmentManager
         , std::size_t NodeSize
- , std::size_t NodesPerChunk
+ , std::size_t NodesPerBlock
>
 class shared_node_pool
    : public detail::shared_pool_impl
       < private_node_pool
- <SegmentManager, NodeSize, NodesPerChunk>
+ <SegmentManager, NodeSize, NodesPerBlock>
>
 {
    typedef detail::shared_pool_impl
       < private_node_pool
- <SegmentManager, NodeSize, NodesPerChunk>
+ <SegmentManager, NodeSize, NodesPerBlock>
> base_t;
    public:
    shared_node_pool(SegmentManager *segment_mgnr)

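A note on get_block_hook/get_block_from_hook above: priv_alloc_block requests
blocksize + sizeof(node_t) bytes and stores the intrusive hook at offset
blocksize, so the block start can be recovered from the hook by subtracting
blocksize. A standalone sketch of that round trip (malloc stands in for the
segment manager):

    #include <cassert>
    #include <cstdlib>
    #include <iostream>

    struct node_t { node_t *next; };  //stand-in for the intrusive slist hook

    int main()
    {
       const std::size_t blocksize = 256;

       //The nodes plus one trailing hook, as in priv_alloc_block
       char *block = static_cast<char*>(std::malloc(blocksize + sizeof(node_t)));
       if(!block) return 1;

       //get_block_hook: the hook lives at the end of the block
       node_t *hook = reinterpret_cast<node_t*>(block + blocksize);

       //get_block_from_hook: subtracting blocksize recovers the block start
       char *recovered = reinterpret_cast<char*>(hook) - blocksize;
       assert(recovered == block);

       std::cout << "hook round-trip ok\n";
       std::free(block);
       return 0;
    }
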
Modified: trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp
==============================================================================
--- trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp (original)
+++ trunk/boost/interprocess/containers/detail/node_alloc_holder.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -275,7 +275,7 @@
    {
       typedef typename NodeAlloc::multiallocation_iterator multiallocation_iterator;
 
- //Try to allocate memory in a single chunk
+ //Try to allocate memory in a single block
       multiallocation_iterator itbeg =
          this->node_alloc().allocate_individual(n), itend, itold;
       int constructed = 0;

Modified: trunk/boost/interprocess/containers/flat_map.hpp
==============================================================================
--- trunk/boost/interprocess/containers/flat_map.hpp (original)
+++ trunk/boost/interprocess/containers/flat_map.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -479,8 +479,8 @@
    //!
    //! <b>Note</b>: Invalidates elements with keys
    //! not less than the erased element.
- void erase(const_iterator position)
- { m_flat_tree.erase(force<impl_const_iterator>(position)); }
+ iterator erase(const_iterator position)
+ { return force<iterator>(m_flat_tree.erase(force<impl_const_iterator>(position))); }
 
    //! <b>Effects</b>: Erases all elements in the container with key equivalent to x.
    //!
@@ -499,8 +499,8 @@
    //!
    //! <b>Complexity</b>: Logarithmic search time plus erasure time
    //! linear to the elements with bigger keys.
- void erase(const_iterator first, const_iterator last)
- { m_flat_tree.erase(force<impl_const_iterator>(first), force<impl_const_iterator>(last)); }
+ iterator erase(const_iterator first, const_iterator last)
+ { return force<iterator>(m_flat_tree.erase(force<impl_const_iterator>(first), force<impl_const_iterator>(last))); }
 
    //! <b>Effects</b>: erase(a.begin(),a.end()).
    //!
@@ -1060,8 +1060,8 @@
    //!
    //! <b>Note</b>: Invalidates elements with keys
    //! not less than the erased element.
- void erase(const_iterator position)
- { m_flat_tree.erase(force<impl_const_iterator>(position)); }
+ iterator erase(const_iterator position)
+ { return force<iterator>(m_flat_tree.erase(force<impl_const_iterator>(position))); }
 
    //! <b>Effects</b>: Erases all elements in the container with key equivalent to x.
    //!
@@ -1080,8 +1080,8 @@
    //!
    //! <b>Complexity</b>: Logarithmic search time plus erasure time
    //! linear to the elements with bigger keys.
- void erase(const_iterator first, const_iterator last)
- { m_flat_tree.erase(force<impl_const_iterator>(first), force<impl_const_iterator>(last)); }
+ iterator erase(const_iterator first, const_iterator last)
+ { return force<iterator>(m_flat_tree.erase(force<impl_const_iterator>(first), force<impl_const_iterator>(last))); }
 
    //! <b>Effects</b>: erase(a.begin(),a.end()).
    //!

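With erase now returning the iterator following the erased element, the usual
erase-while-iterating idiom works without recomputing positions. A sketch
against a shared-memory flat_map (the shared memory name and size are
illustrative and error handling is omitted):

    #include <functional>
    #include <utility>
    #include <boost/interprocess/managed_shared_memory.hpp>
    #include <boost/interprocess/containers/flat_map.hpp>
    #include <boost/interprocess/allocators/allocator.hpp>

    using namespace boost::interprocess;

    typedef std::pair<int, int> value_type;
    typedef allocator<value_type
       , managed_shared_memory::segment_manager> shm_alloc;
    typedef flat_map<int, int, std::less<int>, shm_alloc> shm_map;

    int main()
    {
       shared_memory_object::remove("flat_map_demo");
       managed_shared_memory segment(create_only, "flat_map_demo", 65536);
       shm_map *m = segment.construct<shm_map>("m")
          (std::less<int>(), shm_alloc(segment.get_segment_manager()));

       for(int i = 0; i < 10; ++i)
          m->insert(value_type(i, i*i));

       //erase returns the next valid iterator, so we can erase as we go
       for(shm_map::iterator it = m->begin(); it != m->end(); ){
          if(it->first % 2)  it = m->erase(it);
          else               ++it;
       }

       segment.destroy<shm_map>("m");
       shared_memory_object::remove("flat_map_demo");
       return 0;
    }
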
Modified: trunk/boost/interprocess/containers/vector.hpp
==============================================================================
--- trunk/boost/interprocess/containers/vector.hpp (original)
+++ trunk/boost/interprocess/containers/vector.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -1148,8 +1148,15 @@
          }
          else{
             size_type received_size;
- this->alloc().allocation_command(shrink_in_place, this->size(), this->capacity()
- ,received_size, this->members_.m_start);
+ if(this->alloc().allocation_command
+ ( shrink_in_place | nothrow_allocation
+ , this->capacity(), this->size()
+ , received_size, this->members_.m_start).first){
+ this->members_.m_capacity = received_size;
+ #ifdef BOOST_INTERPROCESS_VECTOR_ALLOC_STATS
+ ++this->num_shrink;
+ #endif
+ }
          }
       }
    }
@@ -1786,9 +1793,10 @@
    public:
    unsigned int num_expand_fwd;
    unsigned int num_expand_bwd;
+ unsigned int num_shrink;
    unsigned int num_alloc;
    void reset_alloc_stats()
- { num_expand_fwd = num_expand_bwd = num_alloc = 0; }
+ { num_expand_fwd = num_expand_bwd = num_alloc = num_shrink = 0; }
    #endif
    /// @endcond
 };

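The new shrink path above asks the allocator to shrink the buffer in place
without throwing, and only commits the new capacity when the returned pair
reports success. A dependency-free sketch of that calling convention (this
toy allocation_command only models the success flag and received_size, not
the real segment manager):

    #include <cstddef>
    #include <iostream>
    #include <utility>

    enum allocation_flags { shrink_in_place = 1, nothrow_allocation = 2 };

    //Toy allocator: shrinking succeeds when preferred_size fits the limit;
    //the size the buffer now owns is reported through received_size
    std::pair<void*, bool> allocation_command
       (int /*command*/, std::size_t limit_size, std::size_t preferred_size
       ,std::size_t &received_size, void *reuse_ptr)
    {
       if(preferred_size > limit_size)
          return std::pair<void*, bool>((void*)0, false);
       received_size = preferred_size;
       return std::pair<void*, bool>(reuse_ptr, true);
    }

    int main()
    {
       char buffer[100];
       std::size_t capacity = sizeof(buffer), size = 40, received_size = 0;

       //Mirror of the new vector code: request the shrink and only
       //commit the new capacity when pair::first signals success
       if(allocation_command(shrink_in_place | nothrow_allocation
                            , capacity, size, received_size, buffer).first){
          capacity = received_size;
       }
       std::cout << "capacity now " << capacity << '\n';  //prints 40
       return 0;
    }
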
Modified: trunk/boost/interprocess/detail/file_wrapper.hpp
==============================================================================
--- trunk/boost/interprocess/detail/file_wrapper.hpp (original)
+++ trunk/boost/interprocess/detail/file_wrapper.hpp 2008-05-23 18:39:21 EDT (Fri, 23 May 2008)
@@ -51,7 +51,7 @@
    //!Does not throw
    #ifndef BOOST_INTERPROCESS_RVALUE_REFERENCE
    file_wrapper
- (detail::moved_object<file_wrapper> &moved)
+ (detail::moved_object<file_wrapper> moved)
    { this->swap(moved.get()); }
    #else
    file_wrapper(file_wrapper &&moved)
@@ -63,7 +63,7 @@
    //!Does not throw
    #ifndef BOOST_INTERPROCESS_RVALUE_REFERENCE
    file_wrapper &operator=
- (detail::moved_object<file_wrapper> &moved)
+ (detail::moved_object<file_wrapper> moved)
    {
       file_wrapper tmp(moved);
       this->swap(tmp);

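The file_wrapper change relates to #1932: in the C++03 move emulation, move()
returns a temporary detail::moved_object<T>, and a temporary cannot bind to a
non-const lvalue reference, which is why the move constructor and move
assignment now take moved_object by value. A simplified sketch of the idea
(stand-ins, not the real detail classes):

    #include <iostream>

    //Simplified stand-in for detail::moved_object<T>
    template<class T>
    struct moved_object {
       explicit moved_object(T &obj) : m_obj(obj) {}
       T &get() const { return m_obj; }
       private: T &m_obj;
    };

    template<class T>
    moved_object<T> move(T &obj) { return moved_object<T>(obj); }

    struct file_like {
       file_like() {}
       //Taking moved_object by value lets it bind to the temporary that
       //move() returns; a non-const reference parameter would not compile
       file_like(moved_object<file_like> moved)
       { std::cout << "move-constructed from " << &moved.get() << '\n'; }
    };

    int main()
    {
       file_like a;
       file_like b(move(a));  //the temporary binds to the by-value parameter
       return 0;
    }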
