[Boost-bugs] [Boost C++ Libraries] #4557: Boost.Interprocess fails to compile when BOOST_ENABLE_ASSERT_HANDLER is defined

Subject: [Boost-bugs] [Boost C++ Libraries] #4557: Boost.Interprocess fails to compile when BOOST_ENABLE_ASSERT_HANDLER is defined
From: Boost C++ Libraries (noreply_at_[hidden])
Date: 2010-08-17 21:21:03


#4557: Boost.Interprocess fails to compile when BOOST_ENABLE_ASSERT_HANDLER is
defined
-------------------------------+--------------------------------------------
 Reporter:  andrew@…           |      Owner:  igaztanaga
     Type:  Bugs               |     Status:  new
Milestone:  To Be Determined   |  Component:  interprocess
  Version:  Boost 1.44.0       |   Severity:  Problem
 Keywords:                     |
-------------------------------+--------------------------------------------
 Some Boost.Interprocess files fail to compile when
 BOOST_ENABLE_ASSERT_HANDLER is defined. The affected files all seem to
 include boost/assert.hpp and use the BOOST_ASSERT macro in most places,
 yet they also call the plain assert() macro occasionally. However, when
 BOOST_ENABLE_ASSERT_HANDLER is defined, boost/assert.hpp never includes
 <assert.h>, so plain assert() is undeclared and we get a compile error.
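
 For context, this is roughly how boost/assert.hpp dispatches in 1.44 (a
 simplified sketch from memory, not a verbatim copy of the header):

    // Sketch of the BOOST_ASSERT configuration logic in boost/assert.hpp.
    #if defined(BOOST_DISABLE_ASSERTS)
    # define BOOST_ASSERT(expr) ((void)0)
    #elif defined(BOOST_ENABLE_ASSERT_HANDLER)
    # include <boost/current_function.hpp>
    namespace boost {
       // must be supplied by the user of the library
       void assertion_failed(char const * expr, char const * function,
                             char const * file, long line);
    }
    # define BOOST_ASSERT(expr) ((expr) ? ((void)0) \
          : ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, \
                                      __FILE__, __LINE__))
    #else
    # include <assert.h>  // only this branch makes plain assert() visible
    # define BOOST_ASSERT(expr) assert(expr)
    #endif

 So a translation unit that relies on boost/assert.hpp alone, like the
 illustrative snippet below, breaks as soon as the handler is enabled:

    // Hypothetical example: a file that only includes boost/assert.hpp.
    #include <boost/assert.hpp>

    void release(long & usecount)
    {
       BOOST_ASSERT(usecount > 0);  // OK: dispatches to boost::assertion_failed
       assert(usecount > 0);        // error: 'assert' was not declared in this
                                    // scope; <assert.h>/<cassert> was never
                                    // included
       --usecount;
    }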

 boost_1_44_0/boost/interprocess $ grep -R assert\( *
 allocators/detail/allocator_common.hpp: assert(m_header.m_usecount >
 0);
 containers/container/detail/advanced_insert_int.hpp: assert(n <=
 count_);
 containers/container/detail/advanced_insert_int.hpp: assert(count_ ==
 0);
 containers/container/detail/advanced_insert_int.hpp:
 assert(difference_type(count_)>= division_count);
 containers/container/detail/advanced_insert_int.hpp: assert(count_ ==
 0);
 containers/container/detail/advanced_insert_int.hpp:
 assert(difference_type(count_)>= division_count);
 containers/container/detail/advanced_insert_int.hpp: assert(new_count
 == 0);
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1);
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1);
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1);
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1);
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1); \
 containers/container/detail/advanced_insert_int.hpp:
 assert(division_count <=1); \
 containers/container/detail/node_pool_impl.hpp:
 assert(m_freelist.empty());
 containers/container/detail/node_pool_impl.hpp:
 assert(m_allocated==0);
 containers/container/detail/node_pool_impl.hpp:
 assert(m_nodes_per_block == other.m_nodes_per_block);
 containers/container/detail/node_pool_impl.hpp:
 assert(m_real_node_size == other.m_real_node_size);
 containers/container/detail/node_pool_impl.hpp:
 assert(m_allocated>0);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(m_max_free_blocks == other.m_max_free_blocks);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(m_real_node_size == other.m_real_node_size);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(m_real_block_alignment == other.m_real_block_alignment);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(m_real_num_node == other.m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(!m_block_multiset.empty());
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(it->free_nodes.size() == m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(block_info->free_nodes.size() < m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(m_block_multiset.begin() != m_block_multiset.end());
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0 !=
 free_nodes_count);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(free_nodes == mp_impl->m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(free_nodes == mp_impl->m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0
 == to_deallocate->hdr_offset);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(sp <= si);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(total_free_nodes >= m_totally_free_blocks*m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(total_free >= m_totally_free_blocks);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(hdr_off_holder->hdr_offset ==
 std::size_t(reinterpret_cast<char*>(&*it)-
 reinterpret_cast<char*>(hdr_off_holder)));
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(it->free_nodes.size() == m_real_num_node);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(num_free_nodes == m_totally_free_blocks);
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0 ==
 ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0 ==
 (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(block->hdr_offset == 0);
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(hdr_off_holder->hdr_offset ==
 std::size_t(reinterpret_cast<char*>(block) -
 reinterpret_cast<char*>(hdr_off_holder)));
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0 ==
 ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp: assert(0 ==
 (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1)));
 containers/container/detail/adaptive_node_pool_impl.hpp:
 assert(static_cast<void*>(&static_cast<hdr_offset_holder*>(c_info)->hdr_offset)
 ==
 detail/segment_manager_helper.hpp: assert(hdr->m_value_alignment ==
 algn);
 detail/segment_manager_helper.hpp: assert(hdr->m_value_bytes % sz ==
 0);
 detail/intersegment_ptr.hpp: assert(addr < static_cast<void*>(this));
 detail/intersegment_ptr.hpp: assert(pow >= frc_size_bits);
 detail/intersegment_ptr.hpp: assert(pow >= frc_size_bits);
 detail/intersegment_ptr.hpp: assert(((frc << (pow - frc_size_bits)) &
 (align-1))==0);
 detail/intersegment_ptr.hpp: assert(mode < is_max_mode);
 detail/intersegment_ptr.hpp: assert(this_info.size == s);
 detail/intersegment_ptr.hpp: assert(!m_segments.empty());
 detail/intersegment_ptr.hpp: assert(segment_id <
 (std::size_t)m_segments.size());
 detail/intersegment_ptr.hpp:
 assert(m_ptr_to_segment_info.empty());
 detail/intersegment_ptr.hpp: assert(ret.second);
 detail/intersegment_ptr.hpp: assert(erased);
 detail/intersegment_ptr.hpp: assert(ret.second);
 detail/managed_multi_shared_memory.hpp: assert(ret);(void)ret;
 detail/managed_multi_shared_memory.hpp: assert(ret);
 managed_external_buffer.hpp: assert((0 == (((std::size_t)addr) &
 (AllocationAlgorithm::Alignment - std::size_t(1u)))));
 managed_external_buffer.hpp: assert((0 == (((std::size_t)addr) &
 (AllocationAlgorithm::Alignment - std::size_t(1u)))));
 mapped_region.hpp: assert(ret == 0);
 mem_algo/detail/mem_algo_common.hpp: assert((needs_backwards %
 backwards_multiple) == 0);
 mem_algo/detail/mem_algo_common.hpp: assert((needs_backwards_lcmed
 & (Alignment - 1u)) == 0);
 mem_algo/detail/mem_algo_common.hpp: assert((needs_backwards_lcmed
 % lcm) == 0);
 mem_algo/detail/mem_algo_common.hpp: assert((needs_backwards_lcmed
 % lcm) == 0);
 mem_algo/detail/mem_algo_common.hpp: assert((needs_backwards %
 backwards_multiple) == 0);
 mem_algo/detail/mem_algo_common.hpp: assert(pos <=
 (reinterpret_cast<char*>(first) + first->m_size*Alignment));
 mem_algo/detail/mem_algo_common.hpp: assert(first->m_size >=
 2*MinBlockUnits);
 mem_algo/detail/mem_algo_common.hpp: assert((pos +
 MinBlockUnits*Alignment - AllocatedCtrlBytes + nbytes*Alignment/Alignment)
 <=
 mem_algo/detail/mem_algo_common.hpp: assert(second->m_size >=
 MinBlockUnits);
 mem_algo/detail/mem_algo_common.hpp:
 assert((new_block->m_size*Alignment - AllocatedCtrlUnits) >=
 sizeof(void_pointer));
 mem_algo/detail/multi_simple_seq_fit_impl.hpp://
 assert(m_header.m_allocated == 0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp://
 assert(m_header.m_root.m_next->m_next ==
 block_ctrl_ptr(&m_header.m_root));
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(!(size <
 MinBlockSize));
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(p_services);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(block->m_next ==
 0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(block->m_next ==
 0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert((alignment &
 (alignment - std::size_t(1u))) != 0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp:
 assert(((reinterpret_cast<char*>(block) - reinterpret_cast<char*>(this))
 mem_algo/detail/multi_simple_seq_fit_impl.hpp: assert(block->m_next ==
 0);
 mem_algo/detail/multi_simple_seq_fit_impl.hpp:
 assert((reinterpret_cast<char*>(addr) - reinterpret_cast<char*>(this))
 mem_algo/detail/multi_simple_seq_fit_impl.hpp:
 assert(m_header.m_allocated >= total_size);
 mem_algo/detail/simple_seq_fit_impl.hpp:// assert(m_header.m_allocated
 == 0);
 mem_algo/detail/simple_seq_fit_impl.hpp://
 assert(m_header.m_root.m_next->m_next ==
 block_ctrl_ptr(&m_header.m_root));
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(prev == root);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(last_free_end_address
 == (reinterpret_cast<char*>(last) + last->m_size*Alignment));
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(addr);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(received_size ==
 last_units*Alignment - AllocatedCtrlBytes);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(!(size < MinBlockSize));
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(0);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(ret != 0);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(block->m_next == 0);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(0);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(block->m_next == 0);
 mem_algo/detail/simple_seq_fit_impl.hpp: assert(m_header.m_allocated >=
 total_size);
 mem_algo/rbtree_best_fit.hpp: assert(get_min_size(extra_hdr_bytes) <=
 size);
 mem_algo/rbtree_best_fit.hpp:// assert(m_header.m_allocated == 0);
 mem_algo/rbtree_best_fit.hpp:// assert(m_header.m_root.m_next->m_next ==
 block_ctrl_ptr(&m_header.m_root));
 mem_algo/rbtree_best_fit.hpp:
 assert(priv_is_allocated_block(old_end_block));
 mem_algo/rbtree_best_fit.hpp:// assert((m_header.m_size - old_end) >=
 MinBlockUnits);
 mem_algo/rbtree_best_fit.hpp: assert(first_block ==
 priv_next_block(new_end_block));
 mem_algo/rbtree_best_fit.hpp: assert(new_end_block ==
 priv_end_block(first_block));
 mem_algo/rbtree_best_fit.hpp: assert(new_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(priv_next_block(new_block) ==
 new_end_block);
 mem_algo/rbtree_best_fit.hpp:
 assert(priv_is_allocated_block(old_end_block));
 mem_algo/rbtree_best_fit.hpp:
 assert(priv_is_allocated_block(unique_block));
 mem_algo/rbtree_best_fit.hpp:
 assert(!priv_is_allocated_block(last_block));
 mem_algo/rbtree_best_fit.hpp: assert(priv_end_block(first_block) ==
 new_end_block);
 mem_algo/rbtree_best_fit.hpp: assert(size >= (BlockCtrlBytes +
 EndCtrlBlockBytes));
 mem_algo/rbtree_best_fit.hpp: assert(first_big_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(priv_next_block(first_big_block) ==
 end_block);
 mem_algo/rbtree_best_fit.hpp: assert(priv_next_block(end_block) ==
 first_big_block);
 mem_algo/rbtree_best_fit.hpp: assert(priv_end_block(first_big_block) ==
 end_block);
 mem_algo/rbtree_best_fit.hpp: assert(priv_prev_block(end_block) ==
 first_big_block);
 mem_algo/rbtree_best_fit.hpp:
 assert(static_cast<void*>(static_cast<SizeHolder*>(first_big_block))
 mem_algo/rbtree_best_fit.hpp: //assert((Alignment % 2) == 0);
 mem_algo/rbtree_best_fit.hpp: //assert(reuse->m_size ==
 priv_tail_size(reuse));
 mem_algo/rbtree_best_fit.hpp:
 assert(!priv_is_allocated_block(prev_block));
 mem_algo/rbtree_best_fit.hpp: assert(prev_block->m_size ==
 reuse->m_prev_size);
 mem_algo/rbtree_best_fit.hpp: assert(0);
 mem_algo/rbtree_best_fit.hpp: assert(received_size =
 received_size2);
 mem_algo/rbtree_best_fit.hpp: assert(new_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(prev_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp:
 assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) %
 backwards_multiple == 0);
 mem_algo/rbtree_best_fit.hpp: assert(prev_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp:
 assert((static_cast<char*>(reuse_ptr) - static_cast<char*>(user_ptr)) %
 backwards_multiple == 0);
 mem_algo/rbtree_best_fit.hpp: assert(priv_is_allocated_block(block));
 mem_algo/rbtree_best_fit.hpp: //assert(old_block_units ==
 priv_tail_size(block));
 mem_algo/rbtree_best_fit.hpp: assert(min_user_units <=
 preferred_user_units);
 mem_algo/rbtree_best_fit.hpp: assert(next_block->m_size ==
 priv_next_block(next_block)->m_prev_size);
 mem_algo/rbtree_best_fit.hpp: assert(rem_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(!ptr->m_prev_allocated);
 mem_algo/rbtree_best_fit.hpp: assert(!priv_is_allocated_block(prev));
 mem_algo/rbtree_best_fit.hpp:
 assert(first_segment_block->m_prev_allocated);
 mem_algo/rbtree_best_fit.hpp:
 assert(priv_is_allocated_block(end_block));
 mem_algo/rbtree_best_fit.hpp: assert(end_block > first_segment_block);
 mem_algo/rbtree_best_fit.hpp: assert(allocated ==
 next_block_prev_allocated);
 mem_algo/rbtree_best_fit.hpp: //assert(!priv_is_allocated_block(block));
 mem_algo/rbtree_best_fit.hpp: //assert(!priv_is_allocated_block(ptr));
 mem_algo/rbtree_best_fit.hpp: assert(block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(rem_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(0);
 mem_algo/rbtree_best_fit.hpp: assert(priv_is_allocated_block(block));
 mem_algo/rbtree_best_fit.hpp:// assert(block->m_size ==
 priv_tail_size(block));
 mem_algo/rbtree_best_fit.hpp: assert(m_header.m_allocated >=
 block_old_size);
 mem_algo/rbtree_best_fit.hpp: assert(prev_block->m_size >=
 BlockCtrlUnits);
 mem_algo/rbtree_best_fit.hpp: assert(block_to_insert->m_size >=
 BlockCtrlUnits);
 offset_ptr.hpp: assert(0 == (std::size_t(pint) & Mask));
 offset_ptr.hpp: assert(b < (std::size_t(1) << NumBits));
 segment_manager.hpp:
 assert((sizeof(segment_manager_base<MemoryAlgorithm>) ==
 sizeof(MemoryAlgorithm)));
 segment_manager.hpp: assert(0);
 segment_manager.hpp: assert(static_cast<const void*>(this) ==
 static_cast<const void*>(static_cast<Base*>(this)));
 segment_manager.hpp: assert(name != 0);
 segment_manager.hpp: assert(0);
 segment_manager.hpp: assert((type == anonymous_type &&
 ctrl_data->m_num_char == 0) ||
 segment_manager.hpp: assert(ctrl_data->sizeof_char() ==
 sizeof(CharType));
 segment_manager.hpp: assert(ctrl_data->m_num_char ==
 std::char_traits<CharType>::length(name));
 segment_manager.hpp: assert((ctrl_data->value_bytes() %sizeofvalue)
 == 0);
 segment_manager.hpp: assert((instance_type)ctrl_data->alloc_type() <
 max_allocation_type);
 segment_manager.hpp: assert((ctrl_data->m_value_bytes %
 table.size) == 0);
 segment_manager.hpp: assert(ctrl_data->sizeof_char() ==
 sizeof(CharT));
 segment_manager.hpp: assert((ctrl_data->m_value_bytes %
 table.size) == 0);
 segment_manager.hpp: assert(ctrl_data->sizeof_char() ==
 sizeof(CharT));
 segment_manager.hpp: //assert(0);
 segment_manager.hpp: assert((ctrl_data->m_value_bytes % table.size)
 == 0);
 segment_manager.hpp: assert(sizeof(CharT) ==
 ctrl_data->sizeof_char());
 segment_manager.hpp: //assert(0);
 segment_manager.hpp: assert((ctrl_data->m_value_bytes % table.size)
 == 0);
 segment_manager.hpp: assert(static_cast<void*>(stored_name) ==
 static_cast<void*>(ctrl_data->template name<CharT>()));
 segment_manager.hpp: assert(sizeof(CharT) ==
 ctrl_data->sizeof_char());
 streams/vectorstream.hpp: assert(m_vect.size() ==
 m_vect.capacity());
 streams/vectorstream.hpp: assert(m_vect.size() ==
 m_vect.capacity());
 sync/xsi/xsi_named_mutex.hpp: assert(success);
 sync/emulation/interprocess_recursive_mutex.hpp:
 assert(detail::equal_systemwide_thread_id(thr_id, old_id));
 sync/posix/interprocess_recursive_mutex.hpp: assert(res == 0);(void)res;
 sync/posix/interprocess_recursive_mutex.hpp: assert(res == 0);
 sync/posix/interprocess_barrier.hpp: assert(res == 0);(void)res;
 sync/posix/interprocess_mutex.hpp: assert(res == 0);(void)res;
 sync/posix/interprocess_mutex.hpp: assert(res == 0);
 sync/posix/interprocess_condition.hpp: assert(res == 0);
 sync/posix/interprocess_condition.hpp: assert(res == 0);
 sync/posix/interprocess_condition.hpp: assert(res == 0);
 sync/posix/interprocess_condition.hpp: assert(res == 0);
 sync/posix/interprocess_condition.hpp: assert(res == 0 || res ==
 ETIMEDOUT);
 sync/posix/semaphore_wrapper.hpp: assert(0);
 sync/posix/semaphore_wrapper.hpp: assert(0);
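
 For completeness, a build that defines BOOST_ENABLE_ASSERT_HANDLER supplies
 its own boost::assertion_failed somewhere in the program, along these lines
 (an illustrative stub, not code from Boost or from this ticket):

    // Illustrative assertion handler required when BOOST_ENABLE_ASSERT_HANDLER
    // is defined; the behaviour here is only an example.
    #include <cstdio>
    #include <cstdlib>

    namespace boost {

    void assertion_failed(char const * expr, char const * function,
                          char const * file, long line)
    {
       std::fprintf(stderr, "Assertion '%s' failed in %s (%s:%ld)\n",
                    expr, function, file, line);
       std::abort();
    }

    } // namespace boost

 Such a build never pulls in <assert.h> through boost/assert.hpp, so each of
 the files listed above that calls plain assert() would need its own
 <cassert>/<assert.h> include (or a switch to BOOST_ASSERT) to compile.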

-- 
Ticket URL: <https://svn.boost.org/trac/boost/ticket/4557>
Boost C++ Libraries <http://www.boost.org/>
Boost provides free peer-reviewed portable C++ source libraries.

This archive was generated by hypermail 2.1.7 : 2017-02-16 18:50:04 UTC