Boost-Commit :
From: daniel_james_at_[hidden]
Date: 2007-12-07 08:09:09
Author: danieljames
Date: 2007-12-07 08:09:09 EST (Fri, 07 Dec 2007)
New Revision: 41825
URL: http://svn.boost.org/trac/boost/changeset/41825
Log:
Remove all macro use from hash_table by moving it out of hash_table_impl.hpp (which is included twice, once for equivalent keys and once for unique keys) and into hash_table.hpp
Text files modified:
sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table.hpp | 921 +++++++++++++++++++++++++++++++++++++++
sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table_impl.hpp | 919 ---------------------------------------
2 files changed, 918 insertions(+), 922 deletions(-)
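In outline, the change replaces the macro-driven double inclusion with a single class template that takes the data class as an extra template parameter. A simplified sketch, using only names that appear in the diff below:

    // Before: hash_table_impl.hpp is included twice, with macros choosing
    // the class and base names on each pass.
    #if BOOST_UNORDERED_HASH_EQUIVALENT
    #define HASH_TABLE hash_table_equivalent_keys
    #define HASH_TABLE_DATA hash_table_data_equivalent_keys
    #else
    #define HASH_TABLE hash_table_unique_keys
    #define HASH_TABLE_DATA hash_table_data_unique_keys
    #endif

    template <typename ValueType, typename KeyType,
        typename Hash, typename Pred, typename Alloc>
    class HASH_TABLE : public HASH_TABLE_DATA<Alloc> { /* ... */ };

    // After: a single template in hash_table.hpp, with the data class
    // passed in as a template parameter instead of being selected by macro.
    template <typename ValueType, typename KeyType,
        typename Hash, typename Pred, typename Alloc, typename data>
    class hash_table : public data { /* ... */ };
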
Modified: sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table.hpp
==============================================================================
--- sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table.hpp (original)
+++ sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table.hpp 2007-12-07 08:09:09 EST (Fri, 07 Dec 2007)
@@ -118,6 +118,919 @@
namespace boost {
namespace unordered_detail {
+ //
+ // Hash Table
+ //
+
+ template <typename ValueType, typename KeyType,
+ typename Hash, typename Pred,
+ typename Alloc, typename data>
+ class hash_table
+ : public data
+ {
+ typedef typename data::node_constructor node_constructor;
+ typedef typename data::bucket_ptr bucket_ptr;
+ typedef typename data::link_ptr link_ptr;
+
+ public:
+
+ typedef BOOST_DEDUCED_TYPENAME data::value_allocator value_allocator;
+ typedef BOOST_DEDUCED_TYPENAME data::node_allocator node_allocator;
+
+ // Type definitions
+
+ typedef KeyType key_type;
+ typedef Hash hasher;
+ typedef Pred key_equal;
+ typedef ValueType value_type;
+ typedef std::size_t size_type;
+
+ // iterators
+
+ typedef BOOST_DEDUCED_TYPENAME data::local_iterator_base local_iterator_base;
+ typedef BOOST_DEDUCED_TYPENAME data::iterator_base iterator_base;
+
+ private:
+
+ class functions
+ {
+ std::pair<hasher, key_equal> functions_;
+
+ public:
+
+ functions(hasher const& h, key_equal const& k)
+ : functions_(h, k) {}
+
+ hasher const& hash_function() const
+ {
+ return functions_.first;
+ }
+
+ key_equal const& key_eq() const
+ {
+ return functions_.second;
+ }
+ };
+
+ // Both hasher and key_equal's copy/assign can throw so double
+ // buffering is used to copy them. func_ points to the currently
+ // active function objects.
+
+ typedef functions hash_table::*functions_ptr;
+
+ functions func1_;
+ functions func2_;
+ functions_ptr func_;
+
+ float mlf_;
+ size_type max_load_;
+
+ public:
+
+ // Constructors
+ //
+ // In the constructors, if anything throws an exception,
+ // data's destructor will clean up.
+
+ hash_table(size_type n,
+ hasher const& hf, key_equal const& eq,
+ value_allocator const& a)
+ : data(n, a), // throws, cleans itself up
+ func1_(hf, eq), // throws " "
+ func2_(hf, eq), // throws " "
+ func_(&hash_table::func1_), // no throw
+ mlf_(1.0f) // no throw
+ {
+ calculate_max_load(); // no throw
+ }
+
+ // Construct from iterators
+
+ // initial_size
+ //
+ // A helper function for the range constructor to calculate how many
+ // nodes will be created if the iterators support it. Might get it
+ // totally wrong for containers with unique keys.
+ //
+ // no throw
+
+ template <typename I>
+ size_type initial_size(I i, I j, size_type n,
+ boost::forward_traversal_tag)
+ {
+ // max load factor isn't set yet, but when it is, it'll be 1.0.
+ return (std::max)(static_cast<size_type>(std::distance(i, j)) + 1, n);
+ }
+
+ template <typename I>
+ size_type initial_size(I, I, size_type n,
+ boost::incrementable_traversal_tag)
+ {
+ return n;
+ }
+
+ template <typename I>
+ size_type initial_size(I i, I j, size_type n)
+ {
+ BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
+ iterator_traversal_tag;
+ return initial_size(i, j, n, iterator_traversal_tag);
+ }
+
+ template <typename I>
+ hash_table(I i, I j, size_type n,
+ hasher const& hf, key_equal const& eq,
+ value_allocator const& a)
+ : data(initial_size(i, j, n), a), // throws, cleans itself up
+ func1_(hf, eq), // throws " "
+ func2_(hf, eq), // throws " "
+ func_(&hash_table::func1_), // no throw
+ mlf_(1.0f) // no throw
+ {
+ calculate_max_load(); // no throw
+ // Inserting elements is left to the containing class.
+ // This is a little odd, but hopefully will be cleaned up with
+ // future development.
+ }
+ // Copy Construct
+
+ hash_table(hash_table const& x)
+ : data(x, x.min_buckets_for_size(x.size())), // throws
+ func1_(x.current_functions()), // throws
+ func2_(x.current_functions()), // throws
+ func_(&hash_table::func1_), // no throw
+ mlf_(x.mlf_) // no throw
+ {
+ calculate_max_load(); // no throw
+
+ // This can throw, but data's destructor will clean
+ // up.
+ copy_buckets(x, *this, current_functions());
+ }
+
+ // Assign
+ //
+ // basic exception safety. If copy_functions or reserve throws,
+ // the container is left in a sane, empty state. If copy_buckets
+ // throws the container is left with whatever was successfully
+ // copied.
+
+ hash_table& operator=(hash_table const& x)
+ {
+ if(this != &x)
+ {
+ this->clear(); // no throw
+ func_ = copy_functions(x); // throws, strong
+ mlf_ = x.mlf_; // no throw
+ calculate_max_load(); // no throw
+ reserve(x.size()); // throws
+ copy_buckets(x, *this, current_functions()); // throws
+ }
+
+ return *this;
+ }
+
+ // Swap
+ //
+ // Swap's behaviour when allocators aren't equal is in dispute; see
+ // this paper for full details:
+ //
+ // http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2004/n1599.html
+ //
+ // It lists 3 possible behaviours:
+ //
+ // 1 - If the allocators aren't equal then throw an exception.
+ // 2 - Reallocate the elements in the containers with the
+ // appropriate allocators - messing up exception safety in
+ // the process.
+ // 3 - Swap the allocators, hoping that the allocators have a
+ // no-throw swap.
+ //
+ // The paper recommends #3.
+ //
+ // ----------------------------------------------------------------
+ //
+ // Strong exception safety (might change unused function objects)
+ //
+ // Can throw if hash or predicate object's copy constructor throws.
+ // If allocators are unequal:
+ // Can throw if allocator's swap throws
+ // (I'm assuming that the allocator's swap doesn't throw
+ // but this doesn't seem to be guaranteed. Maybe I
+ // could double buffer the allocators).
+
+ void swap(hash_table& x)
+ {
+ // This only affects the function objects that aren't in use,
+ // so it is strongly exception safe, via double buffering.
+ functions_ptr new_func_this = copy_functions(x); // throws
+ functions_ptr new_func_that = x.copy_functions(*this); // throws
+
+ if(this->allocators_ == x.allocators_) {
+ this->data::swap(x); // no throw
+ }
+ else {
+ // Note: I'm not sure that allocator swap is guaranteed to be no
+ // throw.
+ this->allocators_.swap(x.allocators_);
+ this->data::swap(x);
+ }
+
+ // We've made it, the rest is no throw.
+ std::swap(mlf_, x.mlf_);
+
+ func_ = new_func_this;
+ x.func_ = new_func_that;
+
+ calculate_max_load();
+ x.calculate_max_load();
+ }
+
+ private:
+
+ functions const& current_functions() const
+ {
+ return this->*func_;
+ }
+
+ // This copies the given function objects into the currently unused
+ // function objects and returns a pointer that func_ can later be
+ // set to, in order to commit the change.
+ //
+ // Strong exception safety (since only unused function objects are
+ // changed).
+ functions_ptr copy_functions(hash_table const& x)
+ {
+ // no throw:
+ functions_ptr ptr = func_ == &hash_table::func1_
+ ? &hash_table::func2_ : &hash_table::func1_;
+ // throws, functions not in use, so strong
+ this->*ptr = x.current_functions();
+ return ptr;
+ }
+
+ public:
+
+ // accessors
+
+ // no throw
+ value_allocator get_allocator() const
+ {
+ return this->allocators_.value_alloc_;
+ }
+
+ // no throw
+ hasher const& hash_function() const
+ {
+ return current_functions().hash_function();
+ }
+
+ // no throw
+ key_equal const& key_eq() const
+ {
+ return current_functions().key_eq();
+ }
+
+ // no throw
+ size_type size() const
+ {
+ return this->size_;
+ }
+
+ // no throw
+ bool empty() const
+ {
+ return this->size_ == 0;
+ }
+
+ // no throw
+ size_type max_size() const
+ {
+ // size < mlf_ * count
+ return float_to_size_t(ceil(
+ max_bucket_count() * mlf_)) - 1;
+ }
+
+ // strong safety
+ size_type bucket(key_type const& k) const
+ {
+ // hash_function can throw:
+ return this->index_from_hash(hash_function()(k));
+ }
+
+ // strong safety
+ bucket_ptr get_bucket(key_type const& k) const
+ {
+ return this->buckets_ + bucket(k);
+ }
+
+ // no throw
+ size_type bucket_count() const
+ {
+ return this->bucket_count_;
+ }
+
+ // no throw
+ size_type max_bucket_count() const
+ {
+ // -1 to account for the end marker.
+ return prev_prime(this->allocators_.bucket_alloc_.max_size() - 1);
+ }
+
+ private:
+
+ // no throw
+ size_type min_buckets_for_size(size_type n) const
+ {
+ BOOST_ASSERT(mlf_ != 0);
+
+ using namespace std;
+
+ // From 6.3.1/13:
+ // size < mlf_ * count
+ // => count > size / mlf_
+ //
+ // Or from rehash post-condition:
+ // count > size / mlf_
+ return static_cast<size_type>(floor(n / mlf_)) + 1;
+ }
+
+ // no throw
+ void calculate_max_load()
+ {
+ using namespace std;
+
+ // From 6.3.1/13:
+ // Only resize when size >= mlf_ * count
+ max_load_ = float_to_size_t(ceil(mlf_ * this->bucket_count_));
+ }
+
+ // basic exception safety
+ bool reserve(size_type n)
+ {
+ bool need_to_reserve = n >= max_load_;
+ // throws - basic:
+ if (need_to_reserve) rehash_impl(min_buckets_for_size(n));
+ BOOST_ASSERT(n < max_load_ || n > max_size());
+ return need_to_reserve;
+ }
+
+ // basic exception safety
+ //
+ // This version of reserve is called when inserting a range
+ // into a container with equivalent keys. It creates extra buckets
+ // so that the resulting load factor is only about 80% of the
+ // maximum load factor. This is to try to avoid excessive rehashes.
+ bool reserve_extra(size_type n)
+ {
+ bool need_to_reserve = n >= max_load_;
+ // throws - basic:
+ if (need_to_reserve) {
+ rehash_impl(static_cast<size_type>(floor(n / mlf_ * 1.25)) + 1);
+ }
+ BOOST_ASSERT(n < max_load_ || n > max_size());
+ return need_to_reserve;
+ }
+
+ public:
+
+ // no throw
+ float max_load_factor() const
+ {
+ return mlf_;
+ }
+
+ // no throw
+ void max_load_factor(float z)
+ {
+ BOOST_ASSERT(z > 0);
+ mlf_ = (std::max)(z, minimum_max_load_factor);
+ calculate_max_load();
+ }
+
+ // no throw
+ float load_factor() const
+ {
+ BOOST_ASSERT(this->bucket_count_ != 0);
+ return static_cast<float>(this->size_)
+ / static_cast<float>(this->bucket_count_);
+ }
+
+ private:
+
+ // key extractors
+
+ // no throw
+ static key_type const& extract_key(value_type const& v)
+ {
+ return extract(v, (type_wrapper<value_type>*)0);
+ }
+
+ static key_type const& extract(value_type const& v,
+ type_wrapper<key_type>*)
+ {
+ return v;
+ }
+
+ static key_type const& extract(value_type const& v,
+ void*)
+ {
+ return v.first;
+ }
+
+ public:
+
+ // if hash function throws, basic exception safety
+ // strong otherwise.
+ void rehash(size_type n)
+ {
+ using namespace std;
+
+ // no throw:
+ size_type min_size = min_buckets_for_size(size());
+ // basic/strong:
+ rehash_impl(min_size > n ? min_size : n);
+
+ BOOST_ASSERT((float) bucket_count() > (float) size() / max_load_factor()
+ && bucket_count() >= n);
+ }
+
+ private:
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ void rehash_impl(size_type n)
+ {
+ n = next_prime(n); // no throw
+
+ if (n == bucket_count()) // no throw
+ return;
+
+ data new_buckets(*this, n); // throws, separate
+ move_buckets(*this, new_buckets, hash_function());
+ // basic/no throw
+ new_buckets.swap(*this); // no throw
+ calculate_max_load(); // no throw
+ }
+
+ // move_buckets & copy_buckets
+ //
+ // if the hash function throws, basic exception safety
+ // no throw otherwise
+
+ static void move_buckets(data& src, data& dst, hasher const& hf)
+ {
+ BOOST_ASSERT(dst.size_ == 0);
+ //BOOST_ASSERT(src.allocators_.node_alloc_ == dst.allocators_.node_alloc_);
+
+ bucket_ptr end = src.buckets_ + src.bucket_count_;
+
+ for(; src.cached_begin_bucket_ != end;
+ ++src.cached_begin_bucket_) {
+ bucket_ptr src_bucket = src.cached_begin_bucket_;
+ while(src_bucket->next_) {
+ // Move the first group of equivalent nodes in
+ // src_bucket to dst.
+
+ // This next line throws iff the hash function throws.
+ bucket_ptr dst_bucket = dst.buckets_ +
+ dst.index_from_hash(
+ hf(extract_key(data::get_value(src_bucket->next_))));
+
+ link_ptr n = src_bucket->next_;
+ size_type count = src.unlink_group(&src_bucket->next_);
+ dst.link_group(n, dst_bucket, count);
+ }
+ }
+ }
+
+ // basic exception safety. If an exception is thrown this will
+ // leave dst partially filled.
+
+ static void copy_buckets(data const& src, data& dst, functions const& f)
+ {
+ BOOST_ASSERT(dst.size_ == 0);
+ // no throw:
+ bucket_ptr end = src.buckets_ + src.bucket_count_;
+ hasher const& hf = f.hash_function();
+
+ // no throw:
+ for(bucket_ptr i = src.cached_begin_bucket_; i != end; ++i) {
+ // no throw:
+ for(local_iterator_base it = src.begin(i);
+ it.not_finished(); it.next_group()) {
+ // hash function can throw.
+ bucket_ptr dst_bucket = dst.buckets_ +
+ dst.index_from_hash(hf(extract_key(*it)));
+ // throws, strong
+ dst.copy_group(it, dst_bucket);
+ }
+ }
+ }
+
+ public:
+
+ // Insert functions
+ //
+ // basic exception safety, if hash function throws
+ // strong otherwise.
+
+ // Insert (equivalent key containers)
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ iterator_base insert_equiv(value_type const& v)
+ {
+ key_type const& k = extract_key(v);
+ size_type hash_value = hash_function()(k);
+ bucket_ptr bucket = this->buckets_
+ + this->index_from_hash(hash_value);
+ local_iterator_base position = find_iterator(bucket, k);
+
+ // Create the node before rehashing in case it throws an
+ // exception (need strong safety in such a case).
+ node_constructor a(this->allocators_);
+ a.construct(v);
+
+ // reserve has basic exception safety if the hash function
+ // throws, strong otherwise.
+ if(reserve(size() + 1))
+ bucket = this->buckets_ + this->index_from_hash(hash_value);
+
+ // Nothing after this point can throw.
+
+ link_ptr n = a.release();
+
+ // I'm relying on local_iterator_base not being invalidated by
+ // the rehash here.
+ if(position.not_finished())
+ this->link_node(n, position);
+ else
+ this->link_node(n, bucket);
+
+ return iterator_base(bucket, n);
+ }
+
+ // Insert (equivalent key containers)
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ iterator_base insert_equiv(iterator_base const& it, value_type const& v)
+ {
+ // equal can throw, but with no effects
+ if (it == this->end() || !equal(extract_key(v), *it)) {
+ // Use the standard insert if the iterator doesn't point
+ // to a matching key.
+ return insert_equiv(v);
+ }
+ else {
+ // Find the first node in the group - so that the node
+ // will be inserted at the end of the group.
+
+ local_iterator_base start(it.local_);
+ while(this->prev_in_group(start.node_)->next_ == start.node_)
+ start.node_ = this->prev_in_group(start.node_);
+
+ // Create the node before rehashing in case it throws an
+ // exception (need strong safety in such a case).
+ node_constructor a(this->allocators_);
+ a.construct(v);
+
+ // reserve has basic exception safety if the hash function
+ // throws, strong otherwise.
+ bucket_ptr base = reserve(size() + 1) ?
+ get_bucket(extract_key(v)) : it.bucket_;
+
+ // Nothing after this point can throw
+
+ link_ptr n = a.release();
+ this->link_node(n, start);
+
+ return iterator_base(base, n);
+ }
+ }
+
+ // Insert from iterator range (equivalent key containers)
+
+ private:
+
+ // if hash function throws, or inserting > 1 element, basic exception safety
+ // strong otherwise
+ template <typename I>
+ void insert_for_range_equiv(I i, I j, forward_traversal_tag)
+ {
+ size_type distance = std::distance(i, j);
+ if(distance == 1) {
+ insert_equiv(*i);
+ }
+ else {
+ // Only require basic exception safety here
+ reserve_extra(size() + distance);
+
+ for (; i != j; ++i) {
+ node_constructor a(this->allocators_);
+ a.construct(*i);
+
+ key_type const& k = extract_key(a.get()->value_);
+ bucket_ptr bucket = get_bucket(k);
+ local_iterator_base position = find_iterator(bucket, k);
+
+ if(position.not_finished())
+ this->link_node(a.release(), position);
+ else
+ this->link_node(a.release(), bucket);
+ }
+ }
+ }
+
+ // if hash function throws, or inserting > 1 element, basic exception safety
+ // strong otherwise
+ template <typename I>
+ void insert_for_range_equiv(I i, I j,
+ boost::incrementable_traversal_tag)
+ {
+ // If only inserting 1 element, get the required
+ // safety since insert is only called once.
+ for (; i != j; ++i) insert_equiv(*i);
+ }
+
+ public:
+
+ // if hash function throws, or inserting > 1 element, basic exception safety
+ // strong otherwise
+ template <typename I>
+ void insert_equiv(I i, I j)
+ {
+ BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
+ iterator_traversal_tag;
+ insert_for_range_equiv(i, j, iterator_traversal_tag);
+ }
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ value_type& operator[](key_type const& k)
+ {
+ BOOST_STATIC_ASSERT((
+ !boost::is_same<value_type, key_type>::value));
+ typedef BOOST_DEDUCED_TYPENAME value_type::second_type mapped_type;
+
+ size_type hash_value = hash_function()(k);
+ bucket_ptr bucket = this->buckets_ + this->index_from_hash(hash_value);
+ local_iterator_base pos = find_iterator(bucket, k);
+
+ if (pos.not_finished())
+ return *pos;
+ else
+ {
+ // Side effects only in this block.
+
+ // Create the node before rehashing in case it throws an
+ // exception (need strong safety in such a case).
+ node_constructor a(this->allocators_);
+ a.construct(value_type(k, mapped_type()));
+
+ // reserve has basic exception safety if the hash function
+ // throws, strong otherwise.
+ if (reserve(size() + 1))
+ bucket = this->buckets_ + this->index_from_hash(hash_value);
+
+ // Nothing after this point can throw.
+
+ link_ptr n = a.release();
+ this->link_node(n, bucket);
+
+ return *local_iterator_base(n);
+ }
+ }
+
+ // Insert (unique keys)
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ std::pair<iterator_base, bool> insert_unique(value_type const& v)
+ {
+ // No side effects in this initial code
+ key_type const& k = extract_key(v);
+ size_type hash_value = hash_function()(k);
+ bucket_ptr bucket = this->buckets_ + this->index_from_hash(hash_value);
+ local_iterator_base pos = find_iterator(bucket, k);
+
+ if (pos.not_finished()) {
+ // Found an existing key, return it (no throw).
+ return std::pair<iterator_base, bool>(
+ iterator_base(bucket, pos), false);
+
+ } else {
+ // Doesn't already exist, add to bucket.
+ // Side effects only in this block.
+
+ // Create the node before rehashing in case it throws an
+ // exception (need strong safety in such a case).
+ node_constructor a(this->allocators_);
+ a.construct(v);
+
+ // reserve has basic exception safety if the hash function
+ // throws, strong otherwise.
+ if(reserve(size() + 1))
+ bucket = this->buckets_ + this->index_from_hash(hash_value);
+
+ // Nothing after this point can throw.
+
+ link_ptr n = a.release();
+ this->link_node(n, bucket);
+
+ return std::pair<iterator_base, bool>(
+ iterator_base(bucket, n), true);
+ }
+ }
+
+ // Insert (unique keys)
+
+ // if hash function throws, basic exception safety
+ // strong otherwise
+ iterator_base insert_unique(iterator_base const& it, value_type const& v)
+ {
+ if(it != this->end() && equal(extract_key(v), *it))
+ return it;
+ else
+ return insert_unique(v).first;
+ }
+
+ // Insert from iterators (unique keys)
+
+ template <typename I>
+ size_type insert_size_unique(I i, I j, boost::forward_traversal_tag)
+ {
+ return std::distance(i, j);
+ }
+
+ template <typename I>
+ size_type insert_size_unique(I, I, boost::incrementable_traversal_tag)
+ {
+ return 1;
+ }
+
+ template <typename I>
+ size_type insert_size_unique(I i, I j)
+ {
+ BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
+ iterator_traversal_tag;
+ return insert_size_unique(i, j, iterator_traversal_tag);
+ }
+
+ // if hash function throws, or inserting > 1 element, basic exception safety
+ // strong otherwise
+ template <typename InputIterator>
+ void insert_unique(InputIterator i, InputIterator j)
+ {
+ // If only inserting 1 element, get the required
+ // safety since insert is only called once.
+ for (; i != j; ++i) {
+ // No side effects in this initial code
+ size_type hash_value = hash_function()(extract_key(*i));
+ bucket_ptr bucket = this->buckets_
+ + this->index_from_hash(hash_value);
+ local_iterator_base pos = find_iterator(bucket, extract_key(*i));
+
+ if (!pos.not_finished()) {
+ // Doesn't already exist, add to bucket.
+ // Side effects only in this block.
+
+ // Create the node before rehashing in case it throws an
+ // exception (need strong safety in such a case).
+ node_constructor a(this->allocators_);
+ value_type const& v = *i;
+ a.construct(v);
+
+ // reserve has basic exception safety if the hash function
+ // throws, strong otherwise.
+ if(size() + 1 >= max_load_) {
+ reserve(size() + insert_size_unique(i, j));
+ bucket = this->buckets_ + this->index_from_hash(hash_value);
+ }
+
+ // Nothing after this point can throw.
+ this->link_node(a.release(), bucket);
+ }
+ }
+ }
+
+ public:
+
+ // erase
+
+ // no throw
+ iterator_base erase(iterator_base const& r)
+ {
+ return this->data::erase(r);
+ }
+
+ // strong exception safety
+ size_type erase(key_type const& k)
+ {
+ // No side effects in initial section
+ bucket_ptr bucket = get_bucket(k);
+ link_ptr* it = find_for_erase(bucket, k);
+
+ // No throw.
+ return *it ? this->erase_group(it, bucket) : 0;
+ }
+
+ // no throw
+ iterator_base erase(iterator_base const& r1, iterator_base const& r2)
+ {
+ return this->data::erase(r1, r2);
+ }
+
+ // count
+ //
+ // strong exception safety, no side effects
+ size_type count(key_type const& k) const
+ {
+ local_iterator_base it = find_iterator(k); // throws, strong
+ return it.not_finished() ? this->group_count(it) : 0;
+ }
+
+ // find
+ //
+ // strong exception safety, no side effects
+ iterator_base find(key_type const& k) const
+ {
+ bucket_ptr bucket = get_bucket(k);
+ local_iterator_base it = find_iterator(bucket, k);
+
+ if (it.not_finished())
+ return iterator_base(bucket, it);
+ else
+ return this->end();
+ }
+
+ value_type& at(key_type const& k) const
+ {
+ bucket_ptr bucket = get_bucket(k);
+ local_iterator_base it = find_iterator(bucket, k);
+
+ if (it.not_finished())
+ return *it;
+ else
+ throw std::out_of_range("Unable to find key in unordered_map.");
+ }
+
+ // equal_range
+ //
+ // strong exception safety, no side effects
+ std::pair<iterator_base, iterator_base> equal_range(key_type const& k) const
+ {
+ bucket_ptr bucket = get_bucket(k);
+ local_iterator_base it = find_iterator(bucket, k);
+ if (it.not_finished()) {
+ iterator_base first(iterator_base(bucket, it));
+ iterator_base second(iterator_base(bucket, this->last_in_group(it.node_)));
+ second.increment();
+ return std::pair<iterator_base, iterator_base>(first, second);
+ }
+ else {
+ return std::pair<iterator_base, iterator_base>(
+ this->end(), this->end());
+ }
+ }
+
+ private:
+
+ // strong exception safety, no side effects
+ bool equal(key_type const& k, value_type const& v) const
+ {
+ return key_eq()(k, extract_key(v));
+ }
+
+ // strong exception safety, no side effects
+ local_iterator_base find_iterator(key_type const& k) const
+ {
+ return find_iterator(get_bucket(k), k);
+ }
+
+ // strong exception safety, no side effects
+ local_iterator_base find_iterator(bucket_ptr bucket,
+ key_type const& k) const
+ {
+ local_iterator_base it = this->begin(bucket);
+ while (it.not_finished() && !equal(k, *it))
+ it.next_group();
+
+ return it;
+ }
+
+ // strong exception safety, no side effects
+ link_ptr* find_for_erase(bucket_ptr bucket, key_type const& k) const
+ {
+ link_ptr* it = &bucket->next_;
+ while(BOOST_HASH_BORLAND_BOOL(*it) && !equal(k, this->get_value(*it)))
+ it = &this->next_group(*it);
+
+ return it;
+ }
+ };
+
class iterator_access
{
public:
@@ -136,9 +1049,9 @@
boost::unordered_detail::rebind_wrap<Alloc, ValueType>::type
value_allocator;
- typedef hash_table_unique_keys<ValueType, KeyType, Hash, Pred,
- value_allocator> hash_table;
typedef hash_table_data_unique_keys<value_allocator> data;
+ typedef hash_table<ValueType, KeyType, Hash, Pred,
+ value_allocator, data> hash_table;
typedef BOOST_DEDUCED_TYPENAME data::iterator_base iterator_base;
typedef hash_const_local_iterator_unique_keys<value_allocator> const_local_iterator;
@@ -159,9 +1072,9 @@
boost::unordered_detail::rebind_wrap<Alloc, ValueType>::type
value_allocator;
- typedef hash_table_equivalent_keys<ValueType, KeyType, Hash, Pred,
- value_allocator> hash_table;
typedef hash_table_data_equivalent_keys<value_allocator> data;
+ typedef hash_table<ValueType, KeyType, Hash, Pred,
+ value_allocator, data> hash_table;
typedef BOOST_DEDUCED_TYPENAME data::iterator_base iterator_base;
typedef hash_const_local_iterator_equivalent_keys<value_allocator> const_local_iterator;
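The new class keeps its hasher and key_equal strongly exception safe with the double buffering described in the comments above (func1_, func2_ and the func_ pointer-to-member): copy into whichever slot is not in use, which may throw without affecting the active functions, then commit by repointing func_, which cannot throw. A stripped-down, self-contained sketch of that idiom; the names table, current and set_functions are illustrative only, not taken from the Boost sources:

    #include <utility>

    struct table
    {
        // Stand-in for the (hasher, key_equal) pair.
        typedef std::pair<int, int> functions;
        typedef functions table::*functions_ptr;

        functions func1_;
        functions func2_;
        functions_ptr func_;     // points at the active slot

        table() : func1_(0, 0), func2_(0, 0), func_(&table::func1_) {}

        functions const& current() const { return this->*func_; }

        // Copy into the unused slot; this may throw, but the active slot
        // is untouched, so the operation is strongly exception safe.
        functions_ptr copy_functions(functions const& x)
        {
            functions_ptr unused = func_ == &table::func1_
                ? &table::func2_ : &table::func1_;
            this->*unused = x;
            return unused;
        }

        // Commit only after the copy has succeeded (no throw).
        void set_functions(functions const& x)
        {
            func_ = copy_functions(x);
        }
    };

    int main()
    {
        table t;
        t.set_functions(std::make_pair(1, 2));
        return t.current().first == 1 ? 0 : 1;
    }
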
Modified: sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table_impl.hpp
==============================================================================
--- sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table_impl.hpp (original)
+++ sandbox-branches/unordered-refactor/boost/unordered/detail/hash_table_impl.hpp 2007-12-07 08:09:09 EST (Fri, 07 Dec 2007)
@@ -5,14 +5,12 @@
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if BOOST_UNORDERED_HASH_EQUIVALENT
-#define HASH_TABLE hash_table_equivalent_keys
#define HASH_TABLE_DATA hash_table_data_equivalent_keys
#define HASH_ITERATOR hash_iterator_equivalent_keys
#define HASH_CONST_ITERATOR hash_const_iterator_equivalent_keys
#define HASH_LOCAL_ITERATOR hash_local_iterator_equivalent_keys
#define HASH_CONST_LOCAL_ITERATOR hash_const_local_iterator_equivalent_keys
#else
-#define HASH_TABLE hash_table_unique_keys
#define HASH_TABLE_DATA hash_table_data_unique_keys
#define HASH_ITERATOR hash_iterator_unique_keys
#define HASH_CONST_ITERATOR hash_const_iterator_unique_keys
@@ -988,921 +986,6 @@
};
#endif
- //
- // Hash Table
- //
-
- template <typename ValueType, typename KeyType,
- typename Hash, typename Pred,
- typename Alloc>
- class HASH_TABLE
- : public HASH_TABLE_DATA<Alloc>
- {
- typedef HASH_TABLE_DATA<Alloc> data;
-
- typedef typename data::node_constructor node_constructor;
- typedef typename data::bucket_ptr bucket_ptr;
- typedef typename data::link_ptr link_ptr;
-
- public:
-
- typedef BOOST_DEDUCED_TYPENAME data::value_allocator value_allocator;
- typedef BOOST_DEDUCED_TYPENAME data::node_allocator node_allocator;
-
- // Type definitions
-
- typedef KeyType key_type;
- typedef Hash hasher;
- typedef Pred key_equal;
- typedef ValueType value_type;
- typedef std::size_t size_type;
-
- // iterators
-
- typedef BOOST_DEDUCED_TYPENAME data::local_iterator_base local_iterator_base;
- typedef BOOST_DEDUCED_TYPENAME data::iterator_base iterator_base;
-
- private:
-
- class functions
- {
- std::pair<hasher, key_equal> functions_;
-
- public:
-
- functions(hasher const& h, key_equal const& k)
- : functions_(h, k) {}
-
- hasher const& hash_function() const
- {
- return functions_.first;
- }
-
- key_equal const& key_eq() const
- {
- return functions_.second;
- }
- };
-
- // Both hasher and key_equal's copy/assign can throw so double
- // buffering is used to copy them. func_ points to the currently
- // active function objects.
-
- typedef functions HASH_TABLE::*functions_ptr;
-
- functions func1_;
- functions func2_;
- functions_ptr func_;
-
- float mlf_;
- size_type max_load_;
-
- public:
-
- // Constructors
- //
- // In the constructors, if anything throws an exception,
- // HASH_TABLE_DATA's destructor will clean up.
-
- HASH_TABLE(size_type n,
- hasher const& hf, key_equal const& eq,
- value_allocator const& a)
- : data(n, a), // throws, cleans itself up
- func1_(hf, eq), // throws " "
- func2_(hf, eq), // throws " "
- func_(&HASH_TABLE::func1_), // no throw
- mlf_(1.0f) // no throw
- {
- calculate_max_load(); // no throw
- }
-
- // Construct from iterators
-
- // initial_size
- //
- // A helper function for the range constructor to calculate how many
- // nodes will be created if the iterators support it. Might get it
- // totally wrong for containers with unique keys.
- //
- // no throw
-
- template <typename I>
- size_type initial_size(I i, I j, size_type n,
- boost::forward_traversal_tag)
- {
- // max load factor isn't set yet, but when it is, it'll be 1.0.
- return (std::max)(static_cast<size_type>(std::distance(i, j)) + 1, n);
- }
-
- template <typename I>
- size_type initial_size(I, I, size_type n,
- boost::incrementable_traversal_tag)
- {
- return n;
- }
-
- template <typename I>
- size_type initial_size(I i, I j, size_type n)
- {
- BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
- iterator_traversal_tag;
- return initial_size(i, j, n, iterator_traversal_tag);
- }
-
- template <typename I>
- HASH_TABLE(I i, I j, size_type n,
- hasher const& hf, key_equal const& eq,
- value_allocator const& a)
- : data(initial_size(i, j, n), a), // throws, cleans itself up
- func1_(hf, eq), // throws " "
- func2_(hf, eq), // throws " "
- func_(&HASH_TABLE::func1_), // no throw
- mlf_(1.0f) // no throw
- {
- calculate_max_load(); // no throw
- // Inserting elements is left to the containing class.
- // This is a little odd, but hopefully will be cleaned up with
- // future development.
- }
- // Copy Construct
-
- HASH_TABLE(HASH_TABLE const& x)
- : data(x, x.min_buckets_for_size(x.size())), // throws
- func1_(x.current_functions()), // throws
- func2_(x.current_functions()), // throws
- func_(&HASH_TABLE::func1_), // no throw
- mlf_(x.mlf_) // no throw
- {
- calculate_max_load(); // no throw
-
- // This can throw, but HASH_TABLE_DATA's destructor will clean
- // up.
- copy_buckets(x, *this, current_functions());
- }
-
- // Assign
- //
- // basic exception safety. If copy_functions or reserve throws,
- // the container is left in a sane, empty state. If copy_buckets
- // throws the container is left with whatever was successfully
- // copied.
-
- HASH_TABLE& operator=(HASH_TABLE const& x)
- {
- if(this != &x)
- {
- this->clear(); // no throw
- func_ = copy_functions(x); // throws, strong
- mlf_ = x.mlf_; // no throw
- calculate_max_load(); // no throw
- reserve(x.size()); // throws
- copy_buckets(x, *this, current_functions()); // throws
- }
-
- return *this;
- }
-
- // Swap
- //
- // Swap's behaviour when allocators aren't equal is in dispute; see
- // this paper for full details:
- //
- // http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2004/n1599.html
- //
- // It lists 3 possible behaviours:
- //
- // 1 - If the allocators aren't equal then throw an exception.
- // 2 - Reallocate the elements in the containers with the
- // appropriate allocators - messing up exception safety in
- // the process.
- // 3 - Swap the allocators, hoping that the allocators have a
- // no-throw swap.
- //
- // The paper recommends #3.
- //
- // ----------------------------------------------------------------
- //
- // Strong exception safety (might change unused function objects)
- //
- // Can throw if hash or predicate object's copy constructor throws.
- // If allocators are unequal:
- // Can throw if allocator's swap throws
- // (I'm assuming that the allocator's swap doesn't throw
- // but this doesn't seem to be guaranteed. Maybe I
- // could double buffer the allocators).
-
- void swap(HASH_TABLE& x)
- {
- // This only affects the function objects that aren't in use,
- // so it is strongly exception safe, via double buffering.
- functions_ptr new_func_this = copy_functions(x); // throws
- functions_ptr new_func_that = x.copy_functions(*this); // throws
-
- if(this->allocators_ == x.allocators_) {
- this->data::swap(x); // no throw
- }
- else {
- // Note: I'm not sure that allocator swap is guaranteed to be no
- // throw.
- this->allocators_.swap(x.allocators_);
- this->data::swap(x);
- }
-
- // We've made it, the rest is no throw.
- std::swap(mlf_, x.mlf_);
-
- func_ = new_func_this;
- x.func_ = new_func_that;
-
- calculate_max_load();
- x.calculate_max_load();
- }
-
- private:
-
- functions const& current_functions() const
- {
- return this->*func_;
- }
-
- // This copies the given function objects into the currently unused
- // function objects and returns a pointer that func_ can later be
- // set to, in order to commit the change.
- //
- // Strong exception safety (since only unused function objects are
- // changed).
- functions_ptr copy_functions(HASH_TABLE const& x)
- {
- // no throw:
- functions_ptr ptr = func_ == &HASH_TABLE::func1_
- ? &HASH_TABLE::func2_ : &HASH_TABLE::func1_;
- // throws, functions not in use, so strong
- this->*ptr = x.current_functions();
- return ptr;
- }
-
- public:
-
- // accessors
-
- // no throw
- value_allocator get_allocator() const
- {
- return this->allocators_.value_alloc_;
- }
-
- // no throw
- hasher const& hash_function() const
- {
- return current_functions().hash_function();
- }
-
- // no throw
- key_equal const& key_eq() const
- {
- return current_functions().key_eq();
- }
-
- // no throw
- size_type size() const
- {
- return this->size_;
- }
-
- // no throw
- bool empty() const
- {
- return this->size_ == 0;
- }
-
- // no throw
- size_type max_size() const
- {
- // size < mlf_ * count
- return float_to_size_t(ceil(
- max_bucket_count() * mlf_)) - 1;
- }
-
- // strong safety
- size_type bucket(key_type const& k) const
- {
- // hash_function can throw:
- return this->index_from_hash(hash_function()(k));
- }
-
- // strong safety
- bucket_ptr get_bucket(key_type const& k) const
- {
- return this->buckets_ + bucket(k);
- }
-
- // no throw
- size_type bucket_count() const
- {
- return this->bucket_count_;
- }
-
- // no throw
- size_type max_bucket_count() const
- {
- // -1 to account for the end marker.
- return prev_prime(this->allocators_.bucket_alloc_.max_size() - 1);
- }
-
- private:
-
- // no throw
- size_type min_buckets_for_size(size_type n) const
- {
- BOOST_ASSERT(mlf_ != 0);
-
- using namespace std;
-
- // From 6.3.1/13:
- // size < mlf_ * count
- // => count > size / mlf_
- //
- // Or from rehash post-condition:
- // count > size / mlf_
- return static_cast<size_type>(floor(n / mlf_)) + 1;
- }
-
- // no throw
- void calculate_max_load()
- {
- using namespace std;
-
- // From 6.3.1/13:
- // Only resize when size >= mlf_ * count
- max_load_ = float_to_size_t(ceil(mlf_ * this->bucket_count_));
- }
-
- // basic exception safety
- bool reserve(size_type n)
- {
- bool need_to_reserve = n >= max_load_;
- // throws - basic:
- if (need_to_reserve) rehash_impl(min_buckets_for_size(n));
- BOOST_ASSERT(n < max_load_ || n > max_size());
- return need_to_reserve;
- }
-
- // basic exception safety
- //
- // This version of reserve is called when inserting a range
- // into a container with equivalent keys. It creates extra buckets
- // so that the resulting load factor is only about 80% of the
- // maximum load factor. This is to try to avoid excessive rehashes.
- bool reserve_extra(size_type n)
- {
- bool need_to_reserve = n >= max_load_;
- // throws - basic:
- if (need_to_reserve) {
- rehash_impl(static_cast<size_type>(floor(n / mlf_ * 1.25)) + 1);
- }
- BOOST_ASSERT(n < max_load_ || n > max_size());
- return need_to_reserve;
- }
-
- public:
-
- // no throw
- float max_load_factor() const
- {
- return mlf_;
- }
-
- // no throw
- void max_load_factor(float z)
- {
- BOOST_ASSERT(z > 0);
- mlf_ = (std::max)(z, minimum_max_load_factor);
- calculate_max_load();
- }
-
- // no throw
- float load_factor() const
- {
- BOOST_ASSERT(this->bucket_count_ != 0);
- return static_cast<float>(this->size_)
- / static_cast<float>(this->bucket_count_);
- }
-
- private:
-
- // key extractors
-
- // no throw
- static key_type const& extract_key(value_type const& v)
- {
- return extract(v, (type_wrapper<value_type>*)0);
- }
-
- static key_type const& extract(value_type const& v,
- type_wrapper<key_type>*)
- {
- return v;
- }
-
- static key_type const& extract(value_type const& v,
- void*)
- {
- return v.first;
- }
-
- public:
-
- // if hash function throws, basic exception safety
- // strong otherwise.
- void rehash(size_type n)
- {
- using namespace std;
-
- // no throw:
- size_type min_size = min_buckets_for_size(size());
- // basic/strong:
- rehash_impl(min_size > n ? min_size : n);
-
- BOOST_ASSERT((float) bucket_count() > (float) size() / max_load_factor()
- && bucket_count() >= n);
- }
-
- private:
-
- // if hash function throws, basic exception safety
- // strong otherwise
- void rehash_impl(size_type n)
- {
- n = next_prime(n); // no throw
-
- if (n == bucket_count()) // no throw
- return;
-
- data new_buckets(*this, n); // throws, separate
- move_buckets(*this, new_buckets, hash_function());
- // basic/no throw
- new_buckets.swap(*this); // no throw
- calculate_max_load(); // no throw
- }
-
- // move_buckets & copy_buckets
- //
- // if the hash function throws, basic exception safety
- // no throw otherwise
-
- static void move_buckets(data& src, data& dst, hasher const& hf)
- {
- BOOST_ASSERT(dst.size_ == 0);
- //BOOST_ASSERT(src.allocators_.node_alloc_ == dst.allocators_.node_alloc_);
-
- bucket_ptr end = src.buckets_ + src.bucket_count_;
-
- for(; src.cached_begin_bucket_ != end;
- ++src.cached_begin_bucket_) {
- bucket_ptr src_bucket = src.cached_begin_bucket_;
- while(src_bucket->next_) {
- // Move the first group of equivalent nodes in
- // src_bucket to dst.
-
- // This next line throws iff the hash function throws.
- bucket_ptr dst_bucket = dst.buckets_ +
- dst.index_from_hash(
- hf(extract_key(data::get_value(src_bucket->next_))));
-
- link_ptr n = src_bucket->next_;
- size_type count = src.unlink_group(&src_bucket->next_);
- dst.link_group(n, dst_bucket, count);
- }
- }
- }
-
- // basic exception safety. If an exception is thrown this will
- // leave dst partially filled.
-
- static void copy_buckets(data const& src, data& dst, functions const& f)
- {
- BOOST_ASSERT(dst.size_ == 0);
- // no throw:
- bucket_ptr end = src.buckets_ + src.bucket_count_;
- hasher const& hf = f.hash_function();
-
- // no throw:
- for(bucket_ptr i = src.cached_begin_bucket_; i != end; ++i) {
- // no throw:
- for(local_iterator_base it = src.begin(i);
- it.not_finished(); it.next_group()) {
- // hash function can throw.
- bucket_ptr dst_bucket = dst.buckets_ +
- dst.index_from_hash(hf(extract_key(*it)));
- // throws, strong
- dst.copy_group(it, dst_bucket);
- }
- }
- }
-
- public:
-
- // Insert functions
- //
- // basic exception safety, if hash function throws
- // strong otherwise.
-
- // Insert (equivalent key containers)
-
- // if hash function throws, basic exception safety
- // strong otherwise
- iterator_base insert_equiv(value_type const& v)
- {
- key_type const& k = extract_key(v);
- size_type hash_value = hash_function()(k);
- bucket_ptr bucket = this->buckets_
- + this->index_from_hash(hash_value);
- local_iterator_base position = find_iterator(bucket, k);
-
- // Create the node before rehashing in case it throws an
- // exception (need strong safety in such a case).
- node_constructor a(this->allocators_);
- a.construct(v);
-
- // reserve has basic exception safety if the hash function
- // throws, strong otherwise.
- if(reserve(size() + 1))
- bucket = this->buckets_ + this->index_from_hash(hash_value);
-
- // Nothing after this point can throw.
-
- link_ptr n = a.release();
-
- // I'm relying on local_iterator_base not being invalidated by
- // the rehash here.
- if(position.not_finished())
- this->link_node(n, position);
- else
- this->link_node(n, bucket);
-
- return iterator_base(bucket, n);
- }
-
- // Insert (equivalent key containers)
-
- // if hash function throws, basic exception safety
- // strong otherwise
- iterator_base insert_equiv(iterator_base const& it, value_type const& v)
- {
- // equal can throw, but with no effects
- if (it == this->end() || !equal(extract_key(v), *it)) {
- // Use the standard insert if the iterator doesn't point
- // to a matching key.
- return insert_equiv(v);
- }
- else {
- // Find the first node in the group - so that the node
- // will be inserted at the end of the group.
-
- local_iterator_base start(it.local_);
- while(this->prev_in_group(start.node_)->next_ == start.node_)
- start.node_ = this->prev_in_group(start.node_);
-
- // Create the node before rehashing in case it throws an
- // exception (need strong safety in such a case).
- node_constructor a(this->allocators_);
- a.construct(v);
-
- // reserve has basic exception safety if the hash function
- // throws, strong otherwise.
- bucket_ptr base = reserve(size() + 1) ?
- get_bucket(extract_key(v)) : it.bucket_;
-
- // Nothing after this point can throw
-
- link_ptr n = a.release();
- this->link_node(n, start);
-
- return iterator_base(base, n);
- }
- }
-
- // Insert from iterator range (equivalent key containers)
-
- private:
-
- // if hash function throws, or inserting > 1 element, basic exception safety
- // strong otherwise
- template <typename I>
- void insert_for_range_equiv(I i, I j, forward_traversal_tag)
- {
- size_type distance = std::distance(i, j);
- if(distance == 1) {
- insert_equiv(*i);
- }
- else {
- // Only require basic exception safety here
- reserve_extra(size() + distance);
-
- for (; i != j; ++i) {
- node_constructor a(this->allocators_);
- a.construct(*i);
-
- key_type const& k = extract_key(a.get()->value_);
- bucket_ptr bucket = get_bucket(k);
- local_iterator_base position = find_iterator(bucket, k);
-
- if(position.not_finished())
- this->link_node(a.release(), position);
- else
- this->link_node(a.release(), bucket);
- }
- }
- }
-
- // if hash function throws, or inserting > 1 element, basic exception safety
- // strong otherwise
- template <typename I>
- void insert_for_range_equiv(I i, I j,
- boost::incrementable_traversal_tag)
- {
- // If only inserting 1 element, get the required
- // safety since insert is only called once.
- for (; i != j; ++i) insert_equiv(*i);
- }
-
- public:
-
- // if hash function throws, or inserting > 1 element, basic exception safety
- // strong otherwise
- template <typename I>
- void insert_equiv(I i, I j)
- {
- BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
- iterator_traversal_tag;
- insert_for_range_equiv(i, j, iterator_traversal_tag);
- }
-
- // if hash function throws, basic exception safety
- // strong otherwise
- value_type& operator[](key_type const& k)
- {
- BOOST_STATIC_ASSERT((
- !boost::is_same<value_type, key_type>::value));
- typedef BOOST_DEDUCED_TYPENAME value_type::second_type mapped_type;
-
- size_type hash_value = hash_function()(k);
- bucket_ptr bucket = this->buckets_ + this->index_from_hash(hash_value);
- local_iterator_base pos = find_iterator(bucket, k);
-
- if (pos.not_finished())
- return *pos;
- else
- {
- // Side effects only in this block.
-
- // Create the node before rehashing in case it throws an
- // exception (need strong safety in such a case).
- node_constructor a(this->allocators_);
- a.construct(value_type(k, mapped_type()));
-
- // reserve has basic exception safety if the hash function
- // throws, strong otherwise.
- if (reserve(size() + 1))
- bucket = this->buckets_ + this->index_from_hash(hash_value);
-
- // Nothing after this point can throw.
-
- link_ptr n = a.release();
- this->link_node(n, bucket);
-
- return *local_iterator_base(n);
- }
- }
-
- // Insert (unique keys)
-
- // if hash function throws, basic exception safety
- // strong otherwise
- std::pair<iterator_base, bool> insert_unique(value_type const& v)
- {
- // No side effects in this initial code
- key_type const& k = extract_key(v);
- size_type hash_value = hash_function()(k);
- bucket_ptr bucket = this->buckets_ + this->index_from_hash(hash_value);
- local_iterator_base pos = find_iterator(bucket, k);
-
- if (pos.not_finished()) {
- // Found an existing key, return it (no throw).
- return std::pair<iterator_base, bool>(
- iterator_base(bucket, pos), false);
-
- } else {
- // Doesn't already exist, add to bucket.
- // Side effects only in this block.
-
- // Create the node before rehashing in case it throws an
- // exception (need strong safety in such a case).
- node_constructor a(this->allocators_);
- a.construct(v);
-
- // reserve has basic exception safety if the hash function
- // throws, strong otherwise.
- if(reserve(size() + 1))
- bucket = this->buckets_ + this->index_from_hash(hash_value);
-
- // Nothing after this point can throw.
-
- link_ptr n = a.release();
- this->link_node(n, bucket);
-
- return std::pair<iterator_base, bool>(
- iterator_base(bucket, n), true);
- }
- }
-
- // Insert (unique keys)
-
- // if hash function throws, basic exception safety
- // strong otherwise
- iterator_base insert_unique(iterator_base const& it, value_type const& v)
- {
- if(it != this->end() && equal(extract_key(v), *it))
- return it;
- else
- return insert_unique(v).first;
- }
-
- // Insert from iterators (unique keys)
-
- template <typename I>
- size_type insert_size_unique(I i, I j, boost::forward_traversal_tag)
- {
- return std::distance(i, j);
- }
-
- template <typename I>
- size_type insert_size_unique(I, I, boost::incrementable_traversal_tag)
- {
- return 1;
- }
-
- template <typename I>
- size_type insert_size_unique(I i, I j)
- {
- BOOST_DEDUCED_TYPENAME boost::iterator_traversal<I>::type
- iterator_traversal_tag;
- return insert_size_unique(i, j, iterator_traversal_tag);
- }
-
- // if hash function throws, or inserting > 1 element, basic exception safety
- // strong otherwise
- template <typename InputIterator>
- void insert_unique(InputIterator i, InputIterator j)
- {
- // If only inserting 1 element, get the required
- // safety since insert is only called once.
- for (; i != j; ++i) {
- // No side effects in this initial code
- size_type hash_value = hash_function()(extract_key(*i));
- bucket_ptr bucket = this->buckets_
- + this->index_from_hash(hash_value);
- local_iterator_base pos = find_iterator(bucket, extract_key(*i));
-
- if (!pos.not_finished()) {
- // Doesn't already exist, add to bucket.
- // Side effects only in this block.
-
- // Create the node before rehashing in case it throws an
- // exception (need strong safety in such a case).
- node_constructor a(this->allocators_);
- value_type const& v = *i;
- a.construct(v);
-
- // reserve has basic exception safety if the hash function
- // throws, strong otherwise.
- if(size() + 1 >= max_load_) {
- reserve(size() + insert_size_unique(i, j));
- bucket = this->buckets_ + this->index_from_hash(hash_value);
- }
-
- // Nothing after this point can throw.
- this->link_node(a.release(), bucket);
- }
- }
- }
-
- public:
-
- // erase
-
- // no throw
- iterator_base erase(iterator_base const& r)
- {
- return this->data::erase(r);
- }
-
- // strong exception safety
- size_type erase(key_type const& k)
- {
- // No side effects in initial section
- bucket_ptr bucket = get_bucket(k);
- link_ptr* it = find_for_erase(bucket, k);
-
- // No throw.
- return *it ? this->erase_group(it, bucket) : 0;
- }
-
- // no throw
- iterator_base erase(iterator_base const& r1, iterator_base const& r2)
- {
- return this->data::erase(r1, r2);
- }
-
- // count
- //
- // strong exception safety, no side effects
- size_type count(key_type const& k) const
- {
- local_iterator_base it = find_iterator(k); // throws, strong
- return it.not_finished() ? this->group_count(it) : 0;
- }
-
- // find
- //
- // strong exception safety, no side effects
- iterator_base find(key_type const& k) const
- {
- bucket_ptr bucket = get_bucket(k);
- local_iterator_base it = find_iterator(bucket, k);
-
- if (it.not_finished())
- return iterator_base(bucket, it);
- else
- return this->end();
- }
-
- value_type& at(key_type const& k) const
- {
- bucket_ptr bucket = get_bucket(k);
- local_iterator_base it = find_iterator(bucket, k);
-
- if (it.not_finished())
- return *it;
- else
- throw std::out_of_range("Unable to find key in unordered_map.");
- }
-
- // equal_range
- //
- // strong exception safety, no side effects
- std::pair<iterator_base, iterator_base> equal_range(key_type const& k) const
- {
- bucket_ptr bucket = get_bucket(k);
- local_iterator_base it = find_iterator(bucket, k);
- if (it.not_finished()) {
- iterator_base first(iterator_base(bucket, it));
- iterator_base second(iterator_base(bucket, this->last_in_group(it.node_)));
- second.increment();
- return std::pair<iterator_base, iterator_base>(first, second);
- }
- else {
- return std::pair<iterator_base, iterator_base>(
- this->end(), this->end());
- }
- }
-
- private:
-
- // strong exception safety, no side effects
- bool equal(key_type const& k, value_type const& v) const
- {
- return key_eq()(k, extract_key(v));
- }
-
- // strong exception safety, no side effects
- local_iterator_base find_iterator(key_type const& k) const
- {
- return find_iterator(get_bucket(k), k);
- }
-
- // strong exception safety, no side effects
- local_iterator_base find_iterator(bucket_ptr bucket,
- key_type const& k) const
- {
- local_iterator_base it = this->begin(bucket);
- while (it.not_finished() && !equal(k, *it))
- it.next_group();
-
- return it;
- }
-
- // strong exception safety, no side effects
- link_ptr* find_for_erase(bucket_ptr bucket, key_type const& k) const
- {
- link_ptr* it = &bucket->next_;
- while(BOOST_HASH_BORLAND_BOOL(*it) && !equal(k, this->get_value(*it)))
- it = &this->next_group(*it);
-
- return it;
- }
- };
-
// Iterators
template <typename Alloc> class HASH_ITERATOR;
@@ -2056,7 +1139,7 @@
}
}
-#undef HASH_TABLE
+
#undef HASH_TABLE_DATA
#undef HASH_ITERATOR
#undef HASH_CONST_ITERATOR
Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk