Following several remarks on a previous patch of _Hashtable I considered
clarifying its implementation.
libstdc++: [_Hashtable] Fix some implementation inconsistencies
Get rid of the different usages of the mutable keyword. As
_Prime_rehash_policy methods are exported from the library, we need to
keep their const qualifier, so adapt the implementation to update the
previously mutable member.
Remove useless noexcept qualification on _Hashtable _M_bucket_index
overload.
Fix comment to explain that we need the computation of the bucket index
to be noexcept to be able to rehash the container when needed. For
Standard instantiations through std::unordered_xxx containers we already
force usage of the hash code cache when the hash functor is not
noexcept, so it is guaranteed.
The purpose of the static_assert in _Hashtable on _M_bucket_index is
thus limited to usages of _Hashtable with exotic _Hashtable_traits.
libstdc++-v3/ChangeLog:
* include/bits/hashtable_policy.h
(_NodeBuilder<>::_S_build): Remove
const qualification on _NodeGenerator instance.
(_ReuseOrAllocNode<>::operator()(_Args&&...)): Remove const qualification.
(_ReuseOrAllocNode<>::_M_nodes): Remove mutable.
(_Prime_rehash_policy::max_load_factor()): Remove noexcept.
(_Prime_rehash_policy::_M_reset()): Remove noexcept.
(_Prime_rehash_policy::_M_next_resize): Remove mutable.
(_Power2_rehash_policy::_M_next_bkt(size_t)): Remove noexcept.
(_Power2_rehash_policy::_M_bkt_for_elements(size_t)):
Remove noexcept.
(_Power2_rehash_policy::_M_need_rehash): Remove noexcept.
(_Power2_rehash_policy::_M_reset): Remove noexcept.
(_Insert_base<>::_M_insert_range): Remove _NodeGetter const
qualification.
(_Hash_code_base<>::_M_bucket_index(const
_Hash_node_value<>&, size_t)):
Simplify noexcept declaration, we already static_assert
that _RangeHash functor
is noexcept.
* include/bits/hashtable.h: Rework comments. Remove const
qualifier on
_NodeGenerator& arguments.
(_Hashtable<>::_M_bucket_index(const __node_value_type&)):
Remove useless
noexcept qualification.
* src/c++11/hashtable_c++0x.cc (_Prime_rehash_policy):
Workaround
_M_next_resize not being mutable anymore.
Tested under Linux x86_64.
OK to commit?
François
diff --git a/libstdc++-v3/include/bits/hashtable.h
b/libstdc++-v3/include/bits/hashtable.h
index 89132430f3e..8d462ce7bd5 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -51,7 +51,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
using __cache_default
= __not_<__and_<// Do not cache for fast hasher.
__is_fast_hash<_Hash>,
- // Mandatory to have erase not throwing.
+ // Mandatory for the rehash process.
__is_nothrow_invocable<const _Hash&, const _Tp&>>>;
// Helper to conditionally delete the default constructor.
@@ -480,7 +480,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Ht, typename _NodeGenerator>
void
- _M_assign(_Ht&&, const _NodeGenerator&);
+ _M_assign(_Ht&&, _NodeGenerator&);
void
_M_move_assign(_Hashtable&&, true_type);
@@ -795,7 +795,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
// Bucket index computation helpers.
size_type
- _M_bucket_index(const __node_value_type& __n) const noexcept
+ _M_bucket_index(const __node_value_type& __n) const
{ return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
size_type
@@ -923,7 +923,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Kt, typename _Arg, typename _NodeGenerator>
std::pair<iterator, bool>
- _M_insert_unique(_Kt&&, _Arg&&, const _NodeGenerator&);
+ _M_insert_unique(_Kt&&, _Arg&&, _NodeGenerator&);
template<typename _Kt>
static __conditional_t<
@@ -943,7 +943,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Arg, typename _NodeGenerator>
std::pair<iterator, bool>
- _M_insert_unique_aux(_Arg&& __arg, const _NodeGenerator& __node_gen)
+ _M_insert_unique_aux(_Arg&& __arg, _NodeGenerator& __node_gen)
{
return _M_insert_unique(
_S_forward_key(_ExtractKey{}(std::forward<_Arg>(__arg))),
@@ -952,7 +952,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Arg, typename _NodeGenerator>
std::pair<iterator, bool>
- _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
+ _M_insert(_Arg&& __arg, _NodeGenerator& __node_gen,
true_type /* __uks */)
{
using __to_value
@@ -963,7 +963,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Arg, typename _NodeGenerator>
iterator
- _M_insert(_Arg&& __arg, const _NodeGenerator& __node_gen,
+ _M_insert(_Arg&& __arg, _NodeGenerator& __node_gen,
false_type __uks)
{
using __to_value
@@ -976,7 +976,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Arg, typename _NodeGenerator>
iterator
_M_insert(const_iterator, _Arg&& __arg,
- const _NodeGenerator& __node_gen, true_type __uks)
+ _NodeGenerator& __node_gen, true_type __uks)
{
return
_M_insert(std::forward<_Arg>(__arg), __node_gen, __uks).first;
@@ -986,7 +986,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename _Arg, typename _NodeGenerator>
iterator
_M_insert(const_iterator, _Arg&&,
- const _NodeGenerator&, false_type __uks);
+ _NodeGenerator&, false_type __uks);
size_type
_M_erase(true_type __uks, const key_type&);
@@ -1381,7 +1381,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
- _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
+ _M_assign(_Ht&& __ht, _NodeGenerator& __node_gen)
{
__buckets_ptr __buckets = nullptr;
if (!_M_buckets)
@@ -1623,8 +1623,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
~_Hashtable() noexcept
{
// Getting a bucket index from a node shall not throw because it is used
- // in methods (erase, swap...) that shall not throw. Need a complete
- // type to check this, so do it in the destructor not at class scope.
+ // during the rehash process. This static_assert purpose is limited to
usage
+ // of _Hashtable with _Hashtable_traits requesting non-cached hash code.
+ // Need a complete type to check this, so do it in the destructor not at
+ // class scope.
static_assert(noexcept(declval<const __hash_code_base_access&>()
._M_bucket_index(declval<const __node_value_type&>(),
(std::size_t)0)),
@@ -2225,7 +2227,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_insert_unique(_Kt&& __k, _Arg&& __v,
- const _NodeGenerator& __node_gen)
+ _NodeGenerator& __node_gen)
-> pair<iterator, bool>
{
if (size() <= __small_size_threshold())
@@ -2262,7 +2264,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
_M_insert(const_iterator __hint, _Arg&& __v,
- const _NodeGenerator& __node_gen,
+ _NodeGenerator& __node_gen,
false_type /* __uks */)
-> iterator
{
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h
b/libstdc++-v3/include/bits/hashtable_policy.h
index c67eebd3b2b..5c40be2cb72 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -155,7 +155,7 @@ namespace __detail
{
template<typename _Kt, typename _Arg, typename _NodeGenerator>
static auto
- _S_build(_Kt&& __k, _Arg&& __arg, const _NodeGenerator& __node_gen)
+ _S_build(_Kt&& __k, _Arg&& __arg, _NodeGenerator& __node_gen)
-> typename _NodeGenerator::__node_ptr
{
return __node_gen(std::forward<_Kt>(__k),
@@ -168,7 +168,7 @@ namespace __detail
{
template<typename _Kt, typename _Arg, typename _NodeGenerator>
static auto
- _S_build(_Kt&& __k, _Arg&&, const _NodeGenerator& __node_gen)
+ _S_build(_Kt&& __k, _Arg&&, _NodeGenerator& __node_gen)
-> typename _NodeGenerator::__node_ptr
{ return __node_gen(std::forward<_Kt>(__k)); }
};
@@ -212,7 +212,7 @@ namespace __detail
template<typename... _Args>
__node_ptr
- operator()(_Args&&... __args) const
+ operator()(_Args&&... __args)
{
if (!_M_nodes)
return _M_h._M_allocate_node(std::forward<_Args>(__args)...);
@@ -230,7 +230,7 @@ namespace __detail
}
private:
- mutable __node_ptr _M_nodes;
+ __node_ptr _M_nodes;
__hashtable_alloc& _M_h;
};
@@ -551,10 +551,11 @@ namespace __detail
: _M_max_load_factor(__z), _M_next_resize(0) { }
float
- max_load_factor() const noexcept
+ max_load_factor() const
{ return _M_max_load_factor; }
// Return a bucket size no smaller than n.
+ // TODO: 'const' qualifier is kept for abi compatibility reason.
std::size_t
_M_next_bkt(std::size_t __n) const;
@@ -567,6 +568,7 @@ namespace __detail
// and __n_ins is number of elements to be inserted. Do we need to
// increase bucket count? If so, return make_pair(true, n), where n
// is the new bucket count. If not, return make_pair(false, 0).
+ // TODO: 'const' qualifier is kept for abi compatibility reason.
std::pair<bool, std::size_t>
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const;
@@ -578,7 +580,7 @@ namespace __detail
{ return _M_next_resize; }
void
- _M_reset() noexcept
+ _M_reset()
{ _M_next_resize = 0; }
void
@@ -588,7 +590,7 @@ namespace __detail
static const std::size_t _S_growth_factor = 2;
float _M_max_load_factor;
- mutable std::size_t _M_next_resize;
+ std::size_t _M_next_resize;
};
/// Range hashing function assuming that second arg is a power of 2.
@@ -629,13 +631,13 @@ namespace __detail
: _M_max_load_factor(__z), _M_next_resize(0) { }
float
- max_load_factor() const noexcept
+ max_load_factor() const
{ return _M_max_load_factor; }
// Return a bucket size no smaller than n (as long as n is not above the
// highest power of 2).
std::size_t
- _M_next_bkt(std::size_t __n) noexcept
+ _M_next_bkt(std::size_t __n)
{
if (__n == 0)
// Special case on container 1st initialization with 0 bucket count
@@ -669,7 +671,7 @@ namespace __detail
// Return a bucket count appropriate for n elements
std::size_t
- _M_bkt_for_elements(std::size_t __n) const noexcept
+ _M_bkt_for_elements(std::size_t __n) const
{ return __builtin_ceil(__n / (double)_M_max_load_factor); }
// __n_bkt is current bucket count, __n_elt is current element count,
@@ -678,7 +680,7 @@ namespace __detail
// is the new bucket count. If not, return make_pair(false, 0).
std::pair<bool, std::size_t>
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
- std::size_t __n_ins) noexcept
+ std::size_t __n_ins)
{
if (__n_elt + __n_ins > _M_next_resize)
{
@@ -708,11 +710,11 @@ namespace __detail
{ return _M_next_resize; }
void
- _M_reset() noexcept
+ _M_reset()
{ _M_next_resize = 0; }
void
- _M_reset(_State __state) noexcept
+ _M_reset(_State __state)
{ _M_next_resize = __state; }
static const std::size_t _S_growth_factor = 2;
@@ -931,12 +933,12 @@ namespace __detail
template<typename _InputIterator, typename _NodeGetter>
void
_M_insert_range(_InputIterator __first, _InputIterator __last,
- const _NodeGetter&, true_type __uks);
+ _NodeGetter&, true_type __uks);
template<typename _InputIterator, typename _NodeGetter>
void
_M_insert_range(_InputIterator __first, _InputIterator __last,
- const _NodeGetter&, false_type __uks);
+ _NodeGetter&, false_type __uks);
public:
using iterator = _Node_iterator<_Value, __constant_iterators::value,
@@ -1012,7 +1014,7 @@ namespace __detail
_Hash, _RangeHash, _Unused,
_RehashPolicy, _Traits>::
_M_insert_range(_InputIterator __first, _InputIterator __last,
- const _NodeGetter& __node_gen, true_type __uks)
+ _NodeGetter& __node_gen, true_type __uks)
{
__hashtable& __h = _M_conjure_hashtable();
for (; __first != __last; ++__first)
@@ -1029,7 +1031,7 @@ namespace __detail
_Hash, _RangeHash, _Unused,
_RehashPolicy, _Traits>::
_M_insert_range(_InputIterator __first, _InputIterator __last,
- const _NodeGetter& __node_gen, false_type __uks)
+ _NodeGetter& __node_gen, false_type __uks)
{
using __rehash_guard_t = typename __hashtable::__rehash_guard_t;
using __pair_type = std::pair<bool, std::size_t>;
@@ -1359,9 +1361,7 @@ namespace __detail
std::size_t
_M_bucket_index(const _Hash_node_value<_Value, false>& __n,
std::size_t __bkt_count) const
- noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>()))
- && noexcept(declval<const _RangeHash&>()((__hash_code)0,
- (std::size_t)0)) )
+ noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>())) )
{
return _RangeHash{}(_M_hash_code(_ExtractKey{}(__n._M_v())),
__bkt_count);
@@ -1369,9 +1369,7 @@ namespace __detail
std::size_t
_M_bucket_index(const _Hash_node_value<_Value, true>& __n,
- std::size_t __bkt_count) const
- noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
- (std::size_t)0)) )
+ std::size_t __bkt_count) const noexcept
{ return _RangeHash{}(__n._M_hash_code, __bkt_count); }
void
diff --git a/libstdc++-v3/src/c++11/hashtable_c++0x.cc
b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
index 9673d867402..e75b09a31bb 100644
--- a/libstdc++-v3/src/c++11/hashtable_c++0x.cc
+++ b/libstdc++-v3/src/c++11/hashtable_c++0x.cc
@@ -45,6 +45,7 @@ namespace __detail
std::size_t
_Prime_rehash_policy::_M_next_bkt(std::size_t __n) const
{
+ auto __mutable_this = const_cast<_Prime_rehash_policy*>(this);
// Optimize lookups involving the first elements of __prime_list.
// (useful to speed-up, eg, constructors)
static const unsigned char __fast_bkt[]
@@ -58,7 +59,7 @@ namespace __detail
// want to add an element allocation will take place.
return 1;
- _M_next_resize =
+ __mutable_this->_M_next_resize =
__builtin_floor(__fast_bkt[__n] * (double)_M_max_load_factor);
return __fast_bkt[__n];
}
@@ -79,9 +80,9 @@ namespace __detail
// Set next resize to the max value so that we never try to rehash again
// as we already reach the biggest possible bucket number.
// Note that it might result in max_load_factor not being respected.
- _M_next_resize = size_t(-1);
+ __mutable_this->_M_next_resize = size_t(-1);
else
- _M_next_resize =
+ __mutable_this->_M_next_resize =
__builtin_floor(*__next_bkt * (double)_M_max_load_factor);
return *__next_bkt;
@@ -101,6 +102,7 @@ namespace __detail
_M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt,
std::size_t __n_ins) const
{
+ auto __mutable_this = const_cast<_Prime_rehash_policy*>(this);
if (__n_elt + __n_ins > _M_next_resize)
{
// If _M_next_resize is 0 it means that we have nothing allocated so
@@ -114,7 +116,7 @@ namespace __detail
_M_next_bkt(std::max<std::size_t>(__builtin_floor(__min_bkts) + 1,
__n_bkt * _S_growth_factor)) };
- _M_next_resize
+ __mutable_this->_M_next_resize
= __builtin_floor(__n_bkt * (double)_M_max_load_factor);
return { false, 0 };
}