Hi
Here is the last patch I can think of for 4.8. Thanks to it default
performance reported in performance/23_containers/insert/54075.cc and
performance/23_containers/insert_erase/41975.cc are always the best:
54075.cc std::unordered_set without hash code cached
30 insertion attempts, 30 inserted 10r 8u 1s 13761936mem 0pf
54075.cc std::unordered_set without hash code cached
10 times insertion of 30 elements 31r 31u 0s 0mem 0pf
54075.cc std::unordered_set with hash code cached
30 insertion attempts, 30 inserted 10r 9u 1s 18562000mem 0pf
54075.cc std::unordered_set with hash code cached
10 times insertion of 30 elements 34r 35u 0s 0mem 0pf
54075.cc std::unordered_set default cache
30 insertion attempts, 30 inserted 9r 8u 0s 13761936mem 0pf
54075.cc std::unordered_set default cache
10 times insertion of 30 elements 31r 32u 0s 0mem 0pf
41975.cc std::unordered_set without hash
code cached: first insert 9r 9u 0s 8450336mem 0pf
41975.cc std::unordered_set without hash
code cached: erase from iterator 6r 5u 0s -6400096mem 0pf
41975.cc std::unordered_set without hash
code cached: second insert 6r 5u 0s 640mem 0pf
41975.cc std::unordered_set without hash
code cached: erase from key 5r 5u 0s -640mem 0pf
41975.cc std::unordered_set with hash code
cached: first insert 5r 5u 1s 8450336mem 0pf
41975.cc std::unordered_set with hash code
cached: erase from iterator 4r 3u 0s -6400096mem 0pf
41975.cc std::unordered_set with hash code
cached: second insert 3r 3u 0s 6400016mem 0pf
41975.cc std::unordered_set with hash code
cached: erase from key 4r 3u 0s -6400016mem 0pf
41975.cc std::unordered_set default cache:
first insert 5r 5u 1s 8450336mem 0pf
41975.cc std::unordered_set default cache:
erase from iterator 4r 3u 0s -6400096mem 0pf
41975.cc std::unordered_set default cache:
second insert 3r 3u 0s 640mem 0pf
41975.cc std::unordered_set default cache:
erase from key 4r 3u 0s -640mem 0pf
2013-02-02 François Dumont
* include/bits/functional_hash.h (std::__is_fast_hash<>): New.
* include/bits/basic_string.h: Specialize previous to mark
std::hash for string types as slow.
* include/bits/hashtable.h (__cache_default): Replace is_integral
with __is_fast_hash.
* src/c++11/hash_c++0x.cc: Add type_traits include.
Tested under Linux x86_64.
Ok to commit ?
François
Index: include/bits/functional_hash.h
===================================================================
--- include/bits/functional_hash.h (revision 195686)
+++ include/bits/functional_hash.h (working copy)
@@ -195,6 +195,18 @@
// @} group hashes
+  // Hint about performance of hash functor. If not fast the hash based
+  // containers will cache the hash code.
+  // Default behavior is to consider that hashers are fast unless specified
+  // otherwise.
+  template<typename _Hash>
+    struct __is_fast_hash : public std::true_type
+    { };
+
+  template<>
+    struct __is_fast_hash<hash<long double>> : public std::false_type
+    { };
+
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
Index: include/bits/basic_string.h
===================================================================
--- include/bits/basic_string.h (revision 195686)
+++ include/bits/basic_string.h (working copy)
@@ -3053,6 +3053,10 @@
{ return std::_Hash_impl::hash(__s.data(), __s.length()); }
};
+  template<>
+    struct __is_fast_hash<hash<string>> : std::false_type
+    { };
+
#ifdef _GLIBCXX_USE_WCHAR_T
/// std::hash specialization for wstring.
template<>
@@ -3064,6 +3068,10 @@
{ return std::_Hash_impl::hash(__s.data(),
__s.length() * sizeof(wchar_t)); }
};
+
+  template<>
+    struct __is_fast_hash<hash<wstring>> : std::false_type
+    { };
#endif
#endif /* _GLIBCXX_COMPATIBILITY_CXX0X */
@@ -3079,6 +3087,10 @@
__s.length() * sizeof(char16_t)); }
};
+  template<>
+    struct __is_fast_hash<hash<u16string>> : std::false_type
+    { };
+
/// std::hash specialization for u32string.
template<>
struct hash<u32string>
@@ -3089,6 +3101,10 @@
{ return std::_Hash_impl::hash(__s.data(),
__s.length() * sizeof(char32_t)); }
};
+
+  template<>
+    struct __is_fast_hash<hash<u32string>> : std::false_type
+    { };
#endif
_GLIBCXX_END_NAMESPACE_VERSION
Index: include/bits/hashtable.h