| | | |
|---|---|---|
| author | Vsevolod Stakhov <vsevolod@rspamd.com> | 2022-07-17 16:43:47 +0100 |
| committer | Vsevolod Stakhov <vsevolod@rspamd.com> | 2022-07-17 16:43:47 +0100 |
| commit | 969902f04569ab12e69e0d57ef1f6e9f0c42f855 (patch) | |
| tree | 2d4ae6f19a613c4327e94da917aa89694dd2ff00 | |
| parent | 93022e6207ed15f8dd231ad43511d7c9e3f2eee8 (diff) | |
| download | rspamd-969902f04569ab12e69e0d57ef1f6e9f0c42f855.tar.gz, rspamd-969902f04569ab12e69e0d57ef1f6e9f0c42f855.zip | |
[Rework] Use another version of hash table from the same author
24 files changed, 2281 insertions(+), 2513 deletions(-)
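For context on what this rework means at a call site, here is a minimal before/after sketch (hypothetical, not taken from this commit): the removed robin-hood flat map and the new `unordered_dense` map expose the same `std::unordered_map`-like API, so the swap is mostly a type change. This assumes the `ankerl::unordered_dense::map` alias defined further down in `unordered_dense.h`, past the excerpt visible in this diff, and an include path matching the new contrib layout.

```cpp
// Hypothetical call site illustrating this rework; not from the rspamd sources.
#include "ankerl/unordered_dense.h"

#include <cstdio>
#include <string>

int main() {
    // was: robin_hood::unordered_flat_map<std::string, int> counts;
    ankerl::unordered_dense::map<std::string, int> counts;

    counts.try_emplace("spam", 0);
    ++counts["spam"];

    // Values live densely in one contiguous container (no holes),
    // so iteration is a plain linear scan.
    for (auto const& [token, n] : counts) {
        std::printf("%s: %d\n", token.c_str(), n);
    }
    return 0;
}
```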
diff --git a/contrib/DEPENDENCY_INFO.md b/contrib/DEPENDENCY_INFO.md
index fd01fa5ae..9c5cdb431 100644
--- a/contrib/DEPENDENCY_INFO.md
+++ b/contrib/DEPENDENCY_INFO.md
@@ -1,38 +1,39 @@
 # Rspamd Dependency Info
-| Name | Version | License | Patched | Notes |
-| --- |---------| --- | --- | --- |
-| aho-corasick | ? | LGPL-3.0 | YES | lowercase support |
-| cdb | 1.1.0 | Public Domain / CC0 | NO | |
-| hiredis | 0.13.3 | BSD-3-Clause | YES | many changes |
-| libev | 4.33 | BSD-2-Clause | YES | many changes |
-| lc-btrie | ? | BSD-3-Clause | YES | mempool support |
-| libottery | ? | Public Domain / CC0 | YES | many changes |
-| librdns | ? | BSD-2-Clause | YES | |
-| libucl | ? | BSD-2-Clause | YES | |
-| replxx | 6d93360 | BSD-2-Clause | YES | libicu usage |
-| lua-argparse | 0.7.1 | MIT | NO | |
-| lua-bit | 1.0.2 | MIT | YES | build fixes |
-| lua-fun | ? | MIT | YES | rspamd text |
-| lua-lpeg | 1.0 | MIT | YES | rspamd text + alloc|
-| lua-moses | ? | MIT | NO | |
-| lua-lupa | ? | MIT | NO | |
-| lua-tableshape | ae67256 | MIT | NO | |
-| mumhash | ? | MIT | NO | |
-| ngx-http-parser | 2.2.0 | MIT | YES | spamc support |
-| Mozilla-PublicSuffix | ? | MIT | NO | |
-| snowball | ? | BSD-3-Clause | NO | |
-| t1ha | ? | Zlib | NO | |
-| uthash | 1.9.8 | BSD | YES | |
-| xxhash | 0.8.1 | BSD | NO | |
-| zstd | 1.4.5 | BSD | NO | |
-| google-ced | 37529e6 | Apache 2 | YES | build fixes |
-| kann | ? | MIT | YES | blas/lapack changes|
-| fpconv | ? | Boost | YES | many changes |
-| fastutf8 | ? | MIT | YES | many changes |
-| expected | v1.0 | Public Domain / CC0 | NO | |
-| robin-hood | 3.9.1 | MIT | NO | |
-| frozen | 1.0.1 | Apache 2 | NO | |
-| fmt | 8.1.1 | MIT | NO | |
-| doctest | 2.4.6 | MIT | NO | |
-| function2 | 4.1.0 | Boost | NO | |
+| Name | Version | License | Patched | Notes |
+|------------------------|---------|---------------------| --- | --- |
+| aho-corasick | ? | LGPL-3.0 | YES | lowercase support |
+| cdb | 1.1.0 | Public Domain / CC0 | NO | |
+| hiredis | 0.13.3 | BSD-3-Clause | YES | many changes |
+| libev | 4.33 | BSD-2-Clause | YES | many changes |
+| lc-btrie | ? | BSD-3-Clause | YES | mempool support |
+| libottery | ? | Public Domain / CC0 | YES | many changes |
+| librdns | ? | BSD-2-Clause | YES | |
+| libucl | ? | BSD-2-Clause | YES | |
+| replxx | 6d93360 | BSD-2-Clause | YES | libicu usage |
+| lua-argparse | 0.7.1 | MIT | NO | |
+| lua-bit | 1.0.2 | MIT | YES | build fixes |
+| lua-fun | ? | MIT | YES | rspamd text |
+| lua-lpeg | 1.0 | MIT | YES | rspamd text + alloc|
+| lua-moses | ? | MIT | NO | |
+| lua-lupa | ? | MIT | NO | |
+| lua-tableshape | ae67256 | MIT | NO | |
+| mumhash | ? | MIT | NO | |
+| ngx-http-parser | 2.2.0 | MIT | YES | spamc support |
+| Mozilla-PublicSuffix | ? | MIT | NO | |
+| snowball | ? | BSD-3-Clause | NO | |
+| t1ha | ? | Zlib | NO | |
+| uthash | 1.9.8 | BSD | YES | |
+| xxhash | 0.8.1 | BSD | NO | |
+| zstd | 1.4.5 | BSD | NO | |
+| google-ced | 37529e6 | Apache 2 | YES | build fixes |
+| kann | ? | MIT | YES | blas/lapack changes|
+| fpconv | ? | Boost | YES | many changes |
+| fastutf8 | ? | MIT | YES | many changes |
+| expected | v1.0 | Public Domain / CC0 | NO | |
+| frozen | 1.0.1 | Apache 2 | NO | |
+| fmt | 8.1.1 | MIT | NO | |
+| doctest | 2.4.6 | MIT | NO | |
+| function2 | 4.1.0 | Boost | NO | |
+| ankerl/svector | 1.0.0 | MIT | NO | |
+| ankerl/unordered_dense | 1.0.2 | MIT | NO | |
\ No newline at end of file diff --git a/contrib/robin-hood/LICENSE b/contrib/ankerl/LICENSE index e9a58ad65..c4d1a0e48 100644 --- a/contrib/robin-hood/LICENSE +++ b/contrib/ankerl/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018-2019 Martin Ankerl +Copyright (c) 2022 Martin Leitner-Ankerl Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/contrib/ankerl/svector.h b/contrib/ankerl/svector.h new file mode 100644 index 000000000..b6ef1ad68 --- /dev/null +++ b/contrib/ankerl/svector.h @@ -0,0 +1,993 @@ +// ┌─┐┬ ┬┌─┐┌─┐┌┬┐┌─┐┬─┐ Compact SVO optimized vector C++17 or higher +// └─┐└┐┌┘├┤ │ │ │ │├┬┘ Version 1.0.0 +// └─┘ └┘ └─┘└─┘ ┴ └─┘┴└─ https://github.com/martinus/svector +// +// Licensed under the MIT License <http://opensource.org/licenses/MIT>. +// SPDX-License-Identifier: MIT +// Copyright (c) 2022 Martin Leitner-Ankerl <martin.ankerl@gmail.com> +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +#ifndef ANKERL_SVECTOR_H +#define ANKERL_SVECTOR_H + +// see https://semver.org/spec/v2.0.0.html +#define ANKERL_SVECTOR_VERSION_MAJOR 1 // incompatible API changes +#define ANKERL_SVECTOR_VERSION_MINOR 0 // add functionality in a backwards compatible manner +#define ANKERL_SVECTOR_VERSION_PATCH 0 // backwards compatible bug fixes + +// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/ +#define ANKERL_SVECTOR_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch +#define ANKERL_SVECTOR_VERSION_CONCAT(major, minor, patch) ANKERL_SVECTOR_VERSION_CONCAT1(major, minor, patch) +#define ANKERL_SVECTOR_NAMESPACE \ + ANKERL_SVECTOR_VERSION_CONCAT(ANKERL_SVECTOR_VERSION_MAJOR, ANKERL_SVECTOR_VERSION_MINOR, ANKERL_SVECTOR_VERSION_PATCH) + +#include <algorithm> +#include <array> +#include <cstddef> +#include <cstdint> +#include <cstring> +#include <initializer_list> +#include <iterator> +#include <limits> +#include <memory> +#include <new> +#include <stdexcept> +#include <type_traits> +#include <utility> + +namespace ankerl { +inline namespace ANKERL_SVECTOR_NAMESPACE { +namespace detail { + +template <typename Condition, typename T = void> +using enable_if_t = typename std::enable_if<Condition::value, T>::type; + +template <typename It> +using is_input_iterator = std::is_base_of<std::input_iterator_tag, typename std::iterator_traits<It>::iterator_category>; + +constexpr auto round_up(size_t n, size_t multiple) -> size_t { + return ((n + (multiple - 1)) / multiple) * multiple; +} + +template <typename T> +constexpr auto cx_min(T a, T b) -> T { + return a < b ? a : b; +} + +template <typename T> +constexpr auto cx_max(T a, T b) -> T { + return a > b ? a : b; +} + +template <typename T> +constexpr auto alignment_of_svector() -> size_t { + return cx_max(sizeof(void*), std::alignment_of_v<T>); +} + +/** + * @brief Calculates sizeof(svector<T, N>) for a given type and inline capacity + */ +template <typename T> +constexpr auto size_of_svector(size_t min_inline_capacity) -> size_t { + // + 1 for one byte size in direct mode + return round_up(sizeof(T) * min_inline_capacity + 1, alignment_of_svector<T>()); +} + +/** + * @brief Calculates how many T we can actually store inside of an svector without increasing its sizeof(). + * + * E.g. svector<char, 1> could store 7 bytes even though 1 is specified. This makes sure we don't waste any + * of the padding. + */ +template <typename T> +constexpr auto automatic_capacity(size_t min_inline_capacity) -> size_t { + return cx_min((size_of_svector<T>(min_inline_capacity) - 1U) / sizeof(T), size_t(127)); +} + +/** + * Holds size & capacity, a glorified struct. + */ +class header { + size_t m_size{}; + size_t const m_capacity; + +public: + inline explicit header(size_t capacity) + : m_capacity{capacity} {} + + [[nodiscard]] inline auto size() const -> size_t { + return m_size; + } + + [[nodiscard]] inline auto capacity() const -> size_t { + return m_capacity; + } + + inline void size(size_t s) { + m_size = s; + } +}; + +/** + * @brief Holds header (size+capacity) plus an arbitrary number of T. + * + * To make storage compact, we don't actually store a pointer to T. We don't have to + * because we know exactly at which location it begins. 
+ */ +template <typename T> +struct storage : public header { + static constexpr auto alignment_of_t = std::alignment_of_v<T>; + static constexpr auto max_alignment = std::max(std::alignment_of_v<header>, std::alignment_of_v<T>); + static constexpr auto offset_to_data = detail::round_up(sizeof(header), alignment_of_t); + static_assert(max_alignment <= __STDCPP_DEFAULT_NEW_ALIGNMENT__); + + explicit storage(size_t capacity) + : header(capacity) {} + + auto data() -> T* { + auto ptr_to_data = reinterpret_cast<std::byte*>(this) + offset_to_data; + return std::launder(reinterpret_cast<T*>(ptr_to_data)); + } + + /** + * @brief Allocates space for storage plus capacity*T objects. + * + * Checks to make sure that allocation won't overflow. + * + * @param capacity Number of T to allocate. + * @return storage<T>* + */ + static auto alloc(size_t capacity) -> storage<T>* { + // make sure we don't overflow! + auto mem = sizeof(T) * capacity; + if (mem < capacity) { + throw std::bad_alloc(); + } + if (offset_to_data + mem < mem) { + throw std::bad_alloc(); + } + mem += offset_to_data; + if (static_cast<uint64_t>(mem) > static_cast<uint64_t>(std::numeric_limits<std::ptrdiff_t>::max())) { + throw std::bad_alloc(); + } + + void* ptr = ::operator new(offset_to_data + sizeof(T) * capacity); + if (nullptr == ptr) { + throw std::bad_alloc(); + } + // use void* to ensure we don't use an overload for T* + return new (ptr) storage<T>(capacity); + } +}; + +} // namespace detail + +template <typename T, size_t MinInlineCapacity> +class svector { + static_assert(MinInlineCapacity <= 127, "sorry, can't have more than 127 direct elements"); + static constexpr auto N = detail::automatic_capacity<T>(MinInlineCapacity); + + enum class direction { direct, indirect }; + + /** + * A buffer to hold the data of the svector Depending on direct/indirect mode, the content it holds is like so: + * + * direct: + * m_data[0] & 1: lowest bit is 1 for direct mode. + * m_data[0] >> 1: size for direct mode + * Then 0-X bytes unused (padding), and then the actual inline T data. + * indirect: + * m_data[0] & 1: lowest bit is 0 for indirect mode + * m_data[0..7]: stores an uintptr_t, which points to the indirect data. + */ + alignas(detail::alignment_of_svector<T>()) std::array<uint8_t, detail::size_of_svector<T>(MinInlineCapacity)> m_data; + + // direct mode /////////////////////////////////////////////////////////// + + [[nodiscard]] auto is_direct() const -> bool { + return (m_data[0] & 1U) != 0U; + } + + [[nodiscard]] auto direct_size() const -> size_t { + return m_data[0] >> 1U; + } + + // sets size of direct mode and mode to direct too. 
+ constexpr void set_direct_and_size(size_t s) { + m_data[0] = (s << 1U) | 1U; + } + + [[nodiscard]] auto direct_data() -> T* { + return std::launder(reinterpret_cast<T*>(m_data.data() + std::alignment_of_v<T>)); + } + + // indirect mode ///////////////////////////////////////////////////////// + + [[nodiscard]] auto indirect() -> detail::storage<T>* { + detail::storage<T>* ptr; // NOLINT(cppcoreguidelines-init-variables) + std::memcpy(&ptr, m_data.data(), sizeof(ptr)); + return ptr; + } + + [[nodiscard]] auto indirect() const -> detail::storage<T> const* { + return const_cast<svector*>(this)->indirect(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + void set_indirect(detail::storage<T>* ptr) { + std::memcpy(m_data.data(), &ptr, sizeof(ptr)); + + // safety check to guarantee the lowest bit is 0 + if (is_direct()) { + throw std::bad_alloc(); // LCOV_EXCL_LINE + } + } + + // helpers /////////////////////////////////////////////////////////////// + + /** + * @brief Moves size objects from source_ptr to target_ptr, and destroys what remains in source_ptr. + * + * Assumes data is not overlapping + */ + static void uninitialized_move_and_destroy(T* source_ptr, T* target_ptr, size_t size) { + if constexpr (std::is_trivially_copyable_v<T>) { + std::memcpy(target_ptr, source_ptr, size * sizeof(T)); + } else { + std::uninitialized_move_n(source_ptr, size, target_ptr); + std::destroy_n(source_ptr, size); + } + } + + /** + * @brief Reallocates all data when capacity changes. + * + * if new_capacity <= N chooses direct memory, otherwise indirect. + */ + void realloc(size_t new_capacity) { + if (new_capacity <= N) { + // put everything into direct storage + if (is_direct()) { + // direct -> direct: nothing to do! + return; + } + + // indirect -> direct + auto* storage = indirect(); + uninitialized_move_and_destroy(storage->data(), direct_data(), storage->size()); + set_direct_and_size(storage->size()); + delete storage; + } else { + // put everything into indirect storage + auto* storage = detail::storage<T>::alloc(new_capacity); + if (is_direct()) { + // direct -> indirect + uninitialized_move_and_destroy(data<direction::direct>(), storage->data(), size<direction::direct>()); + storage->size(size<direction::direct>()); + } else { + // indirect -> indirect + uninitialized_move_and_destroy(data<direction::indirect>(), storage->data(), size<direction::indirect>()); + storage->size(size<direction::indirect>()); + delete indirect(); + } + set_indirect(storage); + } + } + + /** + * @brief Doubles starting_capacity until it is >= size_to_fit. 
+ */ + [[nodiscard]] static auto calculate_new_capacity(size_t size_to_fit, size_t starting_capacity) -> size_t { + if (size_to_fit > max_size()) { + // not enough space + throw std::bad_alloc(); + } + + if (size_to_fit == 0) { + // special handling for 0 so N==0 works + return starting_capacity; + } + // start with at least 1, so N==0 works + auto new_capacity = std::max<size_t>(1, starting_capacity); + + // double capacity until its large enough, but make sure we don't overflow + while (new_capacity < size_to_fit && new_capacity * 2 > new_capacity) { + new_capacity *= 2; + } + if (new_capacity < size_to_fit) { + // got an overflow, set capacity to max + new_capacity = max_size(); + } + return std::min(new_capacity, max_size()); + } + + template <direction D> + [[nodiscard]] auto capacity() const -> size_t { + if constexpr (D == direction::direct) { + return N; + } else { + return indirect()->capacity(); + } + } + + template <direction D> + [[nodiscard]] auto size() const -> size_t { + if constexpr (D == direction::direct) { + return direct_size(); + } else { + return indirect()->size(); + } + } + + template <direction D> + void set_size(size_t s) { + if constexpr (D == direction::direct) { + set_direct_and_size(s); + } else { + indirect()->size(s); + } + } + + void set_size(size_t s) { + if (is_direct()) { + set_size<direction::direct>(s); + } else { + set_size<direction::indirect>(s); + } + } + + template <direction D> + [[nodiscard]] auto data() -> T* { + if constexpr (D == direction::direct) { + return direct_data(); + } else { + return indirect()->data(); + } + } + + template <direction D> + [[nodiscard]] auto data() const -> T const* { + return const_cast<svector*>(this)->data<D>(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template <direction D> + void pop_back() { + if constexpr (std::is_trivially_destructible_v<T>) { + set_size<D>(size<D>() - 1); + } else { + auto s = size<D>() - 1; + (data<D>() + s)->~T(); + set_size<D>(s); + } + } + + /** + * @brief We need variadic arguments so we can either use copy ctor or default ctor + */ + template <direction D, class... Args> + void resize_after_reserve(size_t count, Args&&... args) { + auto current_size = size<D>(); + if (current_size > count) { + if constexpr (!std::is_trivially_destructible_v<T>) { + auto* d = data<D>(); + std::destroy(d + count, d + current_size); + } + } else { + auto* d = data<D>(); + for (auto ptr = d + current_size, end = d + count; ptr != end; ++ptr) { + new (static_cast<void*>(ptr)) T(std::forward<Args>(args)...); + } + } + set_size<D>(count); + } + + // Makes sure that to is not past the end iterator + template <direction D> + auto erase_checked_end(T const* cfrom, T const* to) -> T* { + auto* const erase_begin = const_cast<T*>(cfrom); // NOLINT(cppcoreguidelines-pro-type-const-cast) + auto* const container_end = data<D>() + size<D>(); + auto* const erase_end = std::min(const_cast<T*>(to), container_end); // NOLINT(cppcoreguidelines-pro-type-const-cast) + + std::move(erase_end, container_end, erase_begin); + auto const num_erased = std::distance(erase_begin, erase_end); + std::destroy(container_end - num_erased, container_end); + set_size<D>(size<D>() - num_erased); + return erase_begin; + } + + template <typename It> + void assign(It first, It last, std::input_iterator_tag /*unused*/) { + clear(); + + // TODO this can be made faster, e.g. by setting size only when finished. 
+ while (first != last) { + push_back(*first); + ++first; + } + } + + template <typename It> + void assign(It first, It last, std::forward_iterator_tag /*unused*/) { + clear(); + + auto s = std::distance(first, last); + reserve(s); + std::uninitialized_copy(first, last, data()); + set_size(s); + } + + // precondition: all uninitialized + void do_move_assign(svector&& other) { + if (!other.is_direct()) { + // take other's memory, even when empty + set_indirect(other.indirect()); + } else { + auto* other_ptr = other.data<direction::direct>(); + auto s = other.size<direction::direct>(); + auto* other_end = other_ptr + s; + + std::uninitialized_move(other_ptr, other_end, data<direction::direct>()); + std::destroy(other_ptr, other_end); + set_size(s); + } + other.set_direct_and_size(0); + } + + /** + * @brief Shifts data [source_begin, source_end( to the right, starting on target_begin. + * + * Preconditions: + * * contiguous memory + * * source_begin <= target_begin + * * source_end onwards is uninitialized memory + * + * Destroys then empty elements in [source_begin, source_end( + */ + static auto shift_right(T* source_begin, T* source_end, T* target_begin) { + // 1. uninitialized moves + auto const num_moves = std::distance(source_begin, source_end); + auto const target_end = target_begin + num_moves; + auto const num_uninitialized_move = std::min(num_moves, std::distance(source_end, target_end)); + std::uninitialized_move(source_end - num_uninitialized_move, source_end, target_end - num_uninitialized_move); + std::move_backward(source_begin, source_end - num_uninitialized_move, target_end - num_uninitialized_move); + std::destroy(source_begin, std::min(source_end, target_begin)); + } + + template <direction D> + [[nodiscard]] auto make_uninitialized_space_new(size_t s, T* p, size_t count) -> T* { + auto target = svector(); + // we know target is indirect because we're increasing capacity + target.reserve(s + count); + + // move everything [begin, pos[ + auto* target_pos = std::uninitialized_move(data<D>(), p, target.template data<direction::indirect>()); + + // move everything [pos, end] + std::uninitialized_move(p, data<D>() + s, target_pos + count); + + target.template set_size<direction::indirect>(s + count); + *this = std::move(target); + return target_pos; + } + + template <direction D> + [[nodiscard]] auto make_uninitialized_space(T const* pos, size_t count) -> T* { + auto* const p = const_cast<T*>(pos); // NOLINT(cppcoreguidelines-pro-type-const-cast) + auto s = size<D>(); + if (s + count > capacity<D>()) { + return make_uninitialized_space_new<D>(s, p, count); + } + + shift_right(p, data<D>() + s, p + count); + set_size<D>(s + count); + return p; + } + + // makes space for uninitialized data of cout elements. Also updates size. 
+ [[nodiscard]] auto make_uninitialized_space(T const* pos, size_t count) -> T* { + if (is_direct()) { + return make_uninitialized_space<direction::direct>(pos, count); + } + return make_uninitialized_space<direction::indirect>(pos, count); + } + + void destroy() { + auto const is_dir = is_direct(); + if constexpr (!std::is_trivially_destructible_v<T>) { + T* ptr = nullptr; + size_t s = 0; + if (is_dir) { + ptr = data<direction::direct>(); + s = size<direction::direct>(); + } else { + ptr = data<direction::indirect>(); + s = size<direction::indirect>(); + } + std::destroy_n(ptr, s); + } + if (!is_dir) { + delete indirect(); + } + } + + // performs a const_cast so we don't need this implementation twice + template <direction D> + auto at(size_t idx) -> T& { + if (idx >= size<D>()) { + throw std::out_of_range{"svector: idx out of range"}; + } + auto* ptr = const_cast<T*>(data<D>() + idx); // NOLINT(cppcoreguidelines-pro-type-const-cast) + return *ptr; + } // LCOV_EXCL_LINE why is this single } marked as not covered? gcov bug? + +public: + using value_type = T; + using size_type = size_t; + using difference_type = std::ptrdiff_t; + using reference = value_type&; + using const_reference = value_type const&; + using pointer = T*; + using const_pointer = T const*; + using iterator = T*; + using const_iterator = T const*; + using reverse_iterator = std::reverse_iterator<iterator>; + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + + svector() { + set_direct_and_size(0); + } + + svector(size_t count, T const& value) + : svector() { + resize(count, value); + } + + explicit svector(size_t count) + : svector() { + reserve(count); + if (is_direct()) { + resize_after_reserve<direction::direct>(count); + } else { + resize_after_reserve<direction::indirect>(count); + } + } + + template <typename InputIt, typename = detail::enable_if_t<detail::is_input_iterator<InputIt>>> + svector(InputIt first, InputIt last) + : svector() { + assign(first, last); + } + + svector(svector const& other) + : svector() { + auto s = other.size(); + reserve(s); + std::uninitialized_copy(other.begin(), other.end(), begin()); + set_size(s); + } + + svector(svector&& other) noexcept + : svector() { + do_move_assign(std::move(other)); + } + + svector(std::initializer_list<T> init) + : svector(init.begin(), init.end()) {} + + ~svector() { + destroy(); + } + + void assign(size_t count, T const& value) { + clear(); + resize(count, value); + } + + template <typename InputIt, typename = detail::enable_if_t<detail::is_input_iterator<InputIt>>> + void assign(InputIt first, InputIt last) { + assign(first, last, typename std::iterator_traits<InputIt>::iterator_category()); + } + + void assign(std::initializer_list<T> l) { + assign(l.begin(), l.end()); + } + + auto operator=(svector const& other) -> svector& { + if (&other == this) { + return *this; + } + + assign(other.begin(), other.end()); + return *this; + } + + auto operator=(svector&& other) noexcept -> svector& { + if (&other == this) { + // It doesn't seem to be required to do self-check, but let's do it anyways to be safe + return *this; + } + destroy(); + do_move_assign(std::move(other)); + return *this; + } + + auto operator=(std::initializer_list<T> l) -> svector& { + assign(l.begin(), l.end()); + return *this; + } + + void resize(size_t count) { + if (count > capacity()) { + reserve(count); + } + if (is_direct()) { + resize_after_reserve<direction::direct>(count); + } else { + resize_after_reserve<direction::indirect>(count); + } + } + + void 
resize(size_t count, T const& value) { + if (count > capacity()) { + reserve(count); + } + if (is_direct()) { + resize_after_reserve<direction::direct>(count, value); + } else { + resize_after_reserve<direction::indirect>(count, value); + } + } + + auto reserve(size_t s) { + auto old_capacity = capacity(); + auto new_capacity = calculate_new_capacity(s, old_capacity); + if (new_capacity > old_capacity) { + realloc(new_capacity); + } + } + + [[nodiscard]] auto capacity() const -> size_t { + if (is_direct()) { + return capacity<direction::direct>(); + } + return capacity<direction::indirect>(); + } + + [[nodiscard]] auto size() const -> size_t { + if (is_direct()) { + return size<direction::direct>(); + } + return size<direction::indirect>(); + } + + [[nodiscard]] auto data() -> T* { + if (is_direct()) { + return direct_data(); + } + return indirect()->data(); + } + + [[nodiscard]] auto data() const -> T const* { + return const_cast<svector*>(this)->data(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template <class... Args> + auto emplace_back(Args&&... args) -> T& { + size_t c; // NOLINT(cppcoreguidelines-init-variables) + size_t s; // NOLINT(cppcoreguidelines-init-variables) + bool is_dir = is_direct(); + if (is_dir) { + c = capacity<direction::direct>(); + s = size<direction::direct>(); + } else { + c = capacity<direction::indirect>(); + s = size<direction::indirect>(); + } + + if (s == c) { + auto new_capacity = calculate_new_capacity(s + 1, c); + realloc(new_capacity); + // reallocation happened, so we definitely are now in indirect mode + is_dir = false; + } + + T* ptr; // NOLINT(cppcoreguidelines-init-variables) + if (is_dir) { + ptr = data<direction::direct>() + s; + set_size<direction::direct>(s + 1); + } else { + ptr = data<direction::indirect>() + s; + set_size<direction::indirect>(s + 1); + } + return *new (static_cast<void*>(ptr)) T(std::forward<Args>(args)...); + } + + void push_back(T const& value) { + emplace_back(value); + } + + void push_back(T&& value) { + emplace_back(std::move(value)); + } + + [[nodiscard]] auto operator[](size_t idx) const -> T const& { + return *(data() + idx); + } + + [[nodiscard]] auto operator[](size_t idx) -> T& { + return *(data() + idx); + } + + auto at(size_t idx) -> T& { + if (is_direct()) { + return at<direction::direct>(idx); + } + return at<direction::indirect>(idx); + } + + auto at(size_t idx) const -> T const& { + return const_cast<svector*>(this)->at(idx); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + [[nodiscard]] auto begin() const -> T const* { + return data(); + } + + [[nodiscard]] auto cbegin() const -> T const* { + return begin(); + } + + [[nodiscard]] auto begin() -> T* { + return data(); + } + + [[nodiscard]] auto end() -> T* { + if (is_direct()) { + return data<direction::direct>() + size<direction::direct>(); + } + return data<direction::indirect>() + size<direction::indirect>(); + } + + [[nodiscard]] auto end() const -> T const* { + return const_cast<svector*>(this)->end(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + [[nodiscard]] auto cend() const -> T const* { + return end(); + } + + [[nodiscard]] auto rbegin() -> reverse_iterator { + return reverse_iterator{end()}; + } + + [[nodiscard]] auto rbegin() const -> const_reverse_iterator { + return crbegin(); + } + + [[nodiscard]] auto crbegin() const -> const_reverse_iterator { + return const_reverse_iterator{end()}; + } + + [[nodiscard]] auto rend() -> reverse_iterator { + return reverse_iterator{begin()}; + } + + [[nodiscard]] auto rend() 
const -> const_reverse_iterator { + return crend(); + } + + [[nodiscard]] auto crend() const -> const_reverse_iterator { + return const_reverse_iterator{begin()}; + } + + [[nodiscard]] auto front() const -> T const& { + return *data(); + } + + [[nodiscard]] auto front() -> T& { + return *data(); + } + + [[nodiscard]] auto back() -> T& { + if (is_direct()) { + return *(data<direction::direct>() + size<direction::direct>() - 1); + } + return *(data<direction::indirect>() + size<direction::indirect>() - 1); + } + + [[nodiscard]] auto back() const -> T const& { + return const_cast<svector*>(this)->back(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + void clear() { + if constexpr (!std::is_trivially_destructible_v<T>) { + std::destroy(begin(), end()); + } + + if (is_direct()) { + set_size<direction::direct>(0); + } else { + set_size<direction::indirect>(0); + } + } + + [[nodiscard]] auto empty() const -> bool { + return 0U == size(); + } + + void pop_back() { + if (is_direct()) { + pop_back<direction::direct>(); + } else { + pop_back<direction::indirect>(); + } + } + + [[nodiscard]] static auto max_size() -> size_t { + return std::numeric_limits<std::ptrdiff_t>::max(); + } + + void swap(svector& other) { + // TODO we could try to do the minimum number of moves + std::swap(*this, other); + } + + void shrink_to_fit() { + // per the standard we wouldn't need to do anything here. But since we are so nice, + // let's do the shrink. + auto const c = capacity(); + auto const s = size(); + if (s >= c) { + return; + } + + auto new_capacity = calculate_new_capacity(s, N); + if (new_capacity == c) { + // nothing change! + return; + } + + realloc(new_capacity); + } + + template <class... Args> + auto emplace(const_iterator pos, Args&&... args) -> iterator { + auto* p = make_uninitialized_space(pos, 1); + return new (static_cast<void*>(p)) T(std::forward<Args>(args)...); + } + + auto insert(const_iterator pos, T const& value) -> iterator { + return emplace(pos, value); + } + + auto insert(const_iterator pos, T&& value) -> iterator { + return emplace(pos, std::move(value)); + } + + auto insert(const_iterator pos, size_t count, T const& value) -> iterator { + auto* p = make_uninitialized_space(pos, count); + std::uninitialized_fill_n(p, count, value); + return p; + } + + template <typename It> + auto insert(const_iterator pos, It first, It last, std::input_iterator_tag /*unused*/) { + if (!(first != last)) { + return const_cast<T*>(pos); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + // just input_iterator_tag makes this very slow. Let's do the same as the STL. 
+ if (pos == end()) { + auto s = size(); + while (first != last) { + emplace_back(*first); + ++first; + } + return begin() + s; + } + + auto tmp = svector(first, last); + return insert(pos, std::make_move_iterator(tmp.begin()), std::make_move_iterator(tmp.end())); + } + + template <typename It> + auto insert(const_iterator pos, It first, It last, std::forward_iterator_tag /*unused*/) { + auto* p = make_uninitialized_space(pos, std::distance(first, last)); + std::uninitialized_copy(first, last, p); + return p; + } + + template <typename InputIt, typename = detail::enable_if_t<detail::is_input_iterator<InputIt>>> + auto insert(const_iterator pos, InputIt first, InputIt last) -> iterator { + return insert(pos, first, last, typename std::iterator_traits<InputIt>::iterator_category()); + } + + auto insert(const_iterator pos, std::initializer_list<T> l) -> iterator { + return insert(pos, l.begin(), l.end()); + } + + auto erase(const_iterator pos) -> iterator { + return erase(pos, pos + 1); + } + + auto erase(const_iterator first, const_iterator last) -> iterator { + if (is_direct()) { + return erase_checked_end<direction::direct>(first, last); + } + return erase_checked_end<direction::indirect>(first, last); + } +}; + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator==(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return std::equal(a.begin(), a.end(), b.begin(), b.end()); +} + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator!=(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return !(a == b); +} + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator<(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); +} + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator>=(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return !(a < b); +} + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator>(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return std::lexicographical_compare(b.begin(), b.end(), a.begin(), a.end()); +} + +template <typename T, size_t NA, size_t NB> +[[nodiscard]] auto operator<=(svector<T, NA> const& a, svector<T, NB> const& b) -> bool { + return !(a > b); +} + +} // namespace ANKERL_SVECTOR_NAMESPACE +} // namespace ankerl + +// NOLINTNEXTLINE(cert-dcl58-cpp) +namespace std { +inline namespace ANKERL_SVECTOR_NAMESPACE { + +template <class T, size_t N, class U> +constexpr auto erase(ankerl::svector<T, N>& sv, U const& value) -> typename ankerl::svector<T, N>::size_type { + auto* removed_begin = std::remove(sv.begin(), sv.end(), value); + auto num_removed = std::distance(removed_begin, sv.end()); + sv.erase(removed_begin, sv.end()); + return num_removed; +} + +template <class T, size_t N, class Pred> +constexpr auto erase_if(ankerl::svector<T, N>& sv, Pred pred) -> typename ankerl::svector<T, N>::size_type { + auto* removed_begin = std::remove_if(sv.begin(), sv.end(), pred); + auto num_removed = std::distance(removed_begin, sv.end()); + sv.erase(removed_begin, sv.end()); + return num_removed; +} + +} // namespace ANKERL_SVECTOR_NAMESPACE +} // namespace std + +#endif diff --git a/contrib/ankerl/unordered_dense.h b/contrib/ankerl/unordered_dense.h new file mode 100644 index 000000000..9ae108173 --- /dev/null +++ b/contrib/ankerl/unordered_dense.h @@ -0,0 +1,1199 @@ +///////////////////////// ankerl::unordered_dense::{map, set} 
///////////////////////// + +// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion. +// Version 1.0.2 +// https://github.com/martinus/unordered_dense +// +// Licensed under the MIT License <http://opensource.org/licenses/MIT>. +// SPDX-License-Identifier: MIT +// Copyright (c) 2022 Martin Leitner-Ankerl <martin.ankerl@gmail.com> +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ANKERL_UNORDERED_DENSE_H +#define ANKERL_UNORDERED_DENSE_H + +// see https://semver.org/spec/v2.0.0.html +#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 1 // incompatible API changes +#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 0 // add functionality in a backwards compatible manner +#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 2 // backwards compatible bug fixes + +#if __cplusplus < 201703L +# error ankerl::unordered_dense requires C++17 or higher +#else + +# include <array> // for array +# include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C +# include <cstring> // for size_t, memcpy, memset +# include <functional> // for equal_to, hash +# include <initializer_list> // for initializer_list +# include <iterator> // for pair, distance +# include <limits> // for numeric_limits +# include <memory> // for allocator, allocator_traits, shared_ptr +# include <stdexcept> // for out_of_range +# include <string> // for basic_string +# include <string_view> // for basic_string_view, hash +# include <tuple> // for forward_as_tuple +# include <type_traits> // for enable_if_t, declval, conditional_t, ena... +# include <utility> // for forward, exchange, pair, as_const, piece... 
+# include <vector> // for vector + +# define ANKERL_UNORDERED_DENSE_PMR 0 +# if defined(__has_include) +# if __has_include(<memory_resource>) +# undef ANKERL_UNORDERED_DENSE_PMR +# define ANKERL_UNORDERED_DENSE_PMR 1 +# include <memory_resource> // for polymorphic_allocator +# endif +# endif + +# if defined(_MSC_VER) && defined(_M_X64) +# include <intrin.h> +# pragma intrinsic(_umul128) +# endif + +# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) +# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) +# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) +# else +# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) +# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) +# endif + +namespace ankerl::unordered_dense { + +// hash /////////////////////////////////////////////////////////////////////// + +// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash +// No big-endian support (because different values on different machines don't matter), +// hardcodes seed and the secret, reformattes the code, and clang-tidy fixes. +namespace detail::wyhash { + +static inline void mum(uint64_t* a, uint64_t* b) { +# if defined(__SIZEOF_INT128__) + __uint128_t r = *a; + r *= *b; + *a = static_cast<uint64_t>(r); + *b = static_cast<uint64_t>(r >> 64U); +# elif defined(_MSC_VER) && defined(_M_X64) + *a = _umul128(*a, *b, b); +# else + uint64_t ha = *a >> 32U; + uint64_t hb = *b >> 32U; + uint64_t la = static_cast<uint32_t>(*a); + uint64_t lb = static_cast<uint32_t>(*b); + uint64_t hi{}; + uint64_t lo{}; + uint64_t rh = ha * hb; + uint64_t rm0 = ha * lb; + uint64_t rm1 = hb * la; + uint64_t rl = la * lb; + uint64_t t = rl + (rm0 << 32U); + auto c = static_cast<uint64_t>(t < rl); + lo = t + (rm1 << 32U); + c += static_cast<uint64_t>(lo < t); + hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c; + *a = lo; + *b = hi; +# endif +} + +// multiply and xor mix function, aka MUM +[[nodiscard]] static inline auto mix(uint64_t a, uint64_t b) -> uint64_t { + mum(&a, &b); + return a ^ b; +} + +// read functions. WARNING: we don't care about endianness, so results are different on big endian! 
+[[nodiscard]] static inline auto r8(const uint8_t* p) -> uint64_t { + uint64_t v{}; + std::memcpy(&v, p, 8); + return v; +} + +[[nodiscard]] static inline auto r4(const uint8_t* p) -> uint64_t { + uint32_t v{}; + std::memcpy(&v, p, 4); + return v; +} + +// reads 1, 2, or 3 bytes +[[nodiscard]] static inline auto r3(const uint8_t* p, size_t k) -> uint64_t { + return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1]; +} + +[[nodiscard]] static inline auto hash(void const* key, size_t len) -> uint64_t { + static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f), + UINT64_C(0xe7037ed1a0b428db), + UINT64_C(0x8ebc6af09c88c6e3), + UINT64_C(0x589965cc75374cc3)}; + + auto const* p = static_cast<uint8_t const*>(key); + uint64_t seed = secret[0]; + uint64_t a{}; + uint64_t b{}; + if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) { + if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) { + a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U)); + b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U)); + } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) { + a = r3(p, len); + b = 0; + } else { + a = 0; + b = 0; + } + } else { + size_t i = len; + if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) { + uint64_t see1 = seed; + uint64_t see2 = seed; + do { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1); + see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2); + p += 48; + i -= 48; + } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48)); + seed ^= see1 ^ see2; + } + while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + i -= 16; + p += 16; + } + a = r8(p + i - 16); + b = r8(p + i - 8); + } + + return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed)); +} + +[[nodiscard]] static inline auto hash(uint64_t x) -> uint64_t { + return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15)); +} + +} // namespace detail::wyhash + +template <typename T, typename Enable = void> +struct hash : public std::hash<T> { + using is_avalanching = void; + auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) + -> size_t { + return static_cast<size_t>(detail::wyhash::hash(std::hash<T>::operator()(obj))); + } +}; + +template <typename CharT> +struct hash<std::basic_string<CharT>> { + using is_avalanching = void; + auto operator()(std::basic_string<CharT> const& str) const noexcept -> size_t { + return static_cast<size_t>(detail::wyhash::hash(str.data(), sizeof(CharT) * str.size())); + } +}; + +template <typename CharT> +struct hash<std::basic_string_view<CharT>> { + using is_avalanching = void; + auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> size_t { + return static_cast<size_t>(detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size())); + } +}; + +template <class T> +struct hash<T*> { + using is_avalanching = void; + auto operator()(T* ptr) const noexcept -> size_t { + return static_cast<size_t>(detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr))); + } +}; + +template <class T> +struct hash<std::unique_ptr<T>> { + using is_avalanching = void; + auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> size_t { + return static_cast<size_t>(detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()))); + } +}; + +template <class T> +struct hash<std::shared_ptr<T>> { + using is_avalanching = void; + auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> size_t { + return 
static_cast<size_t>(detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()))); + } +}; + +template <typename Enum> +struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> { + using is_avalanching = void; + auto operator()(Enum e) const noexcept -> size_t { + using Underlying = typename std::underlying_type_t<Enum>; + return static_cast<size_t>(detail::wyhash::hash(static_cast<Underlying>(e))); + } +}; + +# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \ + template <> \ + struct hash<T> { \ + using is_avalanching = void; \ + auto operator()(T const& obj) const noexcept -> size_t { \ + return static_cast<size_t>(detail::wyhash::hash(static_cast<uint64_t>(obj))); \ + } \ + } + +# if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wuseless-cast" +# endif +// see https://en.cppreference.com/w/cpp/utility/hash +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char); +# if __cplusplus >= 202002L +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t); +# endif +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long); + +# if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic pop +# endif + +namespace detail { + +struct nonesuch {}; + +template <class Default, class AlwaysVoid, template <class...> class Op, class... Args> +struct detector { + using value_t = std::false_type; + using type = Default; +}; + +template <class Default, template <class...> class Op, class... Args> +struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> { + using value_t = std::true_type; + using type = Op<Args...>; +}; + +template <template <class...> class Op, class... Args> +using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t; + +template <template <class...> class Op, class... Args> +constexpr bool is_detected_v = is_detected<Op, Args...>::value; + +template <typename T> +using detect_avalanching = typename T::is_avalanching; + +template <typename T> +using detect_is_transparent = typename T::is_transparent; + +template <typename H, typename KE> +using is_transparent = + std::enable_if_t<is_detected_v<detect_is_transparent, H> && is_detected_v<detect_is_transparent, KE>, bool>; + +// This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set. +template <class Key, + class T, // when void, treat it as a set. 
+ class Hash, + class KeyEqual, + class Allocator> +class table { + struct Bucket; + using ValueContainer = + typename std::vector<typename std::conditional_t<std::is_void_v<T>, Key, std::pair<Key, T>>, Allocator>; + using BucketAlloc = typename std::allocator_traits<Allocator>::template rebind_alloc<Bucket>; + using BucketAllocTraits = std::allocator_traits<BucketAlloc>; + + static constexpr uint32_t BUCKET_DIST_INC = 1U << 8U; // skip 1 byte fingerprint + static constexpr uint32_t BUCKET_FINGERPRINT_MASK = BUCKET_DIST_INC - 1; // mask for 1 byte of fingerprint + static constexpr uint8_t INITIAL_SHIFTS = 64 - 3; // 2^(64-m_shift) number of buckets + static constexpr float DEFAULT_MAX_LOAD_FACTOR = 0.8F; + +public: + using key_type = Key; + using mapped_type = T; + using value_type = typename ValueContainer::value_type; + using size_type = typename ValueContainer::size_type; + using difference_type = typename ValueContainer::difference_type; + using hasher = Hash; + using key_equal = KeyEqual; + using allocator_type = typename ValueContainer::allocator_type; + using reference = typename ValueContainer::reference; + using const_reference = typename ValueContainer::const_reference; + using pointer = typename ValueContainer::pointer; + using const_pointer = typename ValueContainer::const_pointer; + using iterator = typename ValueContainer::iterator; + using const_iterator = typename ValueContainer::const_iterator; + +private: + struct Bucket { + uint32_t dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash + uint32_t value_idx; // index into the m_values vector. + }; + static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy"); + static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy"); + + ValueContainer m_values{}; // Contains all the key-value pairs in one densely stored container. No holes. + Bucket* m_buckets_start = nullptr; + Bucket* m_buckets_end = nullptr; + uint32_t m_max_bucket_capacity = 0; + float m_max_load_factor = DEFAULT_MAX_LOAD_FACTOR; + Hash m_hash{}; + KeyEqual m_equal{}; + uint8_t m_shifts = INITIAL_SHIFTS; + + [[nodiscard]] auto next(Bucket const* bucket) const -> Bucket const* { + return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket + 1 == m_buckets_end) ? m_buckets_start : bucket + 1; + } + + [[nodiscard]] auto next(Bucket* bucket) -> Bucket* { + return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket + 1 == m_buckets_end) ? 
m_buckets_start : bucket + 1; + } + + template <typename K> + [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t { + if constexpr (is_detected_v<detect_avalanching, Hash>) { + return m_hash(key); + } else { + return wyhash::hash(m_hash(key)); + } + } + + [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> uint32_t { + return BUCKET_DIST_INC | (hash & BUCKET_FINGERPRINT_MASK); + } + + [[nodiscard]] constexpr auto bucket_from_hash(uint64_t hash) const -> Bucket const* { + return m_buckets_start + (hash >> m_shifts); + } + + [[nodiscard]] constexpr auto bucket_from_hash(uint64_t hash) -> Bucket* { + return m_buckets_start + (hash >> m_shifts); + } + + [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& { + if constexpr (std::is_void_v<T>) { + return vt; + } else { + return vt.first; + } + } + + template <typename K> + [[nodiscard]] auto next_while_less(K const& key) -> std::pair<uint32_t, Bucket*> { + auto const& pair = std::as_const(*this).next_while_less(key); + return {pair.first, const_cast<Bucket*>(pair.second)}; // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template <typename K> + [[nodiscard]] auto next_while_less(K const& key) const -> std::pair<uint32_t, Bucket const*> { + auto hash = mixed_hash(key); + auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash); + auto const* bucket = bucket_from_hash(hash); + + while (dist_and_fingerprint < bucket->dist_and_fingerprint) { + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + } + return {dist_and_fingerprint, bucket}; + } + + void place_and_shift_up(Bucket bucket, Bucket* place) { + while (0 != place->dist_and_fingerprint) { + bucket = std::exchange(*place, bucket); + bucket.dist_and_fingerprint += BUCKET_DIST_INC; + place = next(place); + } + *place = bucket; + } + + [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> uint64_t { + return UINT64_C(1) << (64U - shifts); + } + + [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t { + auto shifts = INITIAL_SHIFTS; + while (shifts > 0 && static_cast<uint64_t>(calc_num_buckets(shifts) * max_load_factor()) < s) { + --shifts; + } + return shifts; + } + + // assumes m_values has data, m_buckets_start=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS + void copy_buckets(table const& other) { + if (!empty()) { + m_shifts = other.m_shifts; + allocate_buckets_from_shift(); + std::memcpy(m_buckets_start, other.m_buckets_start, sizeof(Bucket) * bucket_count()); + } + } + + /** + * True when no element can be added any more without increasing the size + */ + [[nodiscard]] auto is_full() const -> bool { + return size() >= m_max_bucket_capacity; + } + + void deallocate_buckets() { + auto bucket_alloc = BucketAlloc(m_values.get_allocator()); + BucketAllocTraits::deallocate(bucket_alloc, m_buckets_start, bucket_count()); + m_buckets_start = nullptr; + m_buckets_end = nullptr; + m_max_bucket_capacity = 0; + } + + void allocate_buckets_from_shift() { + auto bucket_alloc = BucketAlloc(m_values.get_allocator()); + auto num_buckets = calc_num_buckets(m_shifts); + m_buckets_start = BucketAllocTraits::allocate(bucket_alloc, num_buckets); + m_buckets_end = m_buckets_start + num_buckets; + m_max_bucket_capacity = static_cast<uint64_t>(num_buckets * max_load_factor()); + } + + void clear_buckets() { + if (m_buckets_start != nullptr) { + std::memset(m_buckets_start, 0, sizeof(Bucket) * bucket_count()); + } + } + + void 
clear_and_fill_buckets_from_values() { + clear_buckets(); + for (uint32_t value_idx = 0, end_idx = static_cast<uint32_t>(m_values.size()); value_idx < end_idx; ++value_idx) { + auto const& key = get_key(m_values[value_idx]); + auto [dist_and_fingerprint, bucket] = next_while_less(key); + + // we know for certain that key has not yet been inserted, so no need to check it. + place_and_shift_up({dist_and_fingerprint, value_idx}, bucket); + } + } + + void increase_size() { + --m_shifts; + deallocate_buckets(); + allocate_buckets_from_shift(); + clear_and_fill_buckets_from_values(); + } + + void do_erase(Bucket* bucket) { + auto const value_idx_to_remove = bucket->value_idx; + + // shift down until either empty or an element with correct spot is found + auto* next_bucket = next(bucket); + while (next_bucket->dist_and_fingerprint >= BUCKET_DIST_INC * 2) { + *bucket = {next_bucket->dist_and_fingerprint - BUCKET_DIST_INC, next_bucket->value_idx}; + bucket = std::exchange(next_bucket, next(next_bucket)); + } + *bucket = {}; + + // update m_values + if (value_idx_to_remove != m_values.size() - 1) { + // no luck, we'll have to replace the value with the last one and update the index accordingly + auto& val = m_values[value_idx_to_remove]; + val = std::move(m_values.back()); + + // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx + auto mh = mixed_hash(get_key(val)); + bucket = bucket_from_hash(mh); + + auto const values_idx_back = static_cast<uint32_t>(m_values.size() - 1); + while (values_idx_back != bucket->value_idx) { + bucket = next(bucket); + } + bucket->value_idx = value_idx_to_remove; + } + m_values.pop_back(); + } + + template <typename K> + auto do_erase_key(K&& key) -> size_t { + if (empty()) { + return 0; + } + + auto [dist_and_fingerprint, bucket] = next_while_less(key); + + while (dist_and_fingerprint == bucket->dist_and_fingerprint && !m_equal(key, get_key(m_values[bucket->value_idx]))) { + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + } + + if (dist_and_fingerprint != bucket->dist_and_fingerprint) { + return 0; + } + do_erase(bucket); + return 1; + } + + template <class K, class M> + auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> { + auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped)); + if (!it_isinserted.second) { + it_isinserted.first->second = std::forward<M>(mapped); + } + return it_isinserted; + } + + template <typename K, typename... Args> + auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> { + if (is_full()) { + increase_size(); + } + + auto hash = mixed_hash(key); + auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash); + auto* bucket = bucket_from_hash(hash); + + while (dist_and_fingerprint <= bucket->dist_and_fingerprint) { + if (dist_and_fingerprint == bucket->dist_and_fingerprint && m_equal(key, m_values[bucket->value_idx].first)) { + return {begin() + bucket->value_idx, false}; + } + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + } + + // emplace the new value. 
If that throws an exception, no harm done; index is still in a valid state + m_values.emplace_back(std::piecewise_construct, + std::forward_as_tuple(std::forward<K>(key)), + std::forward_as_tuple(std::forward<Args>(args)...)); + + // place element and shift up until we find an empty spot + uint32_t value_idx = static_cast<uint32_t>(m_values.size()) - 1; + place_and_shift_up({dist_and_fingerprint, value_idx}, bucket); + return {begin() + value_idx, true}; + } + + template <typename K> + auto do_find(K const& key) -> iterator { + if (empty()) { + return end(); + } + + auto mh = mixed_hash(key); + auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh); + auto const* bucket = bucket_from_hash(mh); + + // unrolled loop. *Always* check a few directly, then enter the loop. This is faster. + if (dist_and_fingerprint == bucket->dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->value_idx]))) { + return begin() + bucket->value_idx; + } + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + + if (dist_and_fingerprint == bucket->dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->value_idx]))) { + return begin() + bucket->value_idx; + } + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + + do { + if (dist_and_fingerprint == bucket->dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->value_idx]))) { + return begin() + bucket->value_idx; + } + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + } while (dist_and_fingerprint <= bucket->dist_and_fingerprint); + return end(); + } + + template <typename K> + auto do_find(K const& key) const -> const_iterator { + return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + +public: + table() + : table(0) {} + + explicit table(size_t /*bucket_count*/, + Hash const& hash = Hash(), + KeyEqual const& equal = KeyEqual(), + Allocator const& alloc = Allocator()) + : m_values(alloc) + , m_hash(hash) + , m_equal(equal) {} + + table(size_t bucket_count, Allocator const& alloc) + : table(bucket_count, Hash(), KeyEqual(), alloc) {} + + table(size_t bucket_count, Hash const& hash, Allocator const& alloc) + : table(bucket_count, hash, KeyEqual(), alloc) {} + + explicit table(Allocator const& alloc) + : table(0, Hash(), KeyEqual(), alloc) {} + + template <class InputIt> + table(InputIt first, + InputIt last, + size_type bucket_count = 0, + Hash const& hash = Hash(), + KeyEqual const& equal = KeyEqual(), + Allocator const& alloc = Allocator()) + : table(bucket_count, hash, equal, alloc) { + insert(first, last); + } + + template <class InputIt> + table(InputIt first, InputIt last, size_type bucket_count, Allocator const& alloc) + : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {} + + template <class InputIt> + table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, Allocator const& alloc) + : table(first, last, bucket_count, hash, KeyEqual(), alloc) {} + + table(table const& other) + : table(other, other.m_values.get_allocator()) {} + + table(table const& other, Allocator const& alloc) + : m_values(other.m_values, alloc) + , m_max_load_factor(other.m_max_load_factor) + , m_hash(other.m_hash) + , m_equal(other.m_equal) { + copy_buckets(other); + } + + table(table&& other) noexcept + : table(std::move(other), other.m_values.get_allocator()) {} + + table(table&& other, Allocator const& alloc) noexcept + : m_values(std::move(other.m_values), alloc) + , m_buckets_start(std::exchange(other.m_buckets_start, nullptr)) + , 
m_buckets_end(std::exchange(other.m_buckets_end, nullptr)) + , m_max_bucket_capacity(std::exchange(other.m_max_bucket_capacity, 0)) + , m_max_load_factor(std::exchange(other.m_max_load_factor, DEFAULT_MAX_LOAD_FACTOR)) + , m_hash(std::exchange(other.m_hash, {})) + , m_equal(std::exchange(other.m_equal, {})) + , m_shifts(std::exchange(other.m_shifts, INITIAL_SHIFTS)) { + other.m_values.clear(); + } + + table(std::initializer_list<value_type> ilist, + size_t bucket_count = 0, + Hash const& hash = Hash(), + KeyEqual const& equal = KeyEqual(), + Allocator const& alloc = Allocator()) + : table(bucket_count, hash, equal, alloc) { + insert(ilist); + } + + table(std::initializer_list<value_type> ilist, size_type bucket_count, const Allocator& alloc) + : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {} + + table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, Allocator const& alloc) + : table(init, bucket_count, hash, KeyEqual(), alloc) {} + + ~table() { + auto bucket_alloc = BucketAlloc(m_values.get_allocator()); + BucketAllocTraits::deallocate(bucket_alloc, m_buckets_start, bucket_count()); + } + + auto operator=(table const& other) -> table& { + if (&other != this) { + deallocate_buckets(); // deallocate before m_values is set (might have another allocator) + m_values = other.m_values; + m_max_load_factor = other.m_max_load_factor; + m_hash = other.m_hash; + m_equal = other.m_equal; + m_shifts = INITIAL_SHIFTS; + copy_buckets(other); + } + return *this; + } + + auto operator=(table&& other) noexcept( + noexcept(std::is_nothrow_move_assignable_v<ValueContainer>&& std::is_nothrow_move_assignable_v<Hash>&& + std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& { + if (&other != this) { + deallocate_buckets(); // deallocate before m_values is set (might have another allocator) + m_values = std::move(other.m_values); + m_buckets_start = std::exchange(other.m_buckets_start, nullptr); + m_buckets_end = std::exchange(other.m_buckets_end, nullptr); + m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0); + m_max_load_factor = std::exchange(other.m_max_load_factor, DEFAULT_MAX_LOAD_FACTOR); + m_hash = std::exchange(other.m_hash, {}); + m_equal = std::exchange(other.m_equal, {}); + m_shifts = std::exchange(other.m_shifts, INITIAL_SHIFTS); + other.m_values.clear(); + } + return *this; + } + + auto operator=(std::initializer_list<value_type> ilist) -> table& { + clear(); + insert(ilist); + return *this; + } + + auto get_allocator() const noexcept -> allocator_type { + return m_values.get_allocator(); + } + + // iterators ////////////////////////////////////////////////////////////// + + auto begin() noexcept -> iterator { + return m_values.begin(); + } + + auto begin() const noexcept -> const_iterator { + return m_values.begin(); + } + + auto cbegin() const noexcept -> const_iterator { + return m_values.cbegin(); + } + + auto end() noexcept -> iterator { + return m_values.end(); + } + + auto cend() const noexcept -> const_iterator { + return m_values.cend(); + } + + auto end() const noexcept -> const_iterator { + return m_values.end(); + } + + // capacity /////////////////////////////////////////////////////////////// + + [[nodiscard]] auto empty() const noexcept -> bool { + return m_values.empty(); + } + + [[nodiscard]] auto size() const noexcept -> size_t { + return m_values.size(); + } + + [[nodiscard]] auto max_size() const noexcept -> size_t { + return std::numeric_limits<uint32_t>::max(); + } + + // modifiers 
////////////////////////////////////////////////////////////// + + void clear() { + m_values.clear(); + clear_buckets(); + } + + auto insert(value_type const& value) -> std::pair<iterator, bool> { + return emplace(value); + } + + auto insert(value_type&& value) -> std::pair<iterator, bool> { + return emplace(std::move(value)); + } + + template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true> + auto insert(P&& value) -> std::pair<iterator, bool> { + return emplace(std::forward<P>(value)); + } + + auto insert(const_iterator /*hint*/, value_type const& value) -> iterator { + return insert(value).first; + } + + auto insert(const_iterator /*hint*/, value_type&& value) -> iterator { + return insert(std::move(value)).first; + } + + template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true> + auto insert(const_iterator /*hint*/, P&& value) -> iterator { + return insert(std::forward<P>(value)).first; + } + + template <class InputIt> + void insert(InputIt first, InputIt last) { + while (first != last) { + insert(*first); + ++first; + } + } + + void insert(std::initializer_list<value_type> ilist) { + insert(ilist.begin(), ilist.end()); + } + + template <class M, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> { + return do_insert_or_assign(key, std::forward<M>(mapped)); + } + + template <class M, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> { + return do_insert_or_assign(std::move(key), std::forward<M>(mapped)); + } + + template <class M, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator { + return do_insert_or_assign(key, std::forward<M>(mapped)).first; + } + + template <class M, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator { + return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first; + } + + template <class... Args> + auto emplace(Args&&... args) -> std::pair<iterator, bool> { + if (is_full()) { + increase_size(); + } + + // first emplace_back the object so it is constructed. If the key is already there, pop it. + auto& val = m_values.emplace_back(std::forward<Args>(args)...); + auto hash = mixed_hash(get_key(val)); + auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash); + auto* bucket = bucket_from_hash(hash); + + while (dist_and_fingerprint <= bucket->dist_and_fingerprint) { + if (dist_and_fingerprint == bucket->dist_and_fingerprint && + m_equal(get_key(val), get_key(m_values[bucket->value_idx]))) { + m_values.pop_back(); // value was already there, so get rid of it + return {begin() + bucket->value_idx, false}; + } + dist_and_fingerprint += BUCKET_DIST_INC; + bucket = next(bucket); + } + + // value is new, place the bucket and shift up until we find an empty spot + uint32_t value_idx = static_cast<uint32_t>(m_values.size()) - 1; + place_and_shift_up({dist_and_fingerprint, value_idx}, bucket); + + return {begin() + value_idx, true}; + } + + template <class... Args> + auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator { + return emplace(std::forward<Args>(args)...).first; + } + + template <class... 
Args, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> { + return do_try_emplace(key, std::forward<Args>(args)...); + } + + template <class... Args, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> { + return do_try_emplace(std::move(key), std::forward<Args>(args)...); + } + + template <class... Args, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator { + return do_try_emplace(key, std::forward<Args>(args)...).first; + } + + template <class... Args, typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator { + return do_try_emplace(std::move(key), std::forward<Args>(args)...).first; + } + + auto erase(iterator it) -> iterator { + auto hash = mixed_hash(get_key(*it)); + auto* bucket = bucket_from_hash(hash); + + auto const value_idx_to_remove = static_cast<uint32_t>(it - cbegin()); + while (bucket->value_idx != value_idx_to_remove) { + bucket = next(bucket); + } + + do_erase(bucket); + return begin() + value_idx_to_remove; + } + + auto erase(const_iterator it) -> iterator { + return erase(begin() + (it - cbegin())); + } + + auto erase(const_iterator first, const_iterator last) -> iterator { + auto const idx_first = first - cbegin(); + auto const idx_last = last - cbegin(); + auto const first_to_last = std::distance(first, last); + auto const last_to_end = std::distance(last, cend()); + + // remove elements from left to right which moves elements from the end back + auto const mid = idx_first + std::min(first_to_last, last_to_end); + auto idx = idx_first; + while (idx != mid) { + erase(begin() + idx); + ++idx; + } + + // all elements from the right are moved, now remove the last element until all done + idx = idx_last; + while (idx != mid) { + --idx; + erase(begin() + idx); + } + + return begin() + idx_first; + } + + auto erase(Key const& key) -> size_t { + return do_erase_key(key); + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto erase(K&& key) -> size_t { + return do_erase_key(std::forward<K>(key)); + } + + void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<ValueContainer>&& std::is_nothrow_swappable_v<Hash>&& + std::is_nothrow_swappable_v<KeyEqual>)) { + using std::swap; + swap(other, *this); + } + + // lookup ///////////////////////////////////////////////////////////////// + + template <typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto at(key_type const& key) -> Q& { + if (auto it = find(key); end() != it) { + return it->second; + } + throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found"); + } // LCOV_EXCL_LINE is this a gcov/lcov bug? this method is fully tested. 
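The unrolled lookup in do_find() above never touches a key until a bucket's dist_and_fingerprint word matches exactly. That word packs two things: the low byte is a fingerprint taken from the mixed hash, and the bits above it count the probe distance from the home bucket, so adding BUCKET_DIST_INC advances the distance while leaving the fingerprint intact, and the loop can stop as soon as the stored word is smaller than the probed one. A minimal sketch of that arithmetic, assuming the upstream constants defined earlier in this header (an 8-bit fingerprint and BUCKET_DIST_INC == 1U << 8):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint32_t BUCKET_DIST_INC = 1U << 8;  // one probe step
        uint64_t mixed_hash = 0xDEADBEEFCAFEF00DULL;   // hypothetical mixed hash
        // home slot: distance field 1, fingerprint in the low byte
        uint32_t daf = BUCKET_DIST_INC | static_cast<uint32_t>(mixed_hash & 0xFFU);
        // each occupied slot skipped bumps the distance field by one
        std::printf("home=0x%08x next=0x%08x\n", daf, daf + BUCKET_DIST_INC);
        return 0;
    }

Because occupied buckets are kept ordered by that distance field (robin-hood style), a probe whose dist_and_fingerprint exceeds the stored one proves the key is absent without any key comparison.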
+ + template <typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto at(key_type const& key) const -> Q const& { + return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template <typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto operator[](Key const& key) -> Q& { + return try_emplace(key).first->second; + } + + template <typename Q = T, std::enable_if_t<!std::is_void_v<Q>, bool> = true> + auto operator[](Key&& key) -> Q& { + return try_emplace(std::move(key)).first->second; + } + + auto count(Key const& key) const -> size_t { + return find(key) == end() ? 0 : 1; + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto count(K const& key) const -> size_t { + return find(key) == end() ? 0 : 1; + } + + auto find(Key const& key) -> iterator { + return do_find(key); + } + + auto find(Key const& key) const -> const_iterator { + return do_find(key); + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto find(K const& key) -> iterator { + return do_find(key); + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto find(K const& key) const -> const_iterator { + return do_find(key); + } + + auto contains(Key const& key) const -> bool { + return find(key) != end(); + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto contains(K const& key) const -> bool { + return find(key) != end(); + } + + auto equal_range(Key const& key) -> std::pair<iterator, iterator> { + auto it = do_find(key); + return {it, it == end() ? end() : it + 1}; + } + + auto equal_range(Key const& key) const -> std::pair<const_iterator, const_iterator> { + auto it = do_find(key); + return {it, it == end() ? end() : it + 1}; + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto equal_range(K const& key) -> std::pair<iterator, iterator> { + auto it = do_find(key); + return {it, it == end() ? end() : it + 1}; + } + + template <class K, class H = Hash, class KE = KeyEqual, is_transparent<H, KE> = true> + auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> { + auto it = do_find(key); + return {it, it == end() ? end() : it + 1}; + } + + // bucket interface /////////////////////////////////////////////////////// + + auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard) + return m_buckets_end - m_buckets_start; + } + + auto max_bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard) + return std::numeric_limits<uint32_t>::max(); + } + + // hash policy //////////////////////////////////////////////////////////// + + [[nodiscard]] auto load_factor() const -> float { + return bucket_count() ? 
static_cast<float>(size()) / bucket_count() : 0.0F; + } + + [[nodiscard]] auto max_load_factor() const -> float { + return m_max_load_factor; + } + + void max_load_factor(float ml) { + m_max_load_factor = ml; + m_max_bucket_capacity = static_cast<uint32_t>(bucket_count() * max_load_factor()); + } + + void rehash(size_t count) { + auto shifts = calc_shifts_for_size(std::max(count, size())); + if (shifts != m_shifts) { + m_shifts = shifts; + deallocate_buckets(); + m_values.shrink_to_fit(); + allocate_buckets_from_shift(); + clear_and_fill_buckets_from_values(); + } + } + + void reserve(size_t capa) { + auto shifts = calc_shifts_for_size(std::max(capa, size())); + if (shifts < m_shifts) { + m_shifts = shifts; + deallocate_buckets(); + allocate_buckets_from_shift(); + clear_and_fill_buckets_from_values(); + } + } + + // observers ////////////////////////////////////////////////////////////// + + auto hash_function() const -> hasher { + return m_hash; + } + + auto key_eq() const -> key_equal { + return m_equal; + } + + // non-member functions /////////////////////////////////////////////////// + + friend auto operator==(table const& a, table const& b) -> bool { + if (&a == &b) { + return true; + } + if (a.size() != b.size()) { + return false; + } + for (auto const& b_entry : b) { + auto it = a.find(get_key(b_entry)); + if constexpr (std::is_void_v<T>) { + // set: only check that the key is here + if (a.end() == it) { + return false; + } + } else { + // map: check that key is here, then also check that value is the same + if (a.end() == it || !(b_entry.second == it->second)) { + return false; + } + } + } + return true; + } + + friend auto operator!=(table const& a, table const& b) -> bool { + return !(a == b); + } +}; + +} // namespace detail + +template <class Key, + class T, + class Hash = hash<Key>, + class KeyEqual = std::equal_to<Key>, + class Allocator = std::allocator<std::pair<Key, T>>> +using map = detail::table<Key, T, Hash, KeyEqual, Allocator>; + +template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>, class Allocator = std::allocator<Key>> +using set = detail::table<Key, void, Hash, KeyEqual, Allocator>; + +# if ANKERL_UNORDERED_DENSE_PMR + +namespace pmr { + +template <class Key, class T, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>> +using map = detail::table<Key, T, Hash, KeyEqual, std::pmr::polymorphic_allocator<std::pair<Key, T>>>; + +template <class Key, class Hash = hash<Key>, class KeyEqual = std::equal_to<Key>> +using set = detail::table<Key, void, Hash, KeyEqual, std::pmr::polymorphic_allocator<Key>>; + +} // namespace pmr + +# endif + +// deduction guides /////////////////////////////////////////////////////////// + +// deduction guides for alias templates are only possible since C++20 +// see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction + +} // namespace ankerl::unordered_dense + +// std extensions ///////////////////////////////////////////////////////////// + +namespace std { // NOLINT(cert-dcl58-cpp) + +template <class Key, class T, class Hash, class KeyEqual, class Allocator, class Pred> +auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, Allocator>& map, Pred pred) -> size_t { + // going back to front because erase() invalidates the end iterator + auto const old_size = map.size(); + auto idx = old_size; + while (idx) { + --idx; + auto it = map.begin() + idx; + if (pred(*it)) { + map.erase(it); + } + } + + return old_size - map.size(); +} + +} // namespace std + 
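With the map and set aliases and the std::erase_if overload in place, the header behaves as a drop-in for the standard containers in the common cases; erase_if walks indices from back to front because erasing moves the element at the end of the dense value vector into the freed slot (see the comment in erase(first, last) above). A small usage sketch, assuming the vendored header is reachable on the include path as "unordered_dense.h":

    #include "unordered_dense.h"
    #include <string>

    int main() {
        ankerl::unordered_dense::map<std::string, int> m;
        m.try_emplace("spam", 1);      // inserts only if the key is absent
        ++m["ham"];                    // operator[] routes through try_emplace
        m.insert_or_assign("eggs", 4); // overwrites an existing mapping

        // drop the odd-valued entries; returns how many elements were erased
        auto n = std::erase_if(m, [](auto const& kv) { return kv.second % 2 != 0; });
        return (n == 2 && m.contains("eggs")) ? 0 : 1;
    }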
+#endif +#endif diff --git a/contrib/robin-hood/robin_hood.h b/contrib/robin-hood/robin_hood.h deleted file mode 100644 index 9141848d7..000000000 --- a/contrib/robin-hood/robin_hood.h +++ /dev/null @@ -1,2430 +0,0 @@ -// ______ _____ ______ _________ -// ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ / -// __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ / -// _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ / -// /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/ -// _/_____/ -// -// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20 -// https://github.com/martinus/robin-hood-hashing -// -// Licensed under the MIT License <http://opensource.org/licenses/MIT>. -// SPDX-License-Identifier: MIT -// Copyright (c) 2018-2020 Martin Ankerl <http://martin.ankerl.com> -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -#ifndef ROBIN_HOOD_H_INCLUDED -#define ROBIN_HOOD_H_INCLUDED - -// see https://semver.org/ -#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes -#define ROBIN_HOOD_VERSION_MINOR 9 // for adding functionality in a backwards-compatible manner -#define ROBIN_HOOD_VERSION_PATCH 1 // for backwards-compatible bug fixes - -#include <algorithm> -#include <cstdlib> -#include <cstring> -#include <functional> -#include <limits> -#include <memory> // only to support hash of smart pointers -#include <stdexcept> -#include <string> -#include <type_traits> -#include <utility> -#if __cplusplus >= 201703L -# include <string_view> -#endif - -// #define ROBIN_HOOD_LOG_ENABLED -#ifdef ROBIN_HOOD_LOG_ENABLED -# include <iostream> -# define ROBIN_HOOD_LOG(...) \ - std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; -#else -# define ROBIN_HOOD_LOG(x) -#endif - -// #define ROBIN_HOOD_TRACE_ENABLED -#ifdef ROBIN_HOOD_TRACE_ENABLED -# include <iostream> -# define ROBIN_HOOD_TRACE(...) 
\ - std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; -#else -# define ROBIN_HOOD_TRACE(x) -#endif - -// #define ROBIN_HOOD_COUNT_ENABLED -#ifdef ROBIN_HOOD_COUNT_ENABLED -# include <iostream> -# define ROBIN_HOOD_COUNT(x) ++counts().x; -namespace robin_hood { -struct Counts { - uint64_t shiftUp{}; - uint64_t shiftDown{}; -}; -inline std::ostream& operator<<(std::ostream& os, Counts const& c) { - return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl; -} - -static Counts& counts() { - static Counts counts{}; - return counts; -} -} // namespace robin_hood -#else -# define ROBIN_HOOD_COUNT(x) -#endif - -// all non-argument macros should use this facility. See -// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/ -#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x() - -// mark unused members with this macro -#define ROBIN_HOOD_UNUSED(identifier) - -// bitness -#if SIZE_MAX == UINT32_MAX -# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32 -#elif SIZE_MAX == UINT64_MAX -# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64 -#else -# error Unsupported bitness -#endif - -// endianess -#ifdef _MSC_VER -# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1 -# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0 -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \ - (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) -# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -#endif - -// inline -#ifdef _MSC_VER -# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline) -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline)) -#endif - -// exceptions -#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) -# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0 -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1 -#endif - -// count leading/trailing bits -#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS) -# ifdef _MSC_VER -# if ROBIN_HOOD(BITNESS) == 32 -# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward -# else -# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64 -# endif -# include <intrin.h> -# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD)) -# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \ - [](size_t mask) noexcept -> int { \ - unsigned long index; \ - return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast<int>(index) \ - : ROBIN_HOOD(BITNESS); \ - }(x) -# else -# if ROBIN_HOOD(BITNESS) == 32 -# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl -# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl -# else -# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll -# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll -# endif -# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS)) -# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? 
ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS)) -# endif -#endif - -// fallthrough -#ifndef __has_cpp_attribute // For backwards compatibility -# define __has_cpp_attribute(x) 0 -#endif -#if __has_cpp_attribute(clang::fallthrough) -# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]] -#elif __has_cpp_attribute(gnu::fallthrough) -# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]] -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() -#endif - -// likely/unlikely -#ifdef _MSC_VER -# define ROBIN_HOOD_LIKELY(condition) condition -# define ROBIN_HOOD_UNLIKELY(condition) condition -#else -# define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1) -# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0) -#endif - -// detect if native wchar_t type is availiable in MSVC -#ifdef _MSC_VER -# ifdef _NATIVE_WCHAR_T_DEFINED -# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 -# else -# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0 -# endif -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 -#endif - -// workaround missing "is_trivially_copyable" in g++ < 5.0 -// See https://stackoverflow.com/a/31798726/48181 -#if defined(__GNUC__) && __GNUC__ < 5 -# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) -#else -# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value -#endif - -// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html -#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus -#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L -#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L -#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L -#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L - -#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) -# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]] -#else -# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() -#endif - -namespace robin_hood { - -#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14) -# define ROBIN_HOOD_STD std -#else - -// c++11 compatibility layer -namespace ROBIN_HOOD_STD { -template <class T> -struct alignment_of - : std::integral_constant<std::size_t, alignof(typename std::remove_all_extents<T>::type)> {}; - -template <class T, T... Ints> -class integer_sequence { -public: - using value_type = T; - static_assert(std::is_integral<value_type>::value, "not integral type"); - static constexpr std::size_t size() noexcept { - return sizeof...(Ints); - } -}; -template <std::size_t... Inds> -using index_sequence = integer_sequence<std::size_t, Inds...>; - -namespace detail_ { -template <class T, T Begin, T End, bool> -struct IntSeqImpl { - using TValue = T; - static_assert(std::is_integral<TValue>::value, "not integral type"); - static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)"); - - template <class, class> - struct IntSeqCombiner; - - template <TValue... Inds0, TValue... 
Inds1> - struct IntSeqCombiner<integer_sequence<TValue, Inds0...>, integer_sequence<TValue, Inds1...>> { - using TResult = integer_sequence<TValue, Inds0..., Inds1...>; - }; - - using TResult = - typename IntSeqCombiner<typename IntSeqImpl<TValue, Begin, Begin + (End - Begin) / 2, - (End - Begin) / 2 == 1>::TResult, - typename IntSeqImpl<TValue, Begin + (End - Begin) / 2, End, - (End - Begin + 1) / 2 == 1>::TResult>::TResult; -}; - -template <class T, T Begin> -struct IntSeqImpl<T, Begin, Begin, false> { - using TValue = T; - static_assert(std::is_integral<TValue>::value, "not integral type"); - static_assert(Begin >= 0, "unexpected argument (Begin<0)"); - using TResult = integer_sequence<TValue>; -}; - -template <class T, T Begin, T End> -struct IntSeqImpl<T, Begin, End, true> { - using TValue = T; - static_assert(std::is_integral<TValue>::value, "not integral type"); - static_assert(Begin >= 0, "unexpected argument (Begin<0)"); - using TResult = integer_sequence<TValue, Begin>; -}; -} // namespace detail_ - -template <class T, T N> -using make_integer_sequence = typename detail_::IntSeqImpl<T, 0, N, (N - 0) == 1>::TResult; - -template <std::size_t N> -using make_index_sequence = make_integer_sequence<std::size_t, N>; - -template <class... T> -using index_sequence_for = make_index_sequence<sizeof...(T)>; - -} // namespace ROBIN_HOOD_STD - -#endif - -namespace detail { - -// make sure we static_cast to the correct type for hash_int -#if ROBIN_HOOD(BITNESS) == 64 -using SizeT = uint64_t; -#else -using SizeT = uint32_t; -#endif - -template <typename T> -T rotr(T x, unsigned k) { - return (x >> k) | (x << (8U * sizeof(T) - k)); -} - -// This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to -// 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with -// care! -template <typename T> -inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept { - return reinterpret_cast<T>(ptr); -} - -template <typename T> -inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept { - return reinterpret_cast<T>(ptr); -} - -// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other -// inlinings more difficult. Throws are also generally the slow path. -template <typename E, typename... Args> -[[noreturn]] ROBIN_HOOD(NOINLINE) -#if ROBIN_HOOD(HAS_EXCEPTIONS) - void doThrow(Args&&... args) { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - throw E(std::forward<Args>(args)...); -} -#else - void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) { - abort(); -} -#endif - -template <typename E, typename T, typename... Args> -T* assertNotNull(T* t, Args&&... args) { - if (ROBIN_HOOD_UNLIKELY(nullptr == t)) { - doThrow<E>(std::forward<Args>(args)...); - } - return t; -} - -template <typename T> -inline T unaligned_load(void const* ptr) noexcept { - // using memcpy so we don't get into unaligned load problems. - // compiler should optimize this very well anyways. - T t; - std::memcpy(&t, ptr, sizeof(T)); - return t; -} - -// Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor, -// and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a -// pointer. -template <typename T, size_t MinNumAllocs = 4, size_t MaxNumAllocs = 256> -class BulkPoolAllocator { -public: - BulkPoolAllocator() noexcept = default; - - // does not copy anything, just creates a new allocator. 
- BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept - : mHead(nullptr) - , mListForFree(nullptr) {} - - BulkPoolAllocator(BulkPoolAllocator&& o) noexcept - : mHead(o.mHead) - , mListForFree(o.mListForFree) { - o.mListForFree = nullptr; - o.mHead = nullptr; - } - - BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept { - reset(); - mHead = o.mHead; - mListForFree = o.mListForFree; - o.mListForFree = nullptr; - o.mHead = nullptr; - return *this; - } - - BulkPoolAllocator& - // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) - operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept { - // does not do anything - return *this; - } - - ~BulkPoolAllocator() noexcept { - reset(); - } - - // Deallocates all allocated memory. - void reset() noexcept { - while (mListForFree) { - T* tmp = *mListForFree; - ROBIN_HOOD_LOG("std::free") - std::free(mListForFree); - mListForFree = reinterpret_cast_no_cast_align_warning<T**>(tmp); - } - mHead = nullptr; - } - - // allocates, but does NOT initialize. Use in-place new constructor, e.g. - // T* obj = pool.allocate(); - // ::new (static_cast<void*>(obj)) T(); - T* allocate() { - T* tmp = mHead; - if (!tmp) { - tmp = performAllocation(); - } - - mHead = *reinterpret_cast_no_cast_align_warning<T**>(tmp); - return tmp; - } - - // does not actually deallocate but puts it in store. - // make sure you have already called the destructor! e.g. with - // obj->~T(); - // pool.deallocate(obj); - void deallocate(T* obj) noexcept { - *reinterpret_cast_no_cast_align_warning<T**>(obj) = mHead; - mHead = obj; - } - - // Adds an already allocated block of memory to the allocator. This allocator is from now on - // responsible for freeing the data (with free()). If the provided data is not large enough to - // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor. - void addOrFree(void* ptr, const size_t numBytes) noexcept { - // calculate number of available elements in ptr - if (numBytes < ALIGNMENT + ALIGNED_SIZE) { - // not enough data for at least one element. Free and return. - ROBIN_HOOD_LOG("std::free") - std::free(ptr); - } else { - ROBIN_HOOD_LOG("add to buffer") - add(ptr, numBytes); - } - } - - void swap(BulkPoolAllocator<T, MinNumAllocs, MaxNumAllocs>& other) noexcept { - using std::swap; - swap(mHead, other.mHead); - swap(mListForFree, other.mListForFree); - } - -private: - // iterates the list of allocated memory to calculate how many to alloc next. - // Recalculating this each time saves us a size_t member. - // This ignores the fact that memory blocks might have been added manually with addOrFree. In - // practice, this should not matter much. - ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept { - auto tmp = mListForFree; - size_t numAllocs = MinNumAllocs; - - while (numAllocs * 2 <= MaxNumAllocs && tmp) { - auto x = reinterpret_cast<T***>(tmp); - tmp = *x; - numAllocs *= 2; - } - - return numAllocs; - } - - // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree(). 
- void add(void* ptr, const size_t numBytes) noexcept { - const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE; - - auto data = reinterpret_cast<T**>(ptr); - - // link free list - auto x = reinterpret_cast<T***>(data); - *x = mListForFree; - mListForFree = data; - - // create linked list for newly allocated data - auto* const headT = - reinterpret_cast_no_cast_align_warning<T*>(reinterpret_cast<char*>(ptr) + ALIGNMENT); - - auto* const head = reinterpret_cast<char*>(headT); - - // Visual Studio compiler automatically unrolls this loop, which is pretty cool - for (size_t i = 0; i < numElements; ++i) { - *reinterpret_cast_no_cast_align_warning<char**>(head + i * ALIGNED_SIZE) = - head + (i + 1) * ALIGNED_SIZE; - } - - // last one points to 0 - *reinterpret_cast_no_cast_align_warning<T**>(head + (numElements - 1) * ALIGNED_SIZE) = - mHead; - mHead = headT; - } - - // Called when no memory is available (mHead == 0). - // Don't inline this slow path. - ROBIN_HOOD(NOINLINE) T* performAllocation() { - size_t const numElementsToAlloc = calcNumElementsToAlloc(); - - // alloc new memory: [prev |T, T, ... T] - size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc; - ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE - << " * " << numElementsToAlloc) - add(assertNotNull<std::bad_alloc>(std::malloc(bytes)), bytes); - return mHead; - } - - // enforce byte alignment of the T's -#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14) - static constexpr size_t ALIGNMENT = - (std::max)(std::alignment_of<T>::value, std::alignment_of<T*>::value); -#else - static const size_t ALIGNMENT = - (ROBIN_HOOD_STD::alignment_of<T>::value > ROBIN_HOOD_STD::alignment_of<T*>::value) - ? ROBIN_HOOD_STD::alignment_of<T>::value - : +ROBIN_HOOD_STD::alignment_of<T*>::value; // the + is for walkarround -#endif - - static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT; - - static_assert(MinNumAllocs >= 1, "MinNumAllocs"); - static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs"); - static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE"); - static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod"); - static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT"); - - T* mHead{nullptr}; - T** mListForFree{nullptr}; -}; - -template <typename T, size_t MinSize, size_t MaxSize, bool IsFlat> -struct NodeAllocator; - -// dummy allocator that does nothing -template <typename T, size_t MinSize, size_t MaxSize> -struct NodeAllocator<T, MinSize, MaxSize, true> { - - // we are not using the data, so just free it. - void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept { - ROBIN_HOOD_LOG("std::free") - std::free(ptr); - } -}; - -template <typename T, size_t MinSize, size_t MaxSize> -struct NodeAllocator<T, MinSize, MaxSize, false> : public BulkPoolAllocator<T, MinSize, MaxSize> {}; - -// dummy hash, unsed as mixer when robin_hood::hash is already used -template <typename T> -struct identity_hash { - constexpr size_t operator()(T const& obj) const noexcept { - return static_cast<size_t>(obj); - } -}; - -// c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making -// my own here. 
-namespace swappable { -#if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17) -using std::swap; -template <typename T> -struct nothrow { - static const bool value = noexcept(swap(std::declval<T&>(), std::declval<T&>())); -}; -#else -template <typename T> -struct nothrow { - static const bool value = std::is_nothrow_swappable<T>::value; -}; -#endif -} // namespace swappable - -} // namespace detail - -struct is_transparent_tag {}; - -// A custom pair implementation is used in the map because std::pair is not is_trivially_copyable, -// which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is -// also tested. -template <typename T1, typename T2> -struct pair { - using first_type = T1; - using second_type = T2; - - template <typename U1 = T1, typename U2 = T2, - typename = typename std::enable_if<std::is_default_constructible<U1>::value && - std::is_default_constructible<U2>::value>::type> - constexpr pair() noexcept(noexcept(U1()) && noexcept(U2())) - : first() - , second() {} - - // pair constructors are explicit so we don't accidentally call this ctor when we don't have to. - explicit constexpr pair(std::pair<T1, T2> const& o) noexcept( - noexcept(T1(std::declval<T1 const&>())) && noexcept(T2(std::declval<T2 const&>()))) - : first(o.first) - , second(o.second) {} - - // pair constructors are explicit so we don't accidentally call this ctor when we don't have to. - explicit constexpr pair(std::pair<T1, T2>&& o) noexcept(noexcept( - T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>())))) - : first(std::move(o.first)) - , second(std::move(o.second)) {} - - constexpr pair(T1&& a, T2&& b) noexcept(noexcept( - T1(std::move(std::declval<T1&&>()))) && noexcept(T2(std::move(std::declval<T2&&>())))) - : first(std::move(a)) - , second(std::move(b)) {} - - template <typename U1, typename U2> - constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward<U1>( - std::declval<U1&&>()))) && noexcept(T2(std::forward<U2>(std::declval<U2&&>())))) - : first(std::forward<U1>(a)) - , second(std::forward<U2>(b)) {} - - template <typename... U1, typename... U2> - constexpr pair( - std::piecewise_construct_t /*unused*/, std::tuple<U1...> a, - std::tuple<U2...> b) noexcept(noexcept(pair(std::declval<std::tuple<U1...>&>(), - std::declval<std::tuple<U2...>&>(), - ROBIN_HOOD_STD::index_sequence_for<U1...>(), - ROBIN_HOOD_STD::index_sequence_for<U2...>()))) - : pair(a, b, ROBIN_HOOD_STD::index_sequence_for<U1...>(), - ROBIN_HOOD_STD::index_sequence_for<U2...>()) {} - - // constructor called from the std::piecewise_construct_t ctor - template <typename... U1, size_t... I1, typename... U2, size_t... I2> - pair(std::tuple<U1...>& a, std::tuple<U2...>& b, ROBIN_HOOD_STD::index_sequence<I1...> /*unused*/, ROBIN_HOOD_STD::index_sequence<I2...> /*unused*/) noexcept( - noexcept(T1(std::forward<U1>(std::get<I1>( - std::declval<std::tuple< - U1...>&>()))...)) && noexcept(T2(std:: - forward<U2>(std::get<I2>( - std::declval<std::tuple<U2...>&>()))...))) - : first(std::forward<U1>(std::get<I1>(a))...) - , second(std::forward<U2>(std::get<I2>(b))...) { - // make visual studio compiler happy about warning about unused a & b. - // Visual studio's pair implementation disables warning 4100. 
- (void)a; - (void)b; - } - - void swap(pair<T1, T2>& o) noexcept((detail::swappable::nothrow<T1>::value) && - (detail::swappable::nothrow<T2>::value)) { - using std::swap; - swap(first, o.first); - swap(second, o.second); - } - - T1 first; // NOLINT(misc-non-private-member-variables-in-classes) - T2 second; // NOLINT(misc-non-private-member-variables-in-classes) -}; - -template <typename A, typename B> -inline void swap(pair<A, B>& a, pair<A, B>& b) noexcept( - noexcept(std::declval<pair<A, B>&>().swap(std::declval<pair<A, B>&>()))) { - a.swap(b); -} - -template <typename A, typename B> -inline constexpr bool operator==(pair<A, B> const& x, pair<A, B> const& y) { - return (x.first == y.first) && (x.second == y.second); -} -template <typename A, typename B> -inline constexpr bool operator!=(pair<A, B> const& x, pair<A, B> const& y) { - return !(x == y); -} -template <typename A, typename B> -inline constexpr bool operator<(pair<A, B> const& x, pair<A, B> const& y) noexcept(noexcept( - std::declval<A const&>() < std::declval<A const&>()) && noexcept(std::declval<B const&>() < - std::declval<B const&>())) { - return x.first < y.first || (!(y.first < x.first) && x.second < y.second); -} -template <typename A, typename B> -inline constexpr bool operator>(pair<A, B> const& x, pair<A, B> const& y) { - return y < x; -} -template <typename A, typename B> -inline constexpr bool operator<=(pair<A, B> const& x, pair<A, B> const& y) { - return !(x > y); -} -template <typename A, typename B> -inline constexpr bool operator>=(pair<A, B> const& x, pair<A, B> const& y) { - return !(x < y); -} - -inline size_t hash_bytes(void const* ptr, size_t len) noexcept { - static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995); - static constexpr uint64_t seed = UINT64_C(0xe17a1465); - static constexpr unsigned int r = 47; - - auto const* const data64 = static_cast<uint64_t const*>(ptr); - uint64_t h = seed ^ (len * m); - - size_t const n_blocks = len / 8; - for (size_t i = 0; i < n_blocks; ++i) { - auto k = detail::unaligned_load<uint64_t>(data64 + i); - - k *= m; - k ^= k >> r; - k *= m; - - h ^= k; - h *= m; - } - - auto const* const data8 = reinterpret_cast<uint8_t const*>(data64 + n_blocks); - switch (len & 7U) { - case 7: - h ^= static_cast<uint64_t>(data8[6]) << 48U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 6: - h ^= static_cast<uint64_t>(data8[5]) << 40U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 5: - h ^= static_cast<uint64_t>(data8[4]) << 32U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 4: - h ^= static_cast<uint64_t>(data8[3]) << 24U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 3: - h ^= static_cast<uint64_t>(data8[2]) << 16U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 2: - h ^= static_cast<uint64_t>(data8[1]) << 8U; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - case 1: - h ^= static_cast<uint64_t>(data8[0]); - h *= m; - ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH - default: - break; - } - - h ^= h >> r; - h *= m; - h ^= h >> r; - return static_cast<size_t>(h); -} - -inline size_t hash_int(uint64_t x) noexcept { - // inspired by lemire's strongly universal hashing - // https://lemire.me/blog/2018/08/15/fast-strongly-universal-64-bit-hashing-everywhere/ - // - // Instead of shifts, we use rotations so we don't lose any bits. - // - // Added a final multiplcation with a constant for more mixing. It is most important that - // the lower bits are well mixed. 
- auto h1 = x * UINT64_C(0xA24BAED4963EE407); - auto h2 = detail::rotr(x, 32U) * UINT64_C(0x9FB21C651E98DF25); - auto h = detail::rotr(h1 + h2, 32U); - return static_cast<size_t>(h); -} - -// A thin wrapper around std::hash, performing an additional simple mixing step of the result. -template <typename T, typename Enable = void> -struct hash : public std::hash<T> { - size_t operator()(T const& obj) const - noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>()))) { - // call base hash - auto result = std::hash<T>::operator()(obj); - // return mixed of that, to be save against identity has - return hash_int(static_cast<detail::SizeT>(result)); - } -}; - -template <typename CharT> -struct hash<std::basic_string<CharT>> { - size_t operator()(std::basic_string<CharT> const& str) const noexcept { - return hash_bytes(str.data(), sizeof(CharT) * str.size()); - } -}; - -#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) -template <typename CharT> -struct hash<std::basic_string_view<CharT>> { - size_t operator()(std::basic_string_view<CharT> const& sv) const noexcept { - return hash_bytes(sv.data(), sizeof(CharT) * sv.size()); - } -}; -#endif - -template <class T> -struct hash<T*> { - size_t operator()(T* ptr) const noexcept { - return hash_int(reinterpret_cast<detail::SizeT>(ptr)); - } -}; - -template <class T> -struct hash<std::unique_ptr<T>> { - size_t operator()(std::unique_ptr<T> const& ptr) const noexcept { - return hash_int(reinterpret_cast<detail::SizeT>(ptr.get())); - } -}; - -template <class T> -struct hash<std::shared_ptr<T>> { - size_t operator()(std::shared_ptr<T> const& ptr) const noexcept { - return hash_int(reinterpret_cast<detail::SizeT>(ptr.get())); - } -}; - -template <typename Enum> -struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> { - size_t operator()(Enum e) const noexcept { - using Underlying = typename std::underlying_type<Enum>::type; - return hash<Underlying>{}(static_cast<Underlying>(e)); - } -}; - -#define ROBIN_HOOD_HASH_INT(T) \ - template <> \ - struct hash<T> { \ - size_t operator()(T const& obj) const noexcept { \ - return hash_int(static_cast<uint64_t>(obj)); \ - } \ - } - -#if defined(__GNUC__) && !defined(__clang__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wuseless-cast" -#endif -// see https://en.cppreference.com/w/cpp/utility/hash -ROBIN_HOOD_HASH_INT(bool); -ROBIN_HOOD_HASH_INT(char); -ROBIN_HOOD_HASH_INT(signed char); -ROBIN_HOOD_HASH_INT(unsigned char); -ROBIN_HOOD_HASH_INT(char16_t); -ROBIN_HOOD_HASH_INT(char32_t); -#if ROBIN_HOOD(HAS_NATIVE_WCHART) -ROBIN_HOOD_HASH_INT(wchar_t); -#endif -ROBIN_HOOD_HASH_INT(short); -ROBIN_HOOD_HASH_INT(unsigned short); -ROBIN_HOOD_HASH_INT(int); -ROBIN_HOOD_HASH_INT(unsigned int); -ROBIN_HOOD_HASH_INT(long); -ROBIN_HOOD_HASH_INT(long long); -ROBIN_HOOD_HASH_INT(unsigned long); -ROBIN_HOOD_HASH_INT(unsigned long long); -#if defined(__GNUC__) && !defined(__clang__) -# pragma GCC diagnostic pop -#endif -namespace detail { - -template <typename T> -struct void_type { - using type = void; -}; - -template <typename T, typename = void> -struct has_is_transparent : public std::false_type {}; - -template <typename T> -struct has_is_transparent<T, typename void_type<typename T::is_transparent>::type> - : public std::true_type {}; - -// using wrapper classes for hash and key_equal prevents the diamond problem when the same type -// is used. 
see https://stackoverflow.com/a/28771920/48181 -template <typename T> -struct WrapHash : public T { - WrapHash() = default; - explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval<T const&>()))) - : T(o) {} -}; - -template <typename T> -struct WrapKeyEqual : public T { - WrapKeyEqual() = default; - explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval<T const&>()))) - : T(o) {} -}; - -// A highly optimized hashmap implementation, using the Robin Hood algorithm. -// -// In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but -// be about 2x faster in most cases and require much less allocations. -// -// This implementation uses the following memory layout: -// -// [Node, Node, ... Node | info, info, ... infoSentinel ] -// -// * Node: either a DataNode that directly has the std::pair<key, val> as member, -// or a DataNode with a pointer to std::pair<key,val>. Which DataNode representation to use -// depends on how fast the swap() operation is. Heuristically, this is automatically choosen -// based on sizeof(). there are always 2^n Nodes. -// -// * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes. -// Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the -// corresponding node contains data. Set to 2 means the corresponding Node is filled, but it -// actually belongs to the previous position and was pushed out because that place is already -// taken. -// -// * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the -// need for a idx variable. -// -// According to STL, order of templates has effect on throughput. That's why I've moved the -// boolean to the front. -// https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/ -template <bool IsFlat, size_t MaxLoadFactor100, typename Key, typename T, typename Hash, - typename KeyEqual> -class Table - : public WrapHash<Hash>, - public WrapKeyEqual<KeyEqual>, - detail::NodeAllocator< - typename std::conditional< - std::is_void<T>::value, Key, - robin_hood::pair<typename std::conditional<IsFlat, Key, Key const>::type, T>>::type, - 4, 16384, IsFlat> { -public: - static constexpr bool is_flat = IsFlat; - static constexpr bool is_map = !std::is_void<T>::value; - static constexpr bool is_set = !is_map; - static constexpr bool is_transparent = - has_is_transparent<Hash>::value && has_is_transparent<KeyEqual>::value; - - using key_type = Key; - using mapped_type = T; - using value_type = typename std::conditional< - is_set, Key, - robin_hood::pair<typename std::conditional<is_flat, Key, Key const>::type, T>>::type; - using size_type = size_t; - using hasher = Hash; - using key_equal = KeyEqual; - using Self = Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>; - -private: - static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100, - "MaxLoadFactor100 needs to be >10 && < 100"); - - using WHash = WrapHash<Hash>; - using WKeyEqual = WrapKeyEqual<KeyEqual>; - - // configuration defaults - - // make sure we have 8 elements, needed to quickly rehash mInfo - static constexpr size_t InitialNumElements = sizeof(uint64_t); - static constexpr uint32_t InitialInfoNumBits = 5; - static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits; - static constexpr size_t InfoMask = InitialInfoInc - 1U; - static constexpr uint8_t InitialInfoHashShift = 0; - using DataPool = detail::NodeAllocator<value_type, 4, 16384, IsFlat>; - 
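For contrast with the replacement container above: the removed robin-hood table keeps one info byte per node, where 0 marks an empty slot and any other value encodes the distance from the home slot in its upper bits plus a few extra hash bits in the lower InitialInfoNumBits, as keyToIdx() and next() further down compute. A hedged sketch of that encoding in the initial configuration (mInfoInc == 32, mInfoHashShift == 0; the table size here is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint32_t InfoNumBits = 5;
        constexpr uint32_t InfoInc = 1U << InfoNumBits; // 32
        constexpr uint32_t InfoMask = InfoInc - 1U;     // 31
        constexpr size_t Mask = 1024 - 1;               // hypothetical 1024-slot table

        uint64_t h = 0x9E3779B97F4A7C15ULL;             // hypothetical mixed hash
        uint32_t info = InfoInc + static_cast<uint32_t>(h & InfoMask); // "at home" + 5 hash bits
        size_t idx = (h >> InfoNumBits) & Mask;

        // next() adds InfoInc per probed slot, so a single byte comparison
        // both orders entries by probe distance and filters on the hash bits
        std::printf("idx=%zu info=0x%02x shifted=0x%02x\n", idx, info, info + InfoInc);
        return 0;
    }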
- // type needs to be wider than uint8_t. - using InfoType = uint32_t; - - // DataNode //////////////////////////////////////////////////////// - - // Primary template for the data node. We have special implementations for small and big - // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these - // on the heap so swap merely swaps a pointer. - template <typename M, bool> - class DataNode {}; - - // Small: just allocate on the stack. - template <typename M> - class DataNode<M, true> final { - public: - template <typename... Args> - explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept( - noexcept(value_type(std::forward<Args>(args)...))) - : mData(std::forward<Args>(args)...) {} - - DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, true>&& n) noexcept( - std::is_nothrow_move_constructible<value_type>::value) - : mData(std::move(n.mData)) {} - - // doesn't do anything - void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {} - void destroyDoNotDeallocate() noexcept {} - - value_type const* operator->() const noexcept { - return &mData; - } - value_type* operator->() noexcept { - return &mData; - } - - const value_type& operator*() const noexcept { - return mData; - } - - value_type& operator*() noexcept { - return mData; - } - - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept { - return mData.first; - } - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_set, VT&>::type getFirst() noexcept { - return mData; - } - - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, typename VT::first_type const&>::type - getFirst() const noexcept { - return mData.first; - } - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept { - return mData; - } - - template <typename MT = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, MT&>::type getSecond() noexcept { - return mData.second; - } - - template <typename MT = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_set, MT const&>::type getSecond() const noexcept { - return mData.second; - } - - void swap(DataNode<M, true>& o) noexcept( - noexcept(std::declval<value_type>().swap(std::declval<value_type>()))) { - mData.swap(o.mData); - } - - private: - value_type mData; - }; - - // big object: allocate on heap. - template <typename M> - class DataNode<M, false> { - public: - template <typename... Args> - explicit DataNode(M& map, Args&&... args) - : mData(map.allocate()) { - ::new (static_cast<void*>(mData)) value_type(std::forward<Args>(args)...); - } - - DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode<M, false>&& n) noexcept - : mData(std::move(n.mData)) {} - - void destroy(M& map) noexcept { - // don't deallocate, just put it into list of datapool. 
- mData->~value_type(); - map.deallocate(mData); - } - - void destroyDoNotDeallocate() noexcept { - mData->~value_type(); - } - - value_type const* operator->() const noexcept { - return mData; - } - - value_type* operator->() noexcept { - return mData; - } - - const value_type& operator*() const { - return *mData; - } - - value_type& operator*() { - return *mData; - } - - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, typename VT::first_type&>::type getFirst() noexcept { - return mData->first; - } - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_set, VT&>::type getFirst() noexcept { - return *mData; - } - - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, typename VT::first_type const&>::type - getFirst() const noexcept { - return mData->first; - } - template <typename VT = value_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_set, VT const&>::type getFirst() const noexcept { - return *mData; - } - - template <typename MT = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, MT&>::type getSecond() noexcept { - return mData->second; - } - - template <typename MT = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<is_map, MT const&>::type getSecond() const noexcept { - return mData->second; - } - - void swap(DataNode<M, false>& o) noexcept { - using std::swap; - swap(mData, o.mData); - } - - private: - value_type* mData; - }; - - using Node = DataNode<Self, IsFlat>; - - // helpers for doInsert: extract first entry (only const required) - ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept { - return n.getFirst(); - } - - // in case we have void mapped_type, we are not using a pair, thus we just route k through. - // No need to disable this because it's just not used if not applicable. - ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept { - return k; - } - - // in case we have non-void mapped_type, we have a standard robin_hood::pair - template <typename Q = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<!std::is_void<Q>::value, key_type const&>::type - getFirstConst(value_type const& vt) const noexcept { - return vt.first; - } - - // Cloner ////////////////////////////////////////////////////////// - - template <typename M, bool UseMemcpy> - struct Cloner; - - // fast path: Just copy data, without allocating anything. 
- template <typename M> - struct Cloner<M, true> { - void operator()(M const& source, M& target) const { - auto const* const src = reinterpret_cast<char const*>(source.mKeyVals); - auto* tgt = reinterpret_cast<char*>(target.mKeyVals); - auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1); - std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt); - } - }; - - template <typename M> - struct Cloner<M, false> { - void operator()(M const& s, M& t) const { - auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1); - std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo); - - for (size_t i = 0; i < numElementsWithBuffer; ++i) { - if (t.mInfo[i]) { - ::new (static_cast<void*>(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]); - } - } - } - }; - - // Destroyer /////////////////////////////////////////////////////// - - template <typename M, bool IsFlatAndTrivial> - struct Destroyer {}; - - template <typename M> - struct Destroyer<M, true> { - void nodes(M& m) const noexcept { - m.mNumElements = 0; - } - - void nodesDoNotDeallocate(M& m) const noexcept { - m.mNumElements = 0; - } - }; - - template <typename M> - struct Destroyer<M, false> { - void nodes(M& m) const noexcept { - m.mNumElements = 0; - // clear also resets mInfo to 0, that's sometimes not necessary. - auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); - - for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { - if (0 != m.mInfo[idx]) { - Node& n = m.mKeyVals[idx]; - n.destroy(m); - n.~Node(); - } - } - } - - void nodesDoNotDeallocate(M& m) const noexcept { - m.mNumElements = 0; - // clear also resets mInfo to 0, that's sometimes not necessary. - auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); - for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { - if (0 != m.mInfo[idx]) { - Node& n = m.mKeyVals[idx]; - n.destroyDoNotDeallocate(); - n.~Node(); - } - } - } - }; - - // Iter //////////////////////////////////////////////////////////// - - struct fast_forward_tag {}; - - // generic iterator for both const_iterator and iterator. - template <bool IsConst> - // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions) - class Iter { - private: - using NodePtr = typename std::conditional<IsConst, Node const*, Node*>::type; - - public: - using difference_type = std::ptrdiff_t; - using value_type = typename Self::value_type; - using reference = typename std::conditional<IsConst, value_type const&, value_type&>::type; - using pointer = typename std::conditional<IsConst, value_type const*, value_type*>::type; - using iterator_category = std::forward_iterator_tag; - - // default constructed iterator can be compared to itself, but WON'T return true when - // compared to end(). - Iter() = default; - - // Rule of zero: nothing specified. The conversion constructor is only enabled for - // iterator to const_iterator, so it doesn't accidentally work as a copy ctor. - - // Conversion constructor from iterator to const_iterator. 
- template <bool OtherIsConst, - typename = typename std::enable_if<IsConst && !OtherIsConst>::type> - // NOLINTNEXTLINE(hicpp-explicit-conversions) - Iter(Iter<OtherIsConst> const& other) noexcept - : mKeyVals(other.mKeyVals) - , mInfo(other.mInfo) {} - - Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept - : mKeyVals(valPtr) - , mInfo(infoPtr) {} - - Iter(NodePtr valPtr, uint8_t const* infoPtr, - fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept - : mKeyVals(valPtr) - , mInfo(infoPtr) { - fastForward(); - } - - template <bool OtherIsConst, - typename = typename std::enable_if<IsConst && !OtherIsConst>::type> - Iter& operator=(Iter<OtherIsConst> const& other) noexcept { - mKeyVals = other.mKeyVals; - mInfo = other.mInfo; - return *this; - } - - // prefix increment. Undefined behavior if we are at end()! - Iter& operator++() noexcept { - mInfo++; - mKeyVals++; - fastForward(); - return *this; - } - - Iter operator++(int) noexcept { - Iter tmp = *this; - ++(*this); - return tmp; - } - - reference operator*() const { - return **mKeyVals; - } - - pointer operator->() const { - return &**mKeyVals; - } - - template <bool O> - bool operator==(Iter<O> const& o) const noexcept { - return mKeyVals == o.mKeyVals; - } - - template <bool O> - bool operator!=(Iter<O> const& o) const noexcept { - return mKeyVals != o.mKeyVals; - } - - private: - // fast forward to the next non-free info byte - // I've tried a few variants that don't depend on intrinsics, but unfortunately they are - // quite a bit slower than this one. So I've reverted that change again. See map_benchmark. - void fastForward() noexcept { - size_t n = 0; - while (0U == (n = detail::unaligned_load<size_t>(mInfo))) { - mInfo += sizeof(size_t); - mKeyVals += sizeof(size_t); - } -#if defined(ROBIN_HOOD_DISABLE_INTRINSICS) - // we know for certain that within the next 8 bytes we'll find a non-zero one. - if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint32_t>(mInfo))) { - mInfo += 4; - mKeyVals += 4; - } - if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load<uint16_t>(mInfo))) { - mInfo += 2; - mKeyVals += 2; - } - if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) { - mInfo += 1; - mKeyVals += 1; - } -#else -# if ROBIN_HOOD(LITTLE_ENDIAN) - auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8; -# else - auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8; -# endif - mInfo += inc; - mKeyVals += inc; -#endif - } - - friend class Table<IsFlat, MaxLoadFactor100, key_type, mapped_type, hasher, key_equal>; - NodePtr mKeyVals{nullptr}; - uint8_t const* mInfo{nullptr}; - }; - - //////////////////////////////////////////////////////////////////// - - // highly performance relevant code. - // Lower bits are used for indexing into the array (2^n size) - // The upper 1-5 bits need to be a reasonable good hash, to save comparisons. - template <typename HashKey> - void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const { - // for a user-specified hash that is *not* robin_hood::hash, apply robin_hood::hash as - // an additional mixing step. This serves as a bad hash prevention, if the given data is - // badly mixed. - using Mix = - typename std::conditional<std::is_same<::robin_hood::hash<key_type>, hasher>::value, - ::robin_hood::detail::identity_hash<size_t>, - ::robin_hood::hash<size_t>>::type; - - // the lower InitialInfoNumBits are reserved for info. 
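// A simplified, hypothetical sketch of the split that keyToIdx() performs next:
// one hash value feeds both the info byte (the low InitialInfoNumBits, used to
// skip most full key comparisons while probing) and the bucket index (the bits
// above them; the table size is 2^n). The real code additionally folds in the
// distance increment mInfoInc and a configurable shift mInfoHashShift.
#include <cstddef>
#include <cstdint>

struct idx_info_sketch {
    static constexpr std::size_t info_num_bits = 5;
    static constexpr std::size_t info_mask = (std::size_t{1} << info_num_bits) - 1;

    std::size_t mask; // table_size - 1, where table_size is a power of two

    void key_to_idx(std::size_t h, std::size_t* idx, std::uint8_t* info) const {
        *info = static_cast<std::uint8_t>(1 + (h & info_mask)); // low bits -> info byte
        *idx = (h >> info_num_bits) & mask;                     // the rest picks the bucket
    }
};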
- auto h = Mix{}(WHash::operator()(key)); - *info = mInfoInc + static_cast<InfoType>((h & InfoMask) >> mInfoHashShift); - *idx = (h >> InitialInfoNumBits) & mMask; - } - - // forwards the index by one, wrapping around at the end - void next(InfoType* info, size_t* idx) const noexcept { - *idx = *idx + 1; - *info += mInfoInc; - } - - void nextWhileLess(InfoType* info, size_t* idx) const noexcept { - // unrolling this by hand did not bring any speedups. - while (*info < mInfo[*idx]) { - next(info, idx); - } - } - - // Shift everything up by one element. Tries to move stuff around. - void - shiftUp(size_t startIdx, - size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable<Node>::value) { - auto idx = startIdx; - ::new (static_cast<void*>(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1])); - while (--idx != insertion_idx) { - mKeyVals[idx] = std::move(mKeyVals[idx - 1]); - } - - idx = startIdx; - while (idx != insertion_idx) { - ROBIN_HOOD_COUNT(shiftUp) - mInfo[idx] = static_cast<uint8_t>(mInfo[idx - 1] + mInfoInc); - if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) { - mMaxNumElementsAllowed = 0; - } - --idx; - } - } - - void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable<Node>::value) { - // until we find one that is either empty or has zero offset. - // TODO(martinus) we don't need to move everything, just the last one for the same - // bucket. - mKeyVals[idx].destroy(*this); - - // until we find one that is either empty or has zero offset. - while (mInfo[idx + 1] >= 2 * mInfoInc) { - ROBIN_HOOD_COUNT(shiftDown) - mInfo[idx] = static_cast<uint8_t>(mInfo[idx + 1] - mInfoInc); - mKeyVals[idx] = std::move(mKeyVals[idx + 1]); - ++idx; - } - - mInfo[idx] = 0; - // don't destroy, we've moved it - // mKeyVals[idx].destroy(*this); - mKeyVals[idx].~Node(); - } - - // copy of find(), except that it returns iterator instead of const_iterator. - template <typename Other> - ROBIN_HOOD(NODISCARD) - size_t findIdx(Other const& key) const { - size_t idx{}; - InfoType info{}; - keyToIdx(key, &idx, &info); - - do { - // unrolling this twice gives a bit of a speedup. More unrolling did not help. - if (info == mInfo[idx] && - ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { - return idx; - } - next(&info, &idx); - if (info == mInfo[idx] && - ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { - return idx; - } - next(&info, &idx); - } while (info <= mInfo[idx]); - - // nothing found! - return mMask == 0 ? 0 - : static_cast<size_t>(std::distance( - mKeyVals, reinterpret_cast_no_cast_align_warning<Node*>(mInfo))); - } - - void cloneData(const Table& o) { - Cloner<Table, IsFlat && ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(Node)>()(o, *this); - } - - // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized. - // @return index where the element was created - size_t insert_move(Node&& keyval) { - // we don't retry, fail if overflowing - // don't need to check max num elements - if (0 == mMaxNumElementsAllowed && !try_increase_info()) { - throwOverflowError(); // impossible to reach LCOV_EXCL_LINE - } - - size_t idx{}; - InfoType info{}; - keyToIdx(keyval.getFirst(), &idx, &info); - - // skip forward. Use <= because we are certain that the element is not there. - while (info <= mInfo[idx]) { - idx = idx + 1; - info += mInfoInc; - } - - // key not found, so we are now exactly where we want to insert it. 
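// A simplified, standalone sketch of the backward-shift deletion that shiftDown()
// above implements: once a slot is vacated, displaced successors are pulled back
// one position until a slot is empty (info == 0) or already sits in its home
// bucket (distance below one increment). Names are hypothetical, and like the
// original this relies on a trailing sentinel so idx + 1 stays in bounds.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

template <typename Node>
void backward_shift(std::vector<Node>& nodes, std::vector<std::uint8_t>& info,
                    std::size_t idx, std::uint8_t info_inc) {
    // keep pulling entries back while the next one is displaced from its home
    while (info[idx + 1] >= 2 * info_inc) {
        info[idx] = static_cast<std::uint8_t>(info[idx + 1] - info_inc);
        nodes[idx] = std::move(nodes[idx + 1]);
        ++idx;
    }
    info[idx] = 0; // the vacated slot becomes empty
}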
- auto const insertion_idx = idx; - auto const insertion_info = static_cast<uint8_t>(info); - if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { - mMaxNumElementsAllowed = 0; - } - - // find an empty spot - while (0 != mInfo[idx]) { - next(&info, &idx); - } - - auto& l = mKeyVals[insertion_idx]; - if (idx == insertion_idx) { - ::new (static_cast<void*>(&l)) Node(std::move(keyval)); - } else { - shiftUp(idx, insertion_idx); - l = std::move(keyval); - } - - // put at empty spot - mInfo[insertion_idx] = insertion_info; - - ++mNumElements; - return insertion_idx; - } - -public: - using iterator = Iter<false>; - using const_iterator = Iter<true>; - - Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual())) - : WHash() - , WKeyEqual() { - ROBIN_HOOD_TRACE(this) - } - - // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert. - // This tremendously speeds up ctor & dtor of a map that never receives an element. The - // penalty is paid at the first insert, and not before. Lookup of this empty map works - // because everybody points to DummyInfoByte::b. The parameter bucket_count is dictated by the - // standard, but we can ignore it. - explicit Table( - size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{}, - const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal))) - : WHash(h) - , WKeyEqual(equal) { - ROBIN_HOOD_TRACE(this) - } - - template <typename Iter> - Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, - const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{}) - : WHash(h) - , WKeyEqual(equal) { - ROBIN_HOOD_TRACE(this) - insert(first, last); - } - - Table(std::initializer_list<value_type> initlist, - size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{}, - const KeyEqual& equal = KeyEqual{}) - : WHash(h) - , WKeyEqual(equal) { - ROBIN_HOOD_TRACE(this) - insert(initlist.begin(), initlist.end()); - } - - Table(Table&& o) noexcept - : WHash(std::move(static_cast<WHash&>(o))) - , WKeyEqual(std::move(static_cast<WKeyEqual&>(o))) - , DataPool(std::move(static_cast<DataPool&>(o))) { - ROBIN_HOOD_TRACE(this) - if (o.mMask) { - mKeyVals = std::move(o.mKeyVals); - mInfo = std::move(o.mInfo); - mNumElements = std::move(o.mNumElements); - mMask = std::move(o.mMask); - mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); - mInfoInc = std::move(o.mInfoInc); - mInfoHashShift = std::move(o.mInfoHashShift); - // set other's mask to 0 so its destructor won't do anything - o.init(); - } - } - - Table& operator=(Table&& o) noexcept { - ROBIN_HOOD_TRACE(this) - if (&o != this) { - if (o.mMask) { - // only move stuff if the other map actually has some data - destroy(); - mKeyVals = std::move(o.mKeyVals); - mInfo = std::move(o.mInfo); - mNumElements = std::move(o.mNumElements); - mMask = std::move(o.mMask); - mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); - mInfoInc = std::move(o.mInfoInc); - mInfoHashShift = std::move(o.mInfoHashShift); - WHash::operator=(std::move(static_cast<WHash&>(o))); - WKeyEqual::operator=(std::move(static_cast<WKeyEqual&>(o))); - DataPool::operator=(std::move(static_cast<DataPool&>(o))); - - o.init(); - - } else { - // nothing in the other map => just clear us.
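// The move constructor/assignment above steal the buffers and then call o.init()
// so the moved-from table points back at itself and its destructor frees nothing.
// A generic sketch of that pattern with hypothetical names:
#include <cstddef>
#include <utility>

struct table_sketch {
    int* buf = nullptr;
    std::size_t len = 0;

    table_sketch() = default;
    table_sketch(table_sketch&& o) noexcept
        : buf(std::exchange(o.buf, nullptr)) // take ownership of the storage...
        , len(std::exchange(o.len, 0)) {}    // ...and leave o in the empty state
    table_sketch& operator=(table_sketch&& o) noexcept {
        if (this != &o) {
            delete[] buf; // drop our own storage first (mirrors destroy())
            buf = std::exchange(o.buf, nullptr);
            len = std::exchange(o.len, 0);
        }
        return *this;
    }
    ~table_sketch() { delete[] buf; }
};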
- clear(); - } - } - return *this; - } - - Table(const Table& o) - : WHash(static_cast<const WHash&>(o)) - , WKeyEqual(static_cast<const WKeyEqual&>(o)) - , DataPool(static_cast<const DataPool&>(o)) { - ROBIN_HOOD_TRACE(this) - if (!o.empty()) { - // not empty: create an exact copy. it is also possible to just iterate through all - // elements and insert them, but copying is probably faster. - - auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); - auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); - - ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" - << numElementsWithBuffer << ")") - mKeyVals = static_cast<Node*>( - detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal))); - // no need for calloc because cloneData does memcpy - mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer); - mNumElements = o.mNumElements; - mMask = o.mMask; - mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; - mInfoInc = o.mInfoInc; - mInfoHashShift = o.mInfoHashShift; - cloneData(o); - } - } - - // Creates a copy of the given map. Copy constructor of each entry is used. - // Not sure why clang-tidy thinks this doesn't handle self assignment, it does - // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) - Table& operator=(Table const& o) { - ROBIN_HOOD_TRACE(this) - if (&o == this) { - // prevent assigning of itself - return *this; - } - - // we keep using the old allocator and not assign the new one, because we want to keep - // the memory available when it is the same size. - if (o.empty()) { - if (0 == mMask) { - // nothing to do, we are empty too - return *this; - } - - // not empty: destroy what we have there - // clear also resets mInfo to 0, that's sometimes not necessary. - destroy(); - init(); - WHash::operator=(static_cast<const WHash&>(o)); - WKeyEqual::operator=(static_cast<const WKeyEqual&>(o)); - DataPool::operator=(static_cast<DataPool const&>(o)); - - return *this; - } - - // clean up old stuff - Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this); - - if (mMask != o.mMask) { - // no luck: we don't have the same array size allocated, so we need to realloc. - if (0 != mMask) { - // only deallocate if we actually have data! - ROBIN_HOOD_LOG("std::free") - std::free(mKeyVals); - } - - auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); - auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); - ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" - << numElementsWithBuffer << ")") - mKeyVals = static_cast<Node*>( - detail::assertNotNull<std::bad_alloc>(std::malloc(numBytesTotal))); - - // no need for calloc here because cloneData performs a memcpy. - mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer); - // sentinel is set in cloneData - } - WHash::operator=(static_cast<const WHash&>(o)); - WKeyEqual::operator=(static_cast<const WKeyEqual&>(o)); - DataPool::operator=(static_cast<DataPool const&>(o)); - mNumElements = o.mNumElements; - mMask = o.mMask; - mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; - mInfoInc = o.mInfoInc; - mInfoHashShift = o.mInfoHashShift; - cloneData(o); - - return *this; - } - - // Swaps everything between the two maps. - void swap(Table& o) { - ROBIN_HOOD_TRACE(this) - using std::swap; - swap(o, *this); - } - - // Clears all data, without resizing. - void clear() { - ROBIN_HOOD_TRACE(this) - if (empty()) { - // don't do anything!
also important because we don't want to write to - // DummyInfoByte::b, even though we would just write 0 to it. - return; - } - - Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{}.nodes(*this); - - auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); - // clear everything, then set the sentinel again - uint8_t const z = 0; - std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z); - mInfo[numElementsWithBuffer] = 1; - - mInfoInc = InitialInfoInc; - mInfoHashShift = InitialInfoHashShift; - } - - // Destroys the map and all its contents. - ~Table() { - ROBIN_HOOD_TRACE(this) - destroy(); - } - - // Checks if both tables contain the same entries. Order is irrelevant. - bool operator==(const Table& other) const { - ROBIN_HOOD_TRACE(this) - if (other.size() != size()) { - return false; - } - for (auto const& otherEntry : other) { - if (!has(otherEntry)) { - return false; - } - } - - return true; - } - - bool operator!=(const Table& other) const { - ROBIN_HOOD_TRACE(this) - return !operator==(other); - } - - template <typename Q = mapped_type> - typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](const key_type& key) { - ROBIN_HOOD_TRACE(this) - return doCreateByKey(key); - } - - template <typename Q = mapped_type> - typename std::enable_if<!std::is_void<Q>::value, Q&>::type operator[](key_type&& key) { - ROBIN_HOOD_TRACE(this) - return doCreateByKey(std::move(key)); - } - - template <typename Iter> - void insert(Iter first, Iter last) { - for (; first != last; ++first) { - // value_type ctor needed because this might be called with std::pair's - insert(value_type(*first)); - } - } - - template <typename... Args> - std::pair<iterator, bool> emplace(Args&&... args) { - ROBIN_HOOD_TRACE(this) - Node n{*this, std::forward<Args>(args)...}; - auto r = doInsert(std::move(n)); - if (!r.second) { - // insertion not possible: destroy node - // NOLINTNEXTLINE(bugprone-use-after-move) - n.destroy(*this); - } - return r; - } - - template <typename... Args> - std::pair<iterator, bool> try_emplace(const key_type& key, Args&&... args) { - return try_emplace_impl(key, std::forward<Args>(args)...); - } - - template <typename... Args> - std::pair<iterator, bool> try_emplace(key_type&& key, Args&&... args) { - return try_emplace_impl(std::move(key), std::forward<Args>(args)...); - } - - template <typename... Args> - std::pair<iterator, bool> try_emplace(const_iterator hint, const key_type& key, - Args&&... args) { - (void)hint; - return try_emplace_impl(key, std::forward<Args>(args)...); - } - - template <typename... Args> - std::pair<iterator, bool> try_emplace(const_iterator hint, key_type&& key, Args&&...
args) { - (void)hint; - return try_emplace_impl(std::move(key), std::forward<Args>(args)...); - } - - template <typename Mapped> - std::pair<iterator, bool> insert_or_assign(const key_type& key, Mapped&& obj) { - return insert_or_assign_impl(key, std::forward<Mapped>(obj)); - } - - template <typename Mapped> - std::pair<iterator, bool> insert_or_assign(key_type&& key, Mapped&& obj) { - return insert_or_assign_impl(std::move(key), std::forward<Mapped>(obj)); - } - - template <typename Mapped> - std::pair<iterator, bool> insert_or_assign(const_iterator hint, const key_type& key, - Mapped&& obj) { - (void)hint; - return insert_or_assign_impl(key, std::forward<Mapped>(obj)); - } - - template <typename Mapped> - std::pair<iterator, bool> insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { - (void)hint; - return insert_or_assign_impl(std::move(key), std::forward<Mapped>(obj)); - } - - std::pair<iterator, bool> insert(const value_type& keyval) { - ROBIN_HOOD_TRACE(this) - return doInsert(keyval); - } - - std::pair<iterator, bool> insert(value_type&& keyval) { - return doInsert(std::move(keyval)); - } - - // Returns 1 if key is found, 0 otherwise. - size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - auto kv = mKeyVals + findIdx(key); - if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) { - return 1; - } - return 0; - } - - template <typename OtherKey, typename Self_ = Self> - // NOLINTNEXTLINE(modernize-use-nodiscard) - typename std::enable_if<Self_::is_transparent, size_t>::type count(const OtherKey& key) const { - ROBIN_HOOD_TRACE(this) - auto kv = mKeyVals + findIdx(key); - if (kv != reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) { - return 1; - } - return 0; - } - - bool contains(const key_type& key) const { // NOLINT(modernize-use-nodiscard) - return 1U == count(key); - } - - template <typename OtherKey, typename Self_ = Self> - // NOLINTNEXTLINE(modernize-use-nodiscard) - typename std::enable_if<Self_::is_transparent, bool>::type contains(const OtherKey& key) const { - return 1U == count(key); - } - - // Returns a reference to the value found for key. - // Throws std::out_of_range if element cannot be found - template <typename Q = mapped_type> - // NOLINTNEXTLINE(modernize-use-nodiscard) - typename std::enable_if<!std::is_void<Q>::value, Q&>::type at(key_type const& key) { - ROBIN_HOOD_TRACE(this) - auto kv = mKeyVals + findIdx(key); - if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) { - doThrow<std::out_of_range>("key not found"); - } - return kv->getSecond(); - } - - // Returns a reference to the value found for key. 
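// Hedged usage sketch of the insertion APIs declared above; their semantics
// follow std::unordered_map, which is used here as a stand-in container:
// try_emplace never overwrites an existing mapped value, insert_or_assign does.
#include <cassert>
#include <string>
#include <unordered_map>

int main() {
    std::unordered_map<std::string, int> m;

    auto r1 = m.try_emplace("a", 1); // inserted
    auto r2 = m.try_emplace("a", 2); // key exists: no-op, value stays 1
    assert(r1.second && !r2.second && m["a"] == 1);

    auto r3 = m.insert_or_assign("a", 3); // overwrites the existing value
    assert(!r3.second && m["a"] == 3);
    return 0;
}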
- // Throws std::out_of_range if element cannot be found - template <typename Q = mapped_type> - // NOLINTNEXTLINE(modernize-use-nodiscard) - typename std::enable_if<!std::is_void<Q>::value, Q const&>::type at(key_type const& key) const { - ROBIN_HOOD_TRACE(this) - auto kv = mKeyVals + findIdx(key); - if (kv == reinterpret_cast_no_cast_align_warning<Node*>(mInfo)) { - doThrow<std::out_of_range>("key not found"); - } - return kv->getSecond(); - } - - const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return const_iterator{mKeyVals + idx, mInfo + idx}; - } - - template <typename OtherKey> - const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const { - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return const_iterator{mKeyVals + idx, mInfo + idx}; - } - - template <typename OtherKey, typename Self_ = Self> - typename std::enable_if<Self_::is_transparent, // NOLINT(modernize-use-nodiscard) - const_iterator>::type // NOLINT(modernize-use-nodiscard) - find(const OtherKey& key) const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return const_iterator{mKeyVals + idx, mInfo + idx}; - } - - iterator find(const key_type& key) { - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return iterator{mKeyVals + idx, mInfo + idx}; - } - - template <typename OtherKey> - iterator find(const OtherKey& key, is_transparent_tag /*unused*/) { - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return iterator{mKeyVals + idx, mInfo + idx}; - } - - template <typename OtherKey, typename Self_ = Self> - typename std::enable_if<Self_::is_transparent, iterator>::type find(const OtherKey& key) { - ROBIN_HOOD_TRACE(this) - const size_t idx = findIdx(key); - return iterator{mKeyVals + idx, mInfo + idx}; - } - - iterator begin() { - ROBIN_HOOD_TRACE(this) - if (empty()) { - return end(); - } - return iterator(mKeyVals, mInfo, fast_forward_tag{}); - } - const_iterator begin() const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return cbegin(); - } - const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - if (empty()) { - return cend(); - } - return const_iterator(mKeyVals, mInfo, fast_forward_tag{}); - } - - iterator end() { - ROBIN_HOOD_TRACE(this) - // no need to supply valid info pointer: end() must not be dereferenced, and only node - // pointer is compared. - return iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr}; - } - const_iterator end() const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return cend(); - } - const_iterator cend() const { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return const_iterator{reinterpret_cast_no_cast_align_warning<Node*>(mInfo), nullptr}; - } - - iterator erase(const_iterator pos) { - ROBIN_HOOD_TRACE(this) - // it's safe to perform a const cast here - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) - return erase(iterator{const_cast<Node*>(pos.mKeyVals), const_cast<uint8_t*>(pos.mInfo)}); - } - - // Erases element at pos, returns iterator to the next element. - iterator erase(iterator pos) { - ROBIN_HOOD_TRACE(this) - // we assume that pos always points to a valid entry, and not end().
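// Why erase() below may return pos unchanged: a backward shift pulls the *next*
// element into the erased slot, so revisiting the same position is correct and
// the usual erase-while-iterating idiom just works. Illustrated with
// std::unordered_map as a stand-in, whose erase(iterator) contract matches.
#include <string>
#include <unordered_map>

void erase_negatives(std::unordered_map<std::string, int>& m) {
    for (auto it = m.begin(); it != m.end();) {
        if (it->second < 0) {
            it = m.erase(it); // erase hands back the iterator to the next element
        } else {
            ++it;
        }
    }
}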
- auto const idx = static_cast<size_t>(pos.mKeyVals - mKeyVals); - - shiftDown(idx); - --mNumElements; - - if (*pos.mInfo) { - // we've backward shifted, return this again - return pos; - } - - // no backward shift, return next element - return ++pos; - } - - size_t erase(const key_type& key) { - ROBIN_HOOD_TRACE(this) - size_t idx{}; - InfoType info{}; - keyToIdx(key, &idx, &info); - - // check while info matches with the source idx - do { - if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { - shiftDown(idx); - --mNumElements; - return 1; - } - next(&info, &idx); - } while (info <= mInfo[idx]); - - // nothing found to delete - return 0; - } - - // reserves space for the specified number of elements. Makes sure the old data fits. - // Same as reserve(c), except that a rehash is forced. - void rehash(size_t c) { - // forces a reserve - reserve(c, true); - } - - // reserves space for the specified number of elements. Makes sure the old data fits. - // Like rehash(c), but does not force a rehash. Use rehash(0) to shrink to fit. - void reserve(size_t c) { - // reserve, but don't force rehash - reserve(c, false); - } - - size_type size() const noexcept { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return mNumElements; - } - - size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return static_cast<size_type>(-1); - } - - ROBIN_HOOD(NODISCARD) bool empty() const noexcept { - ROBIN_HOOD_TRACE(this) - return 0 == mNumElements; - } - - float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return MaxLoadFactor100 / 100.0F; - } - - // Average number of elements per bucket. Since we allow only 1 per bucket, this is also the - // fraction of occupied buckets. - float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) - ROBIN_HOOD_TRACE(this) - return static_cast<float>(size()) / static_cast<float>(mMask + 1); - } - - ROBIN_HOOD(NODISCARD) size_t mask() const noexcept { - ROBIN_HOOD_TRACE(this) - return mMask; - } - - ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept { - if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits<size_t>::max)() / 100)) { - return maxElements * MaxLoadFactor100 / 100; - } - - // we might be a bit imprecise, but since maxElements is quite large that doesn't matter - return (maxElements / 100) * MaxLoadFactor100; - } - - ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept { - // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load - // 64-bit types. - return numElements + sizeof(uint64_t); - } - - ROBIN_HOOD(NODISCARD) - size_t calcNumElementsWithBuffer(size_t numElements) const noexcept { - auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements); - return numElements + (std::min)(maxNumElementsAllowed, (static_cast<size_t>(0xFF))); - } - - // calculation only allowed for 2^n values - ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const { -#if ROBIN_HOOD(BITNESS) == 64 - return numElements * sizeof(Node) + calcNumBytesInfo(numElements); -#else - // make sure we're doing 64-bit operations, so we are at least safe against 32-bit overflows.
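// A standalone, hypothetical sketch of the guard the next lines implement: do
// the size arithmetic in 64 bits, then verify that narrowing back to size_t
// (which may be 32-bit) lost nothing before allocating.
#include <cstddef>
#include <cstdint>
#include <stdexcept>

inline std::size_t checked_total_bytes(std::size_t num_elements, std::size_t elem_size,
                                       std::size_t extra_bytes) {
    auto const total64 = static_cast<std::uint64_t>(num_elements) * elem_size + extra_bytes;
    auto const total = static_cast<std::size_t>(total64);
    if (static_cast<std::uint64_t>(total) != total64) {
        throw std::overflow_error("allocation size overflow"); // would wrap on 32-bit
    }
    return total;
}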
- auto const ne = static_cast<uint64_t>(numElements); - auto const s = static_cast<uint64_t>(sizeof(Node)); - auto const infos = static_cast<uint64_t>(calcNumBytesInfo(numElements)); - - auto const total64 = ne * s + infos; - auto const total = static_cast<size_t>(total64); - - if (ROBIN_HOOD_UNLIKELY(static_cast<uint64_t>(total) != total64)) { - throwOverflowError(); - } - return total; -#endif - } - -private: - template <typename Q = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<!std::is_void<Q>::value, bool>::type has(const value_type& e) const { - ROBIN_HOOD_TRACE(this) - auto it = find(e.first); - return it != end() && it->second == e.second; - } - - template <typename Q = mapped_type> - ROBIN_HOOD(NODISCARD) - typename std::enable_if<std::is_void<Q>::value, bool>::type has(const value_type& e) const { - ROBIN_HOOD_TRACE(this) - return find(e) != end(); - } - - void reserve(size_t c, bool forceRehash) { - ROBIN_HOOD_TRACE(this) - auto const minElementsAllowed = (std::max)(c, mNumElements); - auto newSize = InitialNumElements; - while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) { - newSize *= 2; - } - if (ROBIN_HOOD_UNLIKELY(newSize == 0)) { - throwOverflowError(); - } - - ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1") - - // only actually do anything when the new size is bigger than the old one. This prevents us - // from continuously reallocating for each reserve() call. - if (forceRehash || newSize > mMask + 1) { - rehashPowerOfTwo(newSize); - } - } - - // reserves space for at least the specified number of elements. - // only works if numBuckets is a power of two - void rehashPowerOfTwo(size_t numBuckets) { - ROBIN_HOOD_TRACE(this) - - Node* const oldKeyVals = mKeyVals; - uint8_t const* const oldInfo = mInfo; - - const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); - - // resize operation: move stuff - init_data(numBuckets); - if (oldMaxElementsWithBuffer > 1) { - for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) { - if (oldInfo[i] != 0) { - insert_move(std::move(oldKeyVals[i])); - // destroy the node but DON'T destroy the data. - oldKeyVals[i].~Node(); - } - } - - // this check is not necessary as it's guarded by the previous if, but it helps silence - // g++'s overeager "attempt to free a non-heap object 'map' - // [-Werror=free-nonheap-object]" warning. - if (oldKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) { - // don't destroy old data: put it into the pool instead - DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer)); - } - } - } - - ROBIN_HOOD(NOINLINE) void throwOverflowError() const { -#if ROBIN_HOOD(HAS_EXCEPTIONS) - throw std::overflow_error("robin_hood::map overflow"); -#else - abort(); -#endif - } - - template <typename OtherKey, typename... Args> - std::pair<iterator, bool> try_emplace_impl(OtherKey&& key, Args&&...
args) { - ROBIN_HOOD_TRACE(this) - auto it = find(key); - if (it == end()) { - return emplace(std::piecewise_construct, - std::forward_as_tuple(std::forward<OtherKey>(key)), - std::forward_as_tuple(std::forward<Args>(args)...)); - } - return {it, false}; - } - - template <typename OtherKey, typename Mapped> - std::pair<iterator, bool> insert_or_assign_impl(OtherKey&& key, Mapped&& obj) { - ROBIN_HOOD_TRACE(this) - auto it = find(key); - if (it == end()) { - return emplace(std::forward<OtherKey>(key), std::forward<Mapped>(obj)); - } - it->second = std::forward<Mapped>(obj); - return {it, false}; - } - - void init_data(size_t max_elements) { - mNumElements = 0; - mMask = max_elements - 1; - mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements); - - auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); - - // calloc also zeroes everything - auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); - ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" - << numElementsWithBuffer << ")") - mKeyVals = reinterpret_cast<Node*>( - detail::assertNotNull<std::bad_alloc>(std::calloc(1, numBytesTotal))); - mInfo = reinterpret_cast<uint8_t*>(mKeyVals + numElementsWithBuffer); - - // set sentinel - mInfo[numElementsWithBuffer] = 1; - - mInfoInc = InitialInfoInc; - mInfoHashShift = InitialInfoHashShift; - } - - template <typename Arg, typename Q = mapped_type> - typename std::enable_if<!std::is_void<Q>::value, Q&>::type doCreateByKey(Arg&& key) { - while (true) { - size_t idx{}; - InfoType info{}; - keyToIdx(key, &idx, &info); - nextWhileLess(&info, &idx); - - // while we potentially have a match. Can't do a do-while here because when mInfo is - // 0 we don't want to skip forward - while (info == mInfo[idx]) { - if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { - // key already exists, do not insert. - return mKeyVals[idx].getSecond(); - } - next(&info, &idx); - } - - // unlikely that this evaluates to true - if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) { - increase_size(); - continue; - } - - // key not found, so we are now exactly where we want to insert it. - auto const insertion_idx = idx; - auto const insertion_info = info; - if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { - mMaxNumElementsAllowed = 0; - } - - // find an empty spot - while (0 != mInfo[idx]) { - next(&info, &idx); - } - - auto& l = mKeyVals[insertion_idx]; - if (idx == insertion_idx) { - // put at empty spot. This forwards all arguments into the node where the object - // is constructed exactly where it is needed. 
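// What the piecewise construction on the next lines buys operator[]: the key is
// forwarded into place and the mapped value is value-initialized from an empty
// tuple (0 for ints). A hypothetical standalone equivalent:
#include <string>
#include <tuple>
#include <utility>

std::pair<std::string, int> make_default_entry(std::string key) {
    // key moves into .first; the empty tuple value-initializes .second
    return std::pair<std::string, int>(std::piecewise_construct,
                                       std::forward_as_tuple(std::move(key)),
                                       std::forward_as_tuple());
}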
- ::new (static_cast<void*>(&l)) - Node(*this, std::piecewise_construct, - std::forward_as_tuple(std::forward<Arg>(key)), std::forward_as_tuple()); - } else { - shiftUp(idx, insertion_idx); - l = Node(*this, std::piecewise_construct, - std::forward_as_tuple(std::forward<Arg>(key)), std::forward_as_tuple()); - } - - // mKeyVals[idx].getFirst() = std::move(key); - mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info); - - ++mNumElements; - return mKeyVals[insertion_idx].getSecond(); - } - } - - // This is exactly the same code as operator[], except for the return values - template <typename Arg> - std::pair<iterator, bool> doInsert(Arg&& keyval) { - while (true) { - size_t idx{}; - InfoType info{}; - keyToIdx(getFirstConst(keyval), &idx, &info); - nextWhileLess(&info, &idx); - - // while we potentially have a match - while (info == mInfo[idx]) { - if (WKeyEqual::operator()(getFirstConst(keyval), mKeyVals[idx].getFirst())) { - // key already exists, do NOT insert. - // see http://en.cppreference.com/w/cpp/container/unordered_map/insert - return std::make_pair<iterator, bool>(iterator(mKeyVals + idx, mInfo + idx), - false); - } - next(&info, &idx); - } - - // unlikely that this evaluates to true - if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) { - increase_size(); - continue; - } - - // key not found, so we are now exactly where we want to insert it. - auto const insertion_idx = idx; - auto const insertion_info = info; - if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { - mMaxNumElementsAllowed = 0; - } - - // find an empty spot - while (0 != mInfo[idx]) { - next(&info, &idx); - } - - auto& l = mKeyVals[insertion_idx]; - if (idx == insertion_idx) { - ::new (static_cast<void*>(&l)) Node(*this, std::forward<Arg>(keyval)); - } else { - shiftUp(idx, insertion_idx); - l = Node(*this, std::forward<Arg>(keyval)); - } - - // put at empty spot - mInfo[insertion_idx] = static_cast<uint8_t>(insertion_info); - - ++mNumElements; - return std::make_pair(iterator(mKeyVals + insertion_idx, mInfo + insertion_idx), true); - } - } - - bool try_increase_info() { - ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements - << ", maxNumElementsAllowed=" - << calcMaxNumElementsAllowed(mMask + 1)) - if (mInfoInc <= 2) { - // need to be > 2 so that shift works (otherwise undefined behavior!) - return false; - } - // we got space left, try to make info smaller - mInfoInc = static_cast<uint8_t>(mInfoInc >> 1U); - - // remove one bit of the hash, leaving more space for the distance info. - // This is extremely fast because we can operate on 8 bytes at once. - ++mInfoHashShift; - auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); - - for (size_t i = 0; i < numElementsWithBuffer; i += 8) { - auto val = unaligned_load<uint64_t>(mInfo + i); - val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f); - std::memcpy(mInfo + i, &val, sizeof(val)); - } - // update sentinel, which might have been cleared out! - mInfo[numElementsWithBuffer] = 1; - - mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); - return true; - } - - void increase_size() { - // nothing allocated yet? 
just allocate InitialNumElements - if (0 == mMask) { - init_data(InitialNumElements); - return; - } - - auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); - if (mNumElements < maxNumElementsAllowed && try_increase_info()) { - return; - } - - ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed=" - << maxNumElementsAllowed << ", load=" - << (static_cast<double>(mNumElements) * 100.0 / - (static_cast<double>(mMask) + 1))) - // it seems we have a really bad hash function! don't try to resize again - if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) { - throwOverflowError(); - } - - rehashPowerOfTwo((mMask + 1) * 2); - } - - void destroy() { - if (0 == mMask) { - // don't deallocate! - return; - } - - Destroyer<Self, IsFlat && std::is_trivially_destructible<Node>::value>{} - .nodesDoNotDeallocate(*this); - - // This protection against not deleting mMask shouldn't be needed as it's sufficiently - // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise - // reports a compile error: attempt to free a non-heap object 'fm' - // [-Werror=free-nonheap-object] - if (mKeyVals != reinterpret_cast_no_cast_align_warning<Node*>(&mMask)) { - ROBIN_HOOD_LOG("std::free") - std::free(mKeyVals); - } - } - - void init() noexcept { - mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); - mInfo = reinterpret_cast<uint8_t*>(&mMask); - mNumElements = 0; - mMask = 0; - mMaxNumElementsAllowed = 0; - mInfoInc = InitialInfoInc; - mInfoHashShift = InitialInfoHashShift; - } - - // members are sorted so no padding occurs - Node* mKeyVals = reinterpret_cast_no_cast_align_warning<Node*>(&mMask); // 8 byte 8 - uint8_t* mInfo = reinterpret_cast<uint8_t*>(&mMask); // 8 byte 16 - size_t mNumElements = 0; // 8 byte 24 - size_t mMask = 0; // 8 byte 32 - size_t mMaxNumElementsAllowed = 0; // 8 byte 40 - InfoType mInfoInc = InitialInfoInc; // 4 byte 44 - InfoType mInfoHashShift = InitialInfoHashShift; // 4 byte 48 - // 16 byte 56 if NodeAllocator -}; - -} // namespace detail - -// map - -template <typename Key, typename T, typename Hash = hash<Key>, - typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80> -using unordered_flat_map = detail::Table<true, MaxLoadFactor100, Key, T, Hash, KeyEqual>; - -template <typename Key, typename T, typename Hash = hash<Key>, - typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80> -using unordered_node_map = detail::Table<false, MaxLoadFactor100, Key, T, Hash, KeyEqual>; - -template <typename Key, typename T, typename Hash = hash<Key>, - typename KeyEqual = std::equal_to<Key>, size_t MaxLoadFactor100 = 80> -using unordered_map = - detail::Table<sizeof(robin_hood::pair<Key, T>) <= sizeof(size_t) * 6 && - std::is_nothrow_move_constructible<robin_hood::pair<Key, T>>::value && - std::is_nothrow_move_assignable<robin_hood::pair<Key, T>>::value, - MaxLoadFactor100, Key, T, Hash, KeyEqual>; - -// set - -template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>, - size_t MaxLoadFactor100 = 80> -using unordered_flat_set = detail::Table<true, MaxLoadFactor100, Key, void, Hash, KeyEqual>; - -template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>, - size_t MaxLoadFactor100 = 80> -using unordered_node_set = detail::Table<false, MaxLoadFactor100, Key, void, Hash, KeyEqual>; - -template <typename Key, typename Hash = hash<Key>, typename KeyEqual = std::equal_to<Key>, - size_t MaxLoadFactor100 = 80> -using unordered_set = 
detail::Table<sizeof(Key) <= sizeof(size_t) * 6 && - std::is_nothrow_move_constructible<Key>::value && - std::is_nothrow_move_assignable<Key>::value, - MaxLoadFactor100, Key, void, Hash, KeyEqual>; - -} // namespace robin_hood - -#endif diff --git a/src/libmime/received.cxx b/src/libmime/received.cxx index 691d7ca04..8e0609f39 100644 --- a/src/libmime/received.cxx +++ b/src/libmime/received.cxx @@ -879,7 +879,7 @@ TEST_SUITE("received") { TEST_CASE("parse received") { using namespace std::string_view_literals; - using map_type = robin_hood::unordered_flat_map<std::string_view, std::string_view>; + using map_type = ankerl::unordered_dense::map<std::string_view, std::string_view>; std::vector<std::pair<std::string_view, map_type>> cases{ // Simple received {"from smtp11.mailtrack.pl (smtp11.mailtrack.pl [185.243.30.90])"sv, diff --git a/src/libmime/received.hxx b/src/libmime/received.hxx index 2edf628cc..a5d5e3fe6 100644 --- a/src/libmime/received.hxx +++ b/src/libmime/received.hxx @@ -24,7 +24,7 @@ #include "mime_string.hxx" #include "libmime/email_addr.h" #include "libserver/task.h" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include <vector> #include <string_view> #include <utility> @@ -179,7 +179,7 @@ struct received_header { } /* Unit tests helper */ - static auto from_map(const robin_hood::unordered_flat_map<std::string_view, std::string_view> &map) -> received_header { + static auto from_map(const ankerl::unordered_dense::map<std::string_view, std::string_view> &map) -> received_header { using namespace std::string_view_literals; received_header rh; @@ -202,9 +202,9 @@ struct received_header { return rh; } - auto as_map() const -> robin_hood::unordered_flat_map<std::string_view, std::string_view> + auto as_map() const -> ankerl::unordered_dense::map<std::string_view, std::string_view> { - robin_hood::unordered_flat_map<std::string_view, std::string_view> map; + ankerl::unordered_dense::map<std::string_view, std::string_view> map; if (!from_hostname.empty()) { map["from_hostname"] = from_hostname.as_view(); diff --git a/src/libserver/composites/composites.cxx b/src/libserver/composites/composites.cxx index 3b5b274a7..9779b4678 100644 --- a/src/libserver/composites/composites.cxx +++ b/src/libserver/composites/composites.cxx @@ -25,7 +25,7 @@ #include <vector> #include <variant> #include "libutil/cxx/util.hxx" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "composites_internal.hxx" @@ -82,7 +82,7 @@ struct composites_data { struct rspamd_task *task; struct rspamd_composite *composite; struct rspamd_scan_result *metric_res; - robin_hood::unordered_flat_map<std::string_view, + ankerl::unordered_dense::map<std::string_view, std::vector<symbol_remove_data>> symbols_to_remove; std::vector<bool> checked; diff --git a/src/libserver/composites/composites_internal.hxx b/src/libserver/composites/composites_internal.hxx index a12d5f9af..570cb4ea9 100644 --- a/src/libserver/composites/composites_internal.hxx +++ b/src/libserver/composites/composites_internal.hxx @@ -93,7 +93,7 @@ private: return composite; } - robin_hood::unordered_flat_map<std::string, + ankerl::unordered_dense::map<std::string, std::shared_ptr<rspamd_composite>, rspamd::smart_str_hash, rspamd::smart_str_equal> composites; /* Store all composites here, even if we have duplicates */ std::vector<std::shared_ptr<rspamd_composite>> all_composites; diff --git a/src/libserver/composites/composites_manager.cxx 
b/src/libserver/composites/composites_manager.cxx index bf56fe675..27fb170b9 100644 --- a/src/libserver/composites/composites_manager.cxx +++ b/src/libserver/composites/composites_manager.cxx @@ -17,7 +17,7 @@ #include <memory> #include <vector> #include <cmath> -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "composites.h" #include "composites_internal.hxx" @@ -29,7 +29,7 @@ namespace rspamd::composites { static auto composite_policy_from_str(const std::string_view &inp) -> enum rspamd_composite_policy { - const static robin_hood::unordered_flat_map<std::string_view, + const static ankerl::unordered_dense::map<std::string_view, enum rspamd_composite_policy> names{ {"remove", rspamd_composite_policy::RSPAMD_COMPOSITE_POLICY_REMOVE_ALL}, {"remove_all", rspamd_composite_policy::RSPAMD_COMPOSITE_POLICY_REMOVE_ALL}, diff --git a/src/libserver/css/css.cxx b/src/libserver/css/css.cxx index 9e26eb42f..7de4ab307 100644 --- a/src/libserver/css/css.cxx +++ b/src/libserver/css/css.cxx @@ -15,7 +15,7 @@ */ #include "css.hxx" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "css_parser.hxx" #include "libserver/html/html_tag.hxx" #include "libserver/html/html_block.hxx" @@ -34,7 +34,7 @@ public: using sel_shared_hash = smart_ptr_hash<css_selector>; using sel_shared_eq = smart_ptr_equal<css_selector>; using selector_ptr = std::unique_ptr<css_selector>; - using selectors_hash = robin_hood::unordered_flat_map<selector_ptr, css_declarations_block_ptr, + using selectors_hash = ankerl::unordered_dense::map<selector_ptr, css_declarations_block_ptr, sel_shared_hash, sel_shared_eq>; using universal_selector_t = std::pair<selector_ptr, css_declarations_block_ptr>; selectors_hash tags_selector; diff --git a/src/libserver/css/css_colors_list.hxx b/src/libserver/css/css_colors_list.hxx index b1fc5d6ee..e880aa24b 100644 --- a/src/libserver/css/css_colors_list.hxx +++ b/src/libserver/css/css_colors_list.hxx @@ -20,7 +20,7 @@ #pragma once #include <string_view> -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "css_value.hxx" namespace rspamd::css { @@ -30,7 +30,7 @@ namespace rspamd::css { * TODO: think about frozen structs when we can deal with 700 values without * compiler limits... 
*/ -static const robin_hood::unordered_flat_map<std::string_view, css_color> css_colors_map{ +static const ankerl::unordered_dense::map<std::string_view, css_color> css_colors_map{ {"aliceblue", {240, 248, 255}}, {"antiquewhite", {250, 235, 215}}, {"antiquewhite1", {255, 239, 219}}, diff --git a/src/libserver/css/css_rule.hxx b/src/libserver/css/css_rule.hxx index acf44ba86..c942b8795 100644 --- a/src/libserver/css/css_rule.hxx +++ b/src/libserver/css/css_rule.hxx @@ -21,7 +21,7 @@ #include "css_value.hxx" #include "css_property.hxx" #include "css_parser.hxx" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "libutil/cxx/util.hxx" #include "libutil/cxx/hash_util.hxx" #include <vector> @@ -121,7 +121,7 @@ public: auto compile_to_block(rspamd_mempool_t *pool) const -> rspamd::html::html_block *; private: - robin_hood::unordered_flat_set<rule_shared_ptr, rule_shared_hash, rule_shared_eq> rules; + ankerl::unordered_dense::set<rule_shared_ptr, rule_shared_hash, rule_shared_eq> rules; }; using css_declarations_block_ptr = std::shared_ptr<css_declarations_block>; diff --git a/src/libserver/css/css_value.cxx b/src/libserver/css/css_value.cxx index 6982647f4..7a451e1e5 100644 --- a/src/libserver/css/css_value.cxx +++ b/src/libserver/css/css_value.cxx @@ -19,7 +19,7 @@ #include "frozen/unordered_map.h" #include "frozen/string.h" #include "libutil/util.h" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "fmt/core.h" #define DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL diff --git a/src/libserver/html/html_entities.cxx b/src/libserver/html/html_entities.cxx index 8a3ff61da..f1bc91963 100644 --- a/src/libserver/html/html_entities.cxx +++ b/src/libserver/html/html_entities.cxx @@ -20,7 +20,7 @@ #include <string> #include <utility> #include <vector> -#include <contrib/robin-hood/robin_hood.h> +#include "contrib/ankerl/unordered_dense.h" #include <unicode/utf8.h> #include <unicode/uchar.h> #include "libutil/cxx/util.hxx" @@ -2168,9 +2168,9 @@ static const html_entity_def html_entities_array[] = { }; class html_entities_storage { - robin_hood::unordered_flat_map<std::string_view, html_entity_def> entity_by_name; - robin_hood::unordered_flat_map<std::string_view, html_entity_def> entity_by_name_heur; - robin_hood::unordered_flat_map<unsigned, html_entity_def> entity_by_id; + ankerl::unordered_dense::map<std::string_view, html_entity_def> entity_by_name; + ankerl::unordered_dense::map<std::string_view, html_entity_def> entity_by_name_heur; + ankerl::unordered_dense::map<unsigned, html_entity_def> entity_by_id; public: html_entities_storage() { auto nelts = G_N_ELEMENTS(html_entities_array); diff --git a/src/libserver/html/html_tag_defs.hxx b/src/libserver/html/html_tag_defs.hxx index 812ec2021..e166e0b7c 100644 --- a/src/libserver/html/html_tag_defs.hxx +++ b/src/libserver/html/html_tag_defs.hxx @@ -21,7 +21,7 @@ #include "libutil/cxx/util.hxx" #include <string> -#include <contrib/robin-hood/robin_hood.h> +#include "contrib/ankerl/unordered_dense.h" namespace rspamd::html { @@ -139,8 +139,8 @@ static const auto html_tag_defs_array = rspamd::array_of( ); class html_tags_storage { - robin_hood::unordered_flat_map<std::string_view, html_tag_def> tag_by_name; - robin_hood::unordered_flat_map<tag_id_t, html_tag_def> tag_by_id; + ankerl::unordered_dense::map<std::string_view, html_tag_def> tag_by_name; + ankerl::unordered_dense::map<tag_id_t, html_tag_def> tag_by_id; public: html_tags_storage() { 
tag_by_name.reserve(html_tag_defs_array.size()); diff --git a/src/libserver/redis_pool.cxx b/src/libserver/redis_pool.cxx index 548551ab9..2ea8b727c 100644 --- a/src/libserver/redis_pool.cxx +++ b/src/libserver/redis_pool.cxx @@ -23,9 +23,8 @@ #include "contrib/hiredis/adapters/libev.h" #include "cryptobox.h" #include "logger.h" - #include <list> -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" namespace rspamd { class redis_pool_elt; @@ -91,6 +90,12 @@ class redis_pool_elt { redis_pool_key_t key; bool is_unix; public: + /* Disable copy */ + redis_pool_elt() = delete; + redis_pool_elt(const redis_pool_elt &) = delete; + /* Enable move */ + redis_pool_elt(redis_pool_elt &&other) = default; + explicit redis_pool_elt(redis_pool *_pool, const gchar *_db, const gchar *_password, const char *_ip, int _port) @@ -194,9 +199,9 @@ class redis_pool final { static constexpr const unsigned default_max_conns = 100; /* We want to have references integrity */ - robin_hood::unordered_flat_map<redisAsyncContext *, + ankerl::unordered_dense::map<redisAsyncContext *, redis_pool_connection *> conns_by_ctx; - robin_hood::unordered_node_map<redis_pool_key_t, redis_pool_elt> elts_by_key; + ankerl::unordered_dense::map<redis_pool_key_t, redis_pool_elt> elts_by_key; bool wanna_die = false; /* Hiredis is 'clever' so we can call ourselves from destructor */ public: double timeout = default_timeout; @@ -490,9 +495,9 @@ redis_pool::new_connection(const gchar *db, const gchar *password, } else { /* Need to create a pool */ - auto nelt = elts_by_key.emplace(std::piecewise_construct, - std::forward_as_tuple(key), - std::forward_as_tuple(this, db, password, ip, port)); + auto nconn = redis_pool_elt{this, db, password, ip, port}; + auto nelt = elts_by_key.try_emplace(key, + std::move(nconn)); return nelt.first->second.new_connection(); } diff --git a/src/libserver/symcache/symcache_internal.hxx b/src/libserver/symcache/symcache_internal.hxx index 9bce37532..063777a71 100644 --- a/src/libserver/symcache/symcache_internal.hxx +++ b/src/libserver/symcache/symcache_internal.hxx @@ -34,7 +34,7 @@ #include "rspamd_symcache.h" #include "contrib/libev/ev.h" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "contrib/expected/expected.hpp" #include "cfg_file.h" @@ -109,9 +109,9 @@ struct order_generation { /* All items ordered */ std::vector<cache_item_ptr> d; /* Mapping from symbol name to the position in the order array */ - robin_hood::unordered_flat_map<std::string_view, unsigned int> by_symbol; + ankerl::unordered_dense::map<std::string_view, unsigned int> by_symbol; /* Mapping from symbol id to the position in the order array */ - robin_hood::unordered_flat_map<unsigned int, unsigned int> by_cache_id; + ankerl::unordered_dense::map<unsigned int, unsigned int> by_cache_id; /* It matches cache->generation_id; if not, a fresh ordering is required */ unsigned int generation_id; @@ -147,7 +147,7 @@ class symcache { private: using items_ptr_vec = std::vector<cache_item_ptr>; /* Map indexed by symbol name: all symbols must have unique names, so this map holds ownership */ - robin_hood::unordered_flat_map<std::string_view, cache_item_ptr> items_by_symbol; + ankerl::unordered_dense::map<std::string_view, cache_item_ptr> items_by_symbol; items_ptr_vec items_by_id; /* Items sorted into some order */ diff --git a/src/libserver/symcache/symcache_item.cxx b/src/libserver/symcache/symcache_item.cxx index bdc378f32..588d2e93f 100644 --- 
a/src/libserver/symcache/symcache_item.cxx +++ b/src/libserver/symcache/symcache_item.cxx @@ -32,7 +32,7 @@ struct augmentation_info { /* A list of internal augmentations that are known to Rspamd with their weight */ static const auto known_augmentations = - robin_hood::unordered_flat_map<std::string, augmentation_info, rspamd::smart_str_hash, rspamd::smart_str_equal>{ + ankerl::unordered_dense::map<std::string, augmentation_info, rspamd::smart_str_hash, rspamd::smart_str_equal>{ {"passthrough", { .weight = 10, .implied_flags = SYMBOL_TYPE_IGNORE_PASSTHROUGH diff --git a/src/libserver/symcache/symcache_item.hxx b/src/libserver/symcache/symcache_item.hxx index 2c2072fd3..50e321265 100644 --- a/src/libserver/symcache/symcache_item.hxx +++ b/src/libserver/symcache/symcache_item.hxx @@ -200,7 +200,7 @@ struct cache_item : std::enable_shared_from_this<cache_item> { id_list forbidden_ids{}; /* Set of augmentations */ - robin_hood::unordered_flat_set<std::string, rspamd::smart_str_hash, rspamd::smart_str_equal> augmentations; + ankerl::unordered_dense::set<std::string, rspamd::smart_str_hash, rspamd::smart_str_equal> augmentations; /* Dependencies */ std::vector<cache_dependency> deps; diff --git a/src/libstat/backends/cdb_backend.cxx b/src/libstat/backends/cdb_backend.cxx index d0915d2d7..5e188b978 100644 --- a/src/libstat/backends/cdb_backend.cxx +++ b/src/libstat/backends/cdb_backend.cxx @@ -27,7 +27,7 @@ #include <string> #include <optional> #include "contrib/expected/expected.hpp" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include "fmt/core.h" namespace rspamd::stat::cdb { @@ -87,7 +87,7 @@ private: * We store weak pointers here to allow owning cdb statfiles to free * expensive cdb before this cache is terminated (e.g. 
on dynamic cdb reload) */ - robin_hood::unordered_flat_map<std::string, std::weak_ptr<struct cdb>> elts; + ankerl::unordered_dense::map<std::string, std::weak_ptr<struct cdb>> elts; struct cdb_deleter { void operator()(struct cdb *c) const { diff --git a/src/libstat/backends/http_backend.cxx b/src/libstat/backends/http_backend.cxx index 3f4f1abd0..69c735a09 100644 --- a/src/libstat/backends/http_backend.cxx +++ b/src/libstat/backends/http_backend.cxx @@ -19,7 +19,7 @@ #include "libserver/http/http_connection.h" #include "libserver/mempool_vars_internal.h" #include "upstream.h" -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" #include <vector> namespace rspamd::stat::http { @@ -91,7 +91,7 @@ public: bool learn) -> bool; private: http_backends_collection *all_backends; - robin_hood::unordered_flat_map<int, const struct rspamd_statfile_config *> seen_statfiles; + ankerl::unordered_dense::map<int, const struct rspamd_statfile_config *> seen_statfiles; struct upstream *selected; private: http_backend_runtime(struct rspamd_task *task, bool is_learn) : diff --git a/src/libutil/cxx/hash_util.hxx b/src/libutil/cxx/hash_util.hxx index d8529774e..afad29fa5 100644 --- a/src/libutil/cxx/hash_util.hxx +++ b/src/libutil/cxx/hash_util.hxx @@ -20,7 +20,7 @@ #include <string_view> #include <string> -#include "contrib/robin-hood/robin_hood.h" +#include "contrib/ankerl/unordered_dense.h" namespace rspamd { @@ -81,10 +81,10 @@ struct smart_str_equal { struct smart_str_hash { using is_transparent = void; auto operator()(const std::string &a) const { - return robin_hood::hash<std::string>()(a); + return ankerl::unordered_dense::hash<std::string>()(a); } auto operator()(const std::string_view &a) const { - return robin_hood::hash<std::string_view>()(a); + return ankerl::unordered_dense::hash<std::string_view>()(a); } }; diff --git a/src/lua/lua_html.cxx b/src/lua/lua_html.cxx index 666b08a60..cecf4bb5c 100644 --- a/src/lua/lua_html.cxx +++ b/src/lua/lua_html.cxx @@ -21,7 +21,7 @@ #include "libserver/html/html_block.hxx" #include "images.h" -#include <contrib/robin-hood/robin_hood.h> +#include "contrib/ankerl/unordered_dense.h" #include <frozen/string.h> #include <frozen/unordered_map.h> @@ -406,7 +406,7 @@ lua_html_foreach_tag (lua_State *L) const gchar *tagname; gint id; auto any = false; - robin_hood::unordered_flat_set<int> tags; + ankerl::unordered_dense::set<int> tags; if (lua_type (L, 2) == LUA_TSTRING) {
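The replacement is uniform across the hunks above: robin_hood::unordered_flat_map, robin_hood::unordered_node_map and robin_hood::unordered_flat_set all become ankerl::unordered_dense::map / set, and robin_hood::hash becomes ankerl::unordered_dense::hash. A hedged before/after sketch (container and include names come from the diff; the surrounding code is hypothetical):

#include <string_view>
#include "contrib/ankerl/unordered_dense.h" // was: "contrib/robin-hood/robin_hood.h"

// before: robin_hood::unordered_flat_map<std::string_view, int> counts;
ankerl::unordered_dense::map<std::string_view, int> counts;

int count_of(std::string_view key) {
    // find/contains/try_emplace keep the same shape in both libraries
    auto it = counts.find(key);
    return it == counts.end() ? 0 : it->second;
}

One caveat: unordered_dense stores its elements densely and moves them on rehash, so it lacks the reference stability of the old unordered_node_map; that is presumably why the redis_pool.cxx hunk makes redis_pool_elt explicitly movable and constructs the element before inserting it with try_emplace.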