From 969902f04569ab12e69e0d57ef1f6e9f0c42f855 Mon Sep 17 00:00:00 2001 From: Vsevolod Stakhov Date: Sun, 17 Jul 2022 16:43:47 +0100 Subject: [Rework] Use another version of hash table from the same author --- contrib/DEPENDENCY_INFO.md | 73 +- contrib/ankerl/LICENSE | 21 + contrib/ankerl/svector.h | 993 +++++++++ contrib/ankerl/unordered_dense.h | 1199 +++++++++++ contrib/robin-hood/LICENSE | 21 - contrib/robin-hood/robin_hood.h | 2430 ---------------------- src/libmime/received.cxx | 2 +- src/libmime/received.hxx | 8 +- src/libserver/composites/composites.cxx | 4 +- src/libserver/composites/composites_internal.hxx | 2 +- src/libserver/composites/composites_manager.cxx | 4 +- src/libserver/css/css.cxx | 4 +- src/libserver/css/css_colors_list.hxx | 4 +- src/libserver/css/css_rule.hxx | 4 +- src/libserver/css/css_value.cxx | 2 +- src/libserver/html/html_entities.cxx | 8 +- src/libserver/html/html_tag_defs.hxx | 6 +- src/libserver/redis_pool.cxx | 19 +- src/libserver/symcache/symcache_internal.hxx | 8 +- src/libserver/symcache/symcache_item.cxx | 2 +- src/libserver/symcache/symcache_item.hxx | 2 +- src/libstat/backends/cdb_backend.cxx | 4 +- src/libstat/backends/http_backend.cxx | 4 +- src/libutil/cxx/hash_util.hxx | 6 +- src/lua/lua_html.cxx | 4 +- 25 files changed, 2301 insertions(+), 2533 deletions(-) create mode 100644 contrib/ankerl/LICENSE create mode 100644 contrib/ankerl/svector.h create mode 100644 contrib/ankerl/unordered_dense.h delete mode 100644 contrib/robin-hood/LICENSE delete mode 100644 contrib/robin-hood/robin_hood.h diff --git a/contrib/DEPENDENCY_INFO.md b/contrib/DEPENDENCY_INFO.md index fd01fa5ae..9c5cdb431 100644 --- a/contrib/DEPENDENCY_INFO.md +++ b/contrib/DEPENDENCY_INFO.md @@ -1,38 +1,39 @@ # Rspamd Dependency Info -| Name | Version | License | Patched | Notes | -| --- |---------| --- | --- | --- | -| aho-corasick | ? | LGPL-3.0 | YES | lowercase support | -| cdb | 1.1.0 | Public Domain / CC0 | NO | | -| hiredis | 0.13.3 | BSD-3-Clause | YES | many changes | -| libev | 4.33 | BSD-2-Clause | YES | many changes | -| lc-btrie | ? | BSD-3-Clause | YES | mempool support | -| libottery | ? | Public Domain / CC0 | YES | many changes | -| librdns | ? | BSD-2-Clause | YES | | -| libucl | ? | BSD-2-Clause | YES | | -| replxx | 6d93360 | BSD-2-Clause | YES | libicu usage | -| lua-argparse | 0.7.1 | MIT | NO | | -| lua-bit | 1.0.2 | MIT | YES | build fixes | -| lua-fun | ? | MIT | YES | rspamd text | -| lua-lpeg | 1.0 | MIT | YES | rspamd text + alloc| -| lua-moses | ? | MIT | NO | | -| lua-lupa | ? | MIT | NO | | -| lua-tableshape | ae67256 | MIT | NO | | -| mumhash | ? | MIT | NO | | -| ngx-http-parser | 2.2.0 | MIT | YES | spamc support | -| Mozilla-PublicSuffix | ? | MIT | NO | | -| snowball | ? | BSD-3-Clause | NO | | -| t1ha | ? | Zlib | NO | | -| uthash | 1.9.8 | BSD | YES | | -| xxhash | 0.8.1 | BSD | NO | | -| zstd | 1.4.5 | BSD | NO | | -| google-ced | 37529e6 | Apache 2 | YES | build fixes | -| kann | ? | MIT | YES | blas/lapack changes| -| fpconv | ? | Boost | YES | many changes | -| fastutf8 | ? | MIT | YES | many changes | -| expected | v1.0 | Public Domain / CC0 | NO | | -| robin-hood | 3.9.1 | MIT | NO | | -| frozen | 1.0.1 | Apache 2 | NO | | -| fmt | 8.1.1 | MIT | NO | | -| doctest | 2.4.6 | MIT | NO | | -| function2 | 4.1.0 | Boost | NO | | +| Name | Version | License | Patched | Notes | +|------------------------|---------|---------------------| --- | --- | +| aho-corasick | ? 
| LGPL-3.0 | YES | lowercase support | +| cdb | 1.1.0 | Public Domain / CC0 | NO | | +| hiredis | 0.13.3 | BSD-3-Clause | YES | many changes | +| libev | 4.33 | BSD-2-Clause | YES | many changes | +| lc-btrie | ? | BSD-3-Clause | YES | mempool support | +| libottery | ? | Public Domain / CC0 | YES | many changes | +| librdns | ? | BSD-2-Clause | YES | | +| libucl | ? | BSD-2-Clause | YES | | +| replxx | 6d93360 | BSD-2-Clause | YES | libicu usage | +| lua-argparse | 0.7.1 | MIT | NO | | +| lua-bit | 1.0.2 | MIT | YES | build fixes | +| lua-fun | ? | MIT | YES | rspamd text | +| lua-lpeg | 1.0 | MIT | YES | rspamd text + alloc| +| lua-moses | ? | MIT | NO | | +| lua-lupa | ? | MIT | NO | | +| lua-tableshape | ae67256 | MIT | NO | | +| mumhash | ? | MIT | NO | | +| ngx-http-parser | 2.2.0 | MIT | YES | spamc support | +| Mozilla-PublicSuffix | ? | MIT | NO | | +| snowball | ? | BSD-3-Clause | NO | | +| t1ha | ? | Zlib | NO | | +| uthash | 1.9.8 | BSD | YES | | +| xxhash | 0.8.1 | BSD | NO | | +| zstd | 1.4.5 | BSD | NO | | +| google-ced | 37529e6 | Apache 2 | YES | build fixes | +| kann | ? | MIT | YES | blas/lapack changes| +| fpconv | ? | Boost | YES | many changes | +| fastutf8 | ? | MIT | YES | many changes | +| expected | v1.0 | Public Domain / CC0 | NO | | +| frozen | 1.0.1 | Apache 2 | NO | | +| fmt | 8.1.1 | MIT | NO | | +| doctest | 2.4.6 | MIT | NO | | +| function2 | 4.1.0 | Boost | NO | | +| ankerl/svector | 1.0.0 | MIT | NO | | +| ankerl/unordered_dense | 1.0.2 | MIT | NO | | \ No newline at end of file diff --git a/contrib/ankerl/LICENSE b/contrib/ankerl/LICENSE new file mode 100644 index 000000000..c4d1a0e48 --- /dev/null +++ b/contrib/ankerl/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Martin Leitner-Ankerl + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/contrib/ankerl/svector.h b/contrib/ankerl/svector.h new file mode 100644 index 000000000..b6ef1ad68 --- /dev/null +++ b/contrib/ankerl/svector.h @@ -0,0 +1,993 @@ +// ┌─┐┬ ┬┌─┐┌─┐┌┬┐┌─┐┬─┐ Compact SVO optimized vector C++17 or higher +// └─┐└┐┌┘├┤ │ │ │ │├┬┘ Version 1.0.0 +// └─┘ └┘ └─┘└─┘ ┴ └─┘┴└─ https://github.com/martinus/svector +// +// Licensed under the MIT License . 
+// SPDX-License-Identifier: MIT +// Copyright (c) 2022 Martin Leitner-Ankerl +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ANKERL_SVECTOR_H +#define ANKERL_SVECTOR_H + +// see https://semver.org/spec/v2.0.0.html +#define ANKERL_SVECTOR_VERSION_MAJOR 1 // incompatible API changes +#define ANKERL_SVECTOR_VERSION_MINOR 0 // add functionality in a backwards compatible manner +#define ANKERL_SVECTOR_VERSION_PATCH 0 // backwards compatible bug fixes + +// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/ +#define ANKERL_SVECTOR_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch +#define ANKERL_SVECTOR_VERSION_CONCAT(major, minor, patch) ANKERL_SVECTOR_VERSION_CONCAT1(major, minor, patch) +#define ANKERL_SVECTOR_NAMESPACE \ + ANKERL_SVECTOR_VERSION_CONCAT(ANKERL_SVECTOR_VERSION_MAJOR, ANKERL_SVECTOR_VERSION_MINOR, ANKERL_SVECTOR_VERSION_PATCH) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ankerl { +inline namespace ANKERL_SVECTOR_NAMESPACE { +namespace detail { + +template +using enable_if_t = typename std::enable_if::type; + +template +using is_input_iterator = std::is_base_of::iterator_category>; + +constexpr auto round_up(size_t n, size_t multiple) -> size_t { + return ((n + (multiple - 1)) / multiple) * multiple; +} + +template +constexpr auto cx_min(T a, T b) -> T { + return a < b ? a : b; +} + +template +constexpr auto cx_max(T a, T b) -> T { + return a > b ? a : b; +} + +template +constexpr auto alignment_of_svector() -> size_t { + return cx_max(sizeof(void*), std::alignment_of_v); +} + +/** + * @brief Calculates sizeof(svector) for a given type and inline capacity + */ +template +constexpr auto size_of_svector(size_t min_inline_capacity) -> size_t { + // + 1 for one byte size in direct mode + return round_up(sizeof(T) * min_inline_capacity + 1, alignment_of_svector()); +} + +/** + * @brief Calculates how many T we can actually store inside of an svector without increasing its sizeof(). + * + * E.g. svector could store 7 bytes even though 1 is specified. This makes sure we don't waste any + * of the padding. + */ +template +constexpr auto automatic_capacity(size_t min_inline_capacity) -> size_t { + return cx_min((size_of_svector(min_inline_capacity) - 1U) / sizeof(T), size_t(127)); +} + +/** + * Holds size & capacity, a glorified struct. 
+ */ +class header { + size_t m_size{}; + size_t const m_capacity; + +public: + inline explicit header(size_t capacity) + : m_capacity{capacity} {} + + [[nodiscard]] inline auto size() const -> size_t { + return m_size; + } + + [[nodiscard]] inline auto capacity() const -> size_t { + return m_capacity; + } + + inline void size(size_t s) { + m_size = s; + } +}; + +/** + * @brief Holds header (size+capacity) plus an arbitrary number of T. + * + * To make storage compact, we don't actually store a pointer to T. We don't have to + * because we know exactly at which location it begins. + */ +template +struct storage : public header { + static constexpr auto alignment_of_t = std::alignment_of_v; + static constexpr auto max_alignment = std::max(std::alignment_of_v
, std::alignment_of_v); + static constexpr auto offset_to_data = detail::round_up(sizeof(header), alignment_of_t); + static_assert(max_alignment <= __STDCPP_DEFAULT_NEW_ALIGNMENT__); + + explicit storage(size_t capacity) + : header(capacity) {} + + auto data() -> T* { + auto ptr_to_data = reinterpret_cast(this) + offset_to_data; + return std::launder(reinterpret_cast(ptr_to_data)); + } + + /** + * @brief Allocates space for storage plus capacity*T objects. + * + * Checks to make sure that allocation won't overflow. + * + * @param capacity Number of T to allocate. + * @return storage* + */ + static auto alloc(size_t capacity) -> storage* { + // make sure we don't overflow! + auto mem = sizeof(T) * capacity; + if (mem < capacity) { + throw std::bad_alloc(); + } + if (offset_to_data + mem < mem) { + throw std::bad_alloc(); + } + mem += offset_to_data; + if (static_cast(mem) > static_cast(std::numeric_limits::max())) { + throw std::bad_alloc(); + } + + void* ptr = ::operator new(offset_to_data + sizeof(T) * capacity); + if (nullptr == ptr) { + throw std::bad_alloc(); + } + // use void* to ensure we don't use an overload for T* + return new (ptr) storage(capacity); + } +}; + +} // namespace detail + +template +class svector { + static_assert(MinInlineCapacity <= 127, "sorry, can't have more than 127 direct elements"); + static constexpr auto N = detail::automatic_capacity(MinInlineCapacity); + + enum class direction { direct, indirect }; + + /** + * A buffer to hold the data of the svector Depending on direct/indirect mode, the content it holds is like so: + * + * direct: + * m_data[0] & 1: lowest bit is 1 for direct mode. + * m_data[0] >> 1: size for direct mode + * Then 0-X bytes unused (padding), and then the actual inline T data. + * indirect: + * m_data[0] & 1: lowest bit is 0 for indirect mode + * m_data[0..7]: stores an uintptr_t, which points to the indirect data. + */ + alignas(detail::alignment_of_svector()) std::array(MinInlineCapacity)> m_data; + + // direct mode /////////////////////////////////////////////////////////// + + [[nodiscard]] auto is_direct() const -> bool { + return (m_data[0] & 1U) != 0U; + } + + [[nodiscard]] auto direct_size() const -> size_t { + return m_data[0] >> 1U; + } + + // sets size of direct mode and mode to direct too. + constexpr void set_direct_and_size(size_t s) { + m_data[0] = (s << 1U) | 1U; + } + + [[nodiscard]] auto direct_data() -> T* { + return std::launder(reinterpret_cast(m_data.data() + std::alignment_of_v)); + } + + // indirect mode ///////////////////////////////////////////////////////// + + [[nodiscard]] auto indirect() -> detail::storage* { + detail::storage* ptr; // NOLINT(cppcoreguidelines-init-variables) + std::memcpy(&ptr, m_data.data(), sizeof(ptr)); + return ptr; + } + + [[nodiscard]] auto indirect() const -> detail::storage const* { + return const_cast(this)->indirect(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + void set_indirect(detail::storage* ptr) { + std::memcpy(m_data.data(), &ptr, sizeof(ptr)); + + // safety check to guarantee the lowest bit is 0 + if (is_direct()) { + throw std::bad_alloc(); // LCOV_EXCL_LINE + } + } + + // helpers /////////////////////////////////////////////////////////////// + + /** + * @brief Moves size objects from source_ptr to target_ptr, and destroys what remains in source_ptr. 
+ * + * Assumes data is not overlapping + */ + static void uninitialized_move_and_destroy(T* source_ptr, T* target_ptr, size_t size) { + if constexpr (std::is_trivially_copyable_v) { + std::memcpy(target_ptr, source_ptr, size * sizeof(T)); + } else { + std::uninitialized_move_n(source_ptr, size, target_ptr); + std::destroy_n(source_ptr, size); + } + } + + /** + * @brief Reallocates all data when capacity changes. + * + * if new_capacity <= N chooses direct memory, otherwise indirect. + */ + void realloc(size_t new_capacity) { + if (new_capacity <= N) { + // put everything into direct storage + if (is_direct()) { + // direct -> direct: nothing to do! + return; + } + + // indirect -> direct + auto* storage = indirect(); + uninitialized_move_and_destroy(storage->data(), direct_data(), storage->size()); + set_direct_and_size(storage->size()); + delete storage; + } else { + // put everything into indirect storage + auto* storage = detail::storage::alloc(new_capacity); + if (is_direct()) { + // direct -> indirect + uninitialized_move_and_destroy(data(), storage->data(), size()); + storage->size(size()); + } else { + // indirect -> indirect + uninitialized_move_and_destroy(data(), storage->data(), size()); + storage->size(size()); + delete indirect(); + } + set_indirect(storage); + } + } + + /** + * @brief Doubles starting_capacity until it is >= size_to_fit. + */ + [[nodiscard]] static auto calculate_new_capacity(size_t size_to_fit, size_t starting_capacity) -> size_t { + if (size_to_fit > max_size()) { + // not enough space + throw std::bad_alloc(); + } + + if (size_to_fit == 0) { + // special handling for 0 so N==0 works + return starting_capacity; + } + // start with at least 1, so N==0 works + auto new_capacity = std::max(1, starting_capacity); + + // double capacity until its large enough, but make sure we don't overflow + while (new_capacity < size_to_fit && new_capacity * 2 > new_capacity) { + new_capacity *= 2; + } + if (new_capacity < size_to_fit) { + // got an overflow, set capacity to max + new_capacity = max_size(); + } + return std::min(new_capacity, max_size()); + } + + template + [[nodiscard]] auto capacity() const -> size_t { + if constexpr (D == direction::direct) { + return N; + } else { + return indirect()->capacity(); + } + } + + template + [[nodiscard]] auto size() const -> size_t { + if constexpr (D == direction::direct) { + return direct_size(); + } else { + return indirect()->size(); + } + } + + template + void set_size(size_t s) { + if constexpr (D == direction::direct) { + set_direct_and_size(s); + } else { + indirect()->size(s); + } + } + + void set_size(size_t s) { + if (is_direct()) { + set_size(s); + } else { + set_size(s); + } + } + + template + [[nodiscard]] auto data() -> T* { + if constexpr (D == direction::direct) { + return direct_data(); + } else { + return indirect()->data(); + } + } + + template + [[nodiscard]] auto data() const -> T const* { + return const_cast(this)->data(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template + void pop_back() { + if constexpr (std::is_trivially_destructible_v) { + set_size(size() - 1); + } else { + auto s = size() - 1; + (data() + s)->~T(); + set_size(s); + } + } + + /** + * @brief We need variadic arguments so we can either use copy ctor or default ctor + */ + template + void resize_after_reserve(size_t count, Args&&... 
args) { + auto current_size = size(); + if (current_size > count) { + if constexpr (!std::is_trivially_destructible_v) { + auto* d = data(); + std::destroy(d + count, d + current_size); + } + } else { + auto* d = data(); + for (auto ptr = d + current_size, end = d + count; ptr != end; ++ptr) { + new (static_cast(ptr)) T(std::forward(args)...); + } + } + set_size(count); + } + + // Makes sure that to is not past the end iterator + template + auto erase_checked_end(T const* cfrom, T const* to) -> T* { + auto* const erase_begin = const_cast(cfrom); // NOLINT(cppcoreguidelines-pro-type-const-cast) + auto* const container_end = data() + size(); + auto* const erase_end = std::min(const_cast(to), container_end); // NOLINT(cppcoreguidelines-pro-type-const-cast) + + std::move(erase_end, container_end, erase_begin); + auto const num_erased = std::distance(erase_begin, erase_end); + std::destroy(container_end - num_erased, container_end); + set_size(size() - num_erased); + return erase_begin; + } + + template + void assign(It first, It last, std::input_iterator_tag /*unused*/) { + clear(); + + // TODO this can be made faster, e.g. by setting size only when finished. + while (first != last) { + push_back(*first); + ++first; + } + } + + template + void assign(It first, It last, std::forward_iterator_tag /*unused*/) { + clear(); + + auto s = std::distance(first, last); + reserve(s); + std::uninitialized_copy(first, last, data()); + set_size(s); + } + + // precondition: all uninitialized + void do_move_assign(svector&& other) { + if (!other.is_direct()) { + // take other's memory, even when empty + set_indirect(other.indirect()); + } else { + auto* other_ptr = other.data(); + auto s = other.size(); + auto* other_end = other_ptr + s; + + std::uninitialized_move(other_ptr, other_end, data()); + std::destroy(other_ptr, other_end); + set_size(s); + } + other.set_direct_and_size(0); + } + + /** + * @brief Shifts data [source_begin, source_end( to the right, starting on target_begin. + * + * Preconditions: + * * contiguous memory + * * source_begin <= target_begin + * * source_end onwards is uninitialized memory + * + * Destroys then empty elements in [source_begin, source_end( + */ + static auto shift_right(T* source_begin, T* source_end, T* target_begin) { + // 1. 
uninitialized moves + auto const num_moves = std::distance(source_begin, source_end); + auto const target_end = target_begin + num_moves; + auto const num_uninitialized_move = std::min(num_moves, std::distance(source_end, target_end)); + std::uninitialized_move(source_end - num_uninitialized_move, source_end, target_end - num_uninitialized_move); + std::move_backward(source_begin, source_end - num_uninitialized_move, target_end - num_uninitialized_move); + std::destroy(source_begin, std::min(source_end, target_begin)); + } + + template + [[nodiscard]] auto make_uninitialized_space_new(size_t s, T* p, size_t count) -> T* { + auto target = svector(); + // we know target is indirect because we're increasing capacity + target.reserve(s + count); + + // move everything [begin, pos[ + auto* target_pos = std::uninitialized_move(data(), p, target.template data()); + + // move everything [pos, end] + std::uninitialized_move(p, data() + s, target_pos + count); + + target.template set_size(s + count); + *this = std::move(target); + return target_pos; + } + + template + [[nodiscard]] auto make_uninitialized_space(T const* pos, size_t count) -> T* { + auto* const p = const_cast(pos); // NOLINT(cppcoreguidelines-pro-type-const-cast) + auto s = size(); + if (s + count > capacity()) { + return make_uninitialized_space_new(s, p, count); + } + + shift_right(p, data() + s, p + count); + set_size(s + count); + return p; + } + + // makes space for uninitialized data of cout elements. Also updates size. + [[nodiscard]] auto make_uninitialized_space(T const* pos, size_t count) -> T* { + if (is_direct()) { + return make_uninitialized_space(pos, count); + } + return make_uninitialized_space(pos, count); + } + + void destroy() { + auto const is_dir = is_direct(); + if constexpr (!std::is_trivially_destructible_v) { + T* ptr = nullptr; + size_t s = 0; + if (is_dir) { + ptr = data(); + s = size(); + } else { + ptr = data(); + s = size(); + } + std::destroy_n(ptr, s); + } + if (!is_dir) { + delete indirect(); + } + } + + // performs a const_cast so we don't need this implementation twice + template + auto at(size_t idx) -> T& { + if (idx >= size()) { + throw std::out_of_range{"svector: idx out of range"}; + } + auto* ptr = const_cast(data() + idx); // NOLINT(cppcoreguidelines-pro-type-const-cast) + return *ptr; + } // LCOV_EXCL_LINE why is this single } marked as not covered? gcov bug? 
+ +public: + using value_type = T; + using size_type = size_t; + using difference_type = std::ptrdiff_t; + using reference = value_type&; + using const_reference = value_type const&; + using pointer = T*; + using const_pointer = T const*; + using iterator = T*; + using const_iterator = T const*; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + svector() { + set_direct_and_size(0); + } + + svector(size_t count, T const& value) + : svector() { + resize(count, value); + } + + explicit svector(size_t count) + : svector() { + reserve(count); + if (is_direct()) { + resize_after_reserve(count); + } else { + resize_after_reserve(count); + } + } + + template >> + svector(InputIt first, InputIt last) + : svector() { + assign(first, last); + } + + svector(svector const& other) + : svector() { + auto s = other.size(); + reserve(s); + std::uninitialized_copy(other.begin(), other.end(), begin()); + set_size(s); + } + + svector(svector&& other) noexcept + : svector() { + do_move_assign(std::move(other)); + } + + svector(std::initializer_list init) + : svector(init.begin(), init.end()) {} + + ~svector() { + destroy(); + } + + void assign(size_t count, T const& value) { + clear(); + resize(count, value); + } + + template >> + void assign(InputIt first, InputIt last) { + assign(first, last, typename std::iterator_traits::iterator_category()); + } + + void assign(std::initializer_list l) { + assign(l.begin(), l.end()); + } + + auto operator=(svector const& other) -> svector& { + if (&other == this) { + return *this; + } + + assign(other.begin(), other.end()); + return *this; + } + + auto operator=(svector&& other) noexcept -> svector& { + if (&other == this) { + // It doesn't seem to be required to do self-check, but let's do it anyways to be safe + return *this; + } + destroy(); + do_move_assign(std::move(other)); + return *this; + } + + auto operator=(std::initializer_list l) -> svector& { + assign(l.begin(), l.end()); + return *this; + } + + void resize(size_t count) { + if (count > capacity()) { + reserve(count); + } + if (is_direct()) { + resize_after_reserve(count); + } else { + resize_after_reserve(count); + } + } + + void resize(size_t count, T const& value) { + if (count > capacity()) { + reserve(count); + } + if (is_direct()) { + resize_after_reserve(count, value); + } else { + resize_after_reserve(count, value); + } + } + + auto reserve(size_t s) { + auto old_capacity = capacity(); + auto new_capacity = calculate_new_capacity(s, old_capacity); + if (new_capacity > old_capacity) { + realloc(new_capacity); + } + } + + [[nodiscard]] auto capacity() const -> size_t { + if (is_direct()) { + return capacity(); + } + return capacity(); + } + + [[nodiscard]] auto size() const -> size_t { + if (is_direct()) { + return size(); + } + return size(); + } + + [[nodiscard]] auto data() -> T* { + if (is_direct()) { + return direct_data(); + } + return indirect()->data(); + } + + [[nodiscard]] auto data() const -> T const* { + return const_cast(this)->data(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + template + auto emplace_back(Args&&... 
args) -> T& { + size_t c; // NOLINT(cppcoreguidelines-init-variables) + size_t s; // NOLINT(cppcoreguidelines-init-variables) + bool is_dir = is_direct(); + if (is_dir) { + c = capacity(); + s = size(); + } else { + c = capacity(); + s = size(); + } + + if (s == c) { + auto new_capacity = calculate_new_capacity(s + 1, c); + realloc(new_capacity); + // reallocation happened, so we definitely are now in indirect mode + is_dir = false; + } + + T* ptr; // NOLINT(cppcoreguidelines-init-variables) + if (is_dir) { + ptr = data() + s; + set_size(s + 1); + } else { + ptr = data() + s; + set_size(s + 1); + } + return *new (static_cast(ptr)) T(std::forward(args)...); + } + + void push_back(T const& value) { + emplace_back(value); + } + + void push_back(T&& value) { + emplace_back(std::move(value)); + } + + [[nodiscard]] auto operator[](size_t idx) const -> T const& { + return *(data() + idx); + } + + [[nodiscard]] auto operator[](size_t idx) -> T& { + return *(data() + idx); + } + + auto at(size_t idx) -> T& { + if (is_direct()) { + return at(idx); + } + return at(idx); + } + + auto at(size_t idx) const -> T const& { + return const_cast(this)->at(idx); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + [[nodiscard]] auto begin() const -> T const* { + return data(); + } + + [[nodiscard]] auto cbegin() const -> T const* { + return begin(); + } + + [[nodiscard]] auto begin() -> T* { + return data(); + } + + [[nodiscard]] auto end() -> T* { + if (is_direct()) { + return data() + size(); + } + return data() + size(); + } + + [[nodiscard]] auto end() const -> T const* { + return const_cast(this)->end(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + [[nodiscard]] auto cend() const -> T const* { + return end(); + } + + [[nodiscard]] auto rbegin() -> reverse_iterator { + return reverse_iterator{end()}; + } + + [[nodiscard]] auto rbegin() const -> const_reverse_iterator { + return crbegin(); + } + + [[nodiscard]] auto crbegin() const -> const_reverse_iterator { + return const_reverse_iterator{end()}; + } + + [[nodiscard]] auto rend() -> reverse_iterator { + return reverse_iterator{begin()}; + } + + [[nodiscard]] auto rend() const -> const_reverse_iterator { + return crend(); + } + + [[nodiscard]] auto crend() const -> const_reverse_iterator { + return const_reverse_iterator{begin()}; + } + + [[nodiscard]] auto front() const -> T const& { + return *data(); + } + + [[nodiscard]] auto front() -> T& { + return *data(); + } + + [[nodiscard]] auto back() -> T& { + if (is_direct()) { + return *(data() + size() - 1); + } + return *(data() + size() - 1); + } + + [[nodiscard]] auto back() const -> T const& { + return const_cast(this)->back(); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + void clear() { + if constexpr (!std::is_trivially_destructible_v) { + std::destroy(begin(), end()); + } + + if (is_direct()) { + set_size(0); + } else { + set_size(0); + } + } + + [[nodiscard]] auto empty() const -> bool { + return 0U == size(); + } + + void pop_back() { + if (is_direct()) { + pop_back(); + } else { + pop_back(); + } + } + + [[nodiscard]] static auto max_size() -> size_t { + return std::numeric_limits::max(); + } + + void swap(svector& other) { + // TODO we could try to do the minimum number of moves + std::swap(*this, other); + } + + void shrink_to_fit() { + // per the standard we wouldn't need to do anything here. But since we are so nice, + // let's do the shrink. 
+ auto const c = capacity(); + auto const s = size(); + if (s >= c) { + return; + } + + auto new_capacity = calculate_new_capacity(s, N); + if (new_capacity == c) { + // nothing change! + return; + } + + realloc(new_capacity); + } + + template + auto emplace(const_iterator pos, Args&&... args) -> iterator { + auto* p = make_uninitialized_space(pos, 1); + return new (static_cast(p)) T(std::forward(args)...); + } + + auto insert(const_iterator pos, T const& value) -> iterator { + return emplace(pos, value); + } + + auto insert(const_iterator pos, T&& value) -> iterator { + return emplace(pos, std::move(value)); + } + + auto insert(const_iterator pos, size_t count, T const& value) -> iterator { + auto* p = make_uninitialized_space(pos, count); + std::uninitialized_fill_n(p, count, value); + return p; + } + + template + auto insert(const_iterator pos, It first, It last, std::input_iterator_tag /*unused*/) { + if (!(first != last)) { + return const_cast(pos); // NOLINT(cppcoreguidelines-pro-type-const-cast) + } + + // just input_iterator_tag makes this very slow. Let's do the same as the STL. + if (pos == end()) { + auto s = size(); + while (first != last) { + emplace_back(*first); + ++first; + } + return begin() + s; + } + + auto tmp = svector(first, last); + return insert(pos, std::make_move_iterator(tmp.begin()), std::make_move_iterator(tmp.end())); + } + + template + auto insert(const_iterator pos, It first, It last, std::forward_iterator_tag /*unused*/) { + auto* p = make_uninitialized_space(pos, std::distance(first, last)); + std::uninitialized_copy(first, last, p); + return p; + } + + template >> + auto insert(const_iterator pos, InputIt first, InputIt last) -> iterator { + return insert(pos, first, last, typename std::iterator_traits::iterator_category()); + } + + auto insert(const_iterator pos, std::initializer_list l) -> iterator { + return insert(pos, l.begin(), l.end()); + } + + auto erase(const_iterator pos) -> iterator { + return erase(pos, pos + 1); + } + + auto erase(const_iterator first, const_iterator last) -> iterator { + if (is_direct()) { + return erase_checked_end(first, last); + } + return erase_checked_end(first, last); + } +}; + +template +[[nodiscard]] auto operator==(svector const& a, svector const& b) -> bool { + return std::equal(a.begin(), a.end(), b.begin(), b.end()); +} + +template +[[nodiscard]] auto operator!=(svector const& a, svector const& b) -> bool { + return !(a == b); +} + +template +[[nodiscard]] auto operator<(svector const& a, svector const& b) -> bool { + return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); +} + +template +[[nodiscard]] auto operator>=(svector const& a, svector const& b) -> bool { + return !(a < b); +} + +template +[[nodiscard]] auto operator>(svector const& a, svector const& b) -> bool { + return std::lexicographical_compare(b.begin(), b.end(), a.begin(), a.end()); +} + +template +[[nodiscard]] auto operator<=(svector const& a, svector const& b) -> bool { + return !(a > b); +} + +} // namespace ANKERL_SVECTOR_NAMESPACE +} // namespace ankerl + +// NOLINTNEXTLINE(cert-dcl58-cpp) +namespace std { +inline namespace ANKERL_SVECTOR_NAMESPACE { + +template +constexpr auto erase(ankerl::svector& sv, U const& value) -> typename ankerl::svector::size_type { + auto* removed_begin = std::remove(sv.begin(), sv.end(), value); + auto num_removed = std::distance(removed_begin, sv.end()); + sv.erase(removed_begin, sv.end()); + return num_removed; +} + +template +constexpr auto erase_if(ankerl::svector& sv, Pred pred) -> 
typename ankerl::svector::size_type { + auto* removed_begin = std::remove_if(sv.begin(), sv.end(), pred); + auto num_removed = std::distance(removed_begin, sv.end()); + sv.erase(removed_begin, sv.end()); + return num_removed; +} + +} // namespace ANKERL_SVECTOR_NAMESPACE +} // namespace std + +#endif diff --git a/contrib/ankerl/unordered_dense.h b/contrib/ankerl/unordered_dense.h new file mode 100644 index 000000000..9ae108173 --- /dev/null +++ b/contrib/ankerl/unordered_dense.h @@ -0,0 +1,1199 @@ +///////////////////////// ankerl::unordered_dense::{map, set} ///////////////////////// + +// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion. +// Version 1.0.2 +// https://github.com/martinus/unordered_dense +// +// Licensed under the MIT License . +// SPDX-License-Identifier: MIT +// Copyright (c) 2022 Martin Leitner-Ankerl +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ANKERL_UNORDERED_DENSE_H +#define ANKERL_UNORDERED_DENSE_H + +// see https://semver.org/spec/v2.0.0.html +#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 1 // incompatible API changes +#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 0 // add functionality in a backwards compatible manner +#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 2 // backwards compatible bug fixes + +#if __cplusplus < 201703L +# error ankerl::unordered_dense requires C++17 or higher +#else + +# include // for array +# include // for uint64_t, uint32_t, uint8_t, UINT64_C +# include // for size_t, memcpy, memset +# include // for equal_to, hash +# include // for initializer_list +# include // for pair, distance +# include // for numeric_limits +# include // for allocator, allocator_traits, shared_ptr +# include // for out_of_range +# include // for basic_string +# include // for basic_string_view, hash +# include // for forward_as_tuple +# include // for enable_if_t, declval, conditional_t, ena... +# include // for forward, exchange, pair, as_const, piece... 
+# include // for vector + +# define ANKERL_UNORDERED_DENSE_PMR 0 +# if defined(__has_include) +# if __has_include() +# undef ANKERL_UNORDERED_DENSE_PMR +# define ANKERL_UNORDERED_DENSE_PMR 1 +# include // for polymorphic_allocator +# endif +# endif + +# if defined(_MSC_VER) && defined(_M_X64) +# include +# pragma intrinsic(_umul128) +# endif + +# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) +# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) +# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) +# else +# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) +# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) +# endif + +namespace ankerl::unordered_dense { + +// hash /////////////////////////////////////////////////////////////////////// + +// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash +// No big-endian support (because different values on different machines don't matter), +// hardcodes seed and the secret, reformattes the code, and clang-tidy fixes. +namespace detail::wyhash { + +static inline void mum(uint64_t* a, uint64_t* b) { +# if defined(__SIZEOF_INT128__) + __uint128_t r = *a; + r *= *b; + *a = static_cast(r); + *b = static_cast(r >> 64U); +# elif defined(_MSC_VER) && defined(_M_X64) + *a = _umul128(*a, *b, b); +# else + uint64_t ha = *a >> 32U; + uint64_t hb = *b >> 32U; + uint64_t la = static_cast(*a); + uint64_t lb = static_cast(*b); + uint64_t hi{}; + uint64_t lo{}; + uint64_t rh = ha * hb; + uint64_t rm0 = ha * lb; + uint64_t rm1 = hb * la; + uint64_t rl = la * lb; + uint64_t t = rl + (rm0 << 32U); + auto c = static_cast(t < rl); + lo = t + (rm1 << 32U); + c += static_cast(lo < t); + hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c; + *a = lo; + *b = hi; +# endif +} + +// multiply and xor mix function, aka MUM +[[nodiscard]] static inline auto mix(uint64_t a, uint64_t b) -> uint64_t { + mum(&a, &b); + return a ^ b; +} + +// read functions. WARNING: we don't care about endianness, so results are different on big endian! 
+[[nodiscard]] static inline auto r8(const uint8_t* p) -> uint64_t { + uint64_t v{}; + std::memcpy(&v, p, 8); + return v; +} + +[[nodiscard]] static inline auto r4(const uint8_t* p) -> uint64_t { + uint32_t v{}; + std::memcpy(&v, p, 4); + return v; +} + +// reads 1, 2, or 3 bytes +[[nodiscard]] static inline auto r3(const uint8_t* p, size_t k) -> uint64_t { + return (static_cast(p[0]) << 16U) | (static_cast(p[k >> 1U]) << 8U) | p[k - 1]; +} + +[[nodiscard]] static inline auto hash(void const* key, size_t len) -> uint64_t { + static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f), + UINT64_C(0xe7037ed1a0b428db), + UINT64_C(0x8ebc6af09c88c6e3), + UINT64_C(0x589965cc75374cc3)}; + + auto const* p = static_cast(key); + uint64_t seed = secret[0]; + uint64_t a{}; + uint64_t b{}; + if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) { + if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) { + a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U)); + b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U)); + } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) { + a = r3(p, len); + b = 0; + } else { + a = 0; + b = 0; + } + } else { + size_t i = len; + if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) { + uint64_t see1 = seed; + uint64_t see2 = seed; + do { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1); + see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2); + p += 48; + i -= 48; + } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48)); + seed ^= see1 ^ see2; + } + while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) { + seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); + i -= 16; + p += 16; + } + a = r8(p + i - 16); + b = r8(p + i - 8); + } + + return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed)); +} + +[[nodiscard]] static inline auto hash(uint64_t x) -> uint64_t { + return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15)); +} + +} // namespace detail::wyhash + +template +struct hash : public std::hash { + using is_avalanching = void; + auto operator()(T const& obj) const noexcept(noexcept(std::declval>().operator()(std::declval()))) + -> size_t { + return static_cast(detail::wyhash::hash(std::hash::operator()(obj))); + } +}; + +template +struct hash> { + using is_avalanching = void; + auto operator()(std::basic_string const& str) const noexcept -> size_t { + return static_cast(detail::wyhash::hash(str.data(), sizeof(CharT) * str.size())); + } +}; + +template +struct hash> { + using is_avalanching = void; + auto operator()(std::basic_string_view const& sv) const noexcept -> size_t { + return static_cast(detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size())); + } +}; + +template +struct hash { + using is_avalanching = void; + auto operator()(T* ptr) const noexcept -> size_t { + return static_cast(detail::wyhash::hash(reinterpret_cast(ptr))); + } +}; + +template +struct hash> { + using is_avalanching = void; + auto operator()(std::unique_ptr const& ptr) const noexcept -> size_t { + return static_cast(detail::wyhash::hash(reinterpret_cast(ptr.get()))); + } +}; + +template +struct hash> { + using is_avalanching = void; + auto operator()(std::shared_ptr const& ptr) const noexcept -> size_t { + return static_cast(detail::wyhash::hash(reinterpret_cast(ptr.get()))); + } +}; + +template +struct hash::value>::type> { + using is_avalanching = void; + auto operator()(Enum e) const noexcept -> size_t { + using Underlying = typename std::underlying_type_t; + return static_cast(detail::wyhash::hash(static_cast(e))); + } +}; + +# define 
ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \ + template <> \ + struct hash { \ + using is_avalanching = void; \ + auto operator()(T const& obj) const noexcept -> size_t { \ + return static_cast(detail::wyhash::hash(static_cast(obj))); \ + } \ + } + +# if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wuseless-cast" +# endif +// see https://en.cppreference.com/w/cpp/utility/hash +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char); +# if __cplusplus >= 202002L +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t); +# endif +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long); +ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long); + +# if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic pop +# endif + +namespace detail { + +struct nonesuch {}; + +template class Op, class... Args> +struct detector { + using value_t = std::false_type; + using type = Default; +}; + +template class Op, class... Args> +struct detector>, Op, Args...> { + using value_t = std::true_type; + using type = Op; +}; + +template
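
For context, a minimal usage sketch of the two containers bundled by this patch. The include paths, map contents, and inline capacity below are illustrative only and are not taken from the patch; the container APIs (`emplace`, `operator[]`, `find`, `push_back`, `size`, `capacity`) are the ones provided by the vendored headers.

```cpp
#include <cstdio>
#include <string>

#include "unordered_dense.h" // added as contrib/ankerl/unordered_dense.h
#include "svector.h"         // added as contrib/ankerl/svector.h

int main()
{
    // Densely stored hash map; uses the bundled wyhash-based
    // ankerl::unordered_dense::hash by default.
    ankerl::unordered_dense::map<std::string, int> counts;
    counts.emplace("foo", 1);
    counts["bar"] += 2;

    if (auto it = counts.find("foo"); it != counts.end()) {
        std::printf("foo -> %d\n", it->second);
    }

    // Small vector with inline (SVO) storage for at least 4 elements;
    // it switches to heap storage once that capacity is exceeded.
    ankerl::svector<int, 4> ids;
    for (int i = 0; i < 6; ++i) {
        ids.push_back(i);
    }
    std::printf("ids: %zu elements, capacity %zu\n", ids.size(), ids.capacity());

    return 0;
}
```

Both types are intended as mostly drop-in replacements for `std::unordered_map`-style and small-vector-style containers, which is what allows the rest of this patch to be a near-mechanical substitution of the previous robin-hood map.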