From 486435c147e6af12f8e9ad0cffec98e742fea2c4 Mon Sep 17 00:00:00 2001
From: Vsevolod Stakhov <vsevolod@highsecure.ru>
Date: Mon, 30 Apr 2018 12:40:14 +0100
Subject: [PATCH] [Minor] Remove t1ha0 as it causes too many issues

---
 contrib/t1ha/CMakeLists.txt        |   4 +-
 contrib/t1ha/t1ha.h                |  91 -------
 contrib/t1ha/t1ha0.c               | 419 -----------------------------
 contrib/t1ha/t1ha0_ia32aes_a.h     | 210 ---------------
 contrib/t1ha/t1ha0_ia32aes_noavx.c |   2 -
 src/libcryptobox/cryptobox.c       |   2 +-
 6 files changed, 2 insertions(+), 726 deletions(-)
 delete mode 100644 contrib/t1ha/t1ha0.c
 delete mode 100644 contrib/t1ha/t1ha0_ia32aes_a.h
 delete mode 100644 contrib/t1ha/t1ha0_ia32aes_noavx.c

diff --git a/contrib/t1ha/CMakeLists.txt b/contrib/t1ha/CMakeLists.txt
index 1b54c96d4..6c50d740d 100644
--- a/contrib/t1ha/CMakeLists.txt
+++ b/contrib/t1ha/CMakeLists.txt
@@ -1,6 +1,4 @@
-SET(T1HASRC t1ha0.c
-	t1ha0_ia32aes_noavx.c
-	t1ha1.c
+SET(T1HASRC t1ha1.c
 	t1ha2.c)
 
 ADD_LIBRARY(rspamd-t1ha STATIC ${T1HASRC})
diff --git a/contrib/t1ha/t1ha.h b/contrib/t1ha/t1ha.h
index 82e6e6a77..30c408c87 100644
--- a/contrib/t1ha/t1ha.h
+++ b/contrib/t1ha/t1ha.h
@@ -326,97 +326,6 @@ static __inline uint64_t t1ha(const void *data, size_t length, uint64_t seed) {
   return t1ha1_le(data, length, seed);
 }
 
-/******************************************************************************
- *
- * t1ha0 = 64-bit, JUST ONLY FASTER:
- *
- *  - Provides fast-as-possible hashing for current CPU, including
- *    32-bit systems and engaging the available hardware acceleration.
- *  - It is a facade that selects most quick-and-dirty hash
- *    for the current processor. For instance, on IA32 (x86) actual function
- *    will be selected in runtime, depending on current CPU capabilities
- *
- * BE CAREFUL!!! THIS IS MEANS:
- *
- *  1. The quality of hash is a subject for tradeoffs with performance.
- *     So, the quality and strength of t1ha0() may be lower than t1ha1(),
- *     especially on 32-bit targets, but then much faster.
- *     However, guaranteed that it passes all SMHasher tests.
- *
- *  2. No warranty that the hash result will be same for particular
- *     key on another machine or another version of libt1ha.
- *
- *     Briefly, such hash-results and their derivatives, should be
- *     used only in runtime, but should not be persist or transferred
- *     over a network.
- */
-
-/* The little-endian variant for 32-bit CPU. */
-uint64_t t1ha0_32le(const void *data, size_t length, uint64_t seed);
-/* The big-endian variant for 32-bit CPU. */
-uint64_t t1ha0_32be(const void *data, size_t length, uint64_t seed);
-
-/* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. */
-#ifndef T1HA0_AESNI_AVAILABLE
-#if (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
-  #if defined(__GNUC__) && \
-      ((defined(__clang__) && (__clang_major__ >= 4 || (__clang_major__ >= 3 && __clang_minor__ >= 8))) || \
-      ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8) || (__GNUC__ > 4)))
-    #define T1HA0_AESNI_AVAILABLE 1
-  #else
-    #define T1HA0_AESNI_AVAILABLE 0
-  #endif
-#else
-#define T1HA0_AESNI_AVAILABLE 0
-#endif
-#endif /* T1HA0_AESNI_AVAILABLE */
-
-/* Define T1HA0_RUNTIME_SELECT to 0 for disable dispatching t1ha0 at runtime.
- */
-#ifndef T1HA0_RUNTIME_SELECT
-#if T1HA0_AESNI_AVAILABLE && !defined(__e2k__)
-#define T1HA0_RUNTIME_SELECT 1
-#else
-#define T1HA0_RUNTIME_SELECT 0
-#endif
-#endif /* T1HA0_RUNTIME_SELECT */
-
-#if T1HA0_AESNI_AVAILABLE
-uint64_t t1ha0_ia32aes_noavx(const void *data, size_t length, uint64_t seed);
-#endif /* T1HA0_AESNI_AVAILABLE */
-
-#if T1HA0_RUNTIME_SELECT
-#ifdef __ELF__
-/* ifunc/gnu_indirect_function will be used on ELF.
- * Please see https://en.wikipedia.org/wiki/Executable_and_Linkable_Format */
-T1HA_API uint64_t t1ha0(const void *data, size_t length, uint64_t seed);
-#else
-/* Otherwise function pointer will be used.
- * Unfortunately this may cause some overhead calling. */
-T1HA_API extern uint64_t (*t1ha0_funcptr)(const void *data, size_t length,
-                                          uint64_t seed);
-static __inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) {
-  return t1ha0_funcptr(data, length, seed);
-}
-#endif /* __ELF__ */
-
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-static __inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) {
-#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
-  return t1ha1_be(data, length, seed);
-#else
-  return t1ha0_32be(data, length, seed);
-#endif
-}
-#else
-static __inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) {
-#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
-  return t1ha1_le(data, length, seed);
-#else
-  return t1ha0_32le(data, length, seed);
-#endif
-}
-#endif /* !T1HA0_RUNTIME_SELECT */
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/contrib/t1ha/t1ha0.c b/contrib/t1ha/t1ha0.c
deleted file mode 100644
index 14761d8cd..000000000
--- a/contrib/t1ha/t1ha0.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
- * Fast Positive Hash.
- *
- * Portions Copyright (c) 2010-2018 Leonid Yuriev <leo@yuriev.ru>,
- * The 1Hippeus project (t1h).
- *
- * This software is provided 'as-is', without any express or implied
- * warranty. In no event will the authors be held liable for any damages
- * arising from the use of this software.
- *
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
- *
- * 1. The origin of this software must not be misrepresented; you must not
- *    claim that you wrote the original software. If you use this software
- *    in a product, an acknowledgement in the product documentation would be
- *    appreciated but is not required.
- * 2. Altered source versions must be plainly marked as such, and must not be
- *    misrepresented as being the original software.
- * 3. This notice may not be removed or altered from any source distribution.
- */
-
-/*
- * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
- * by [Positive Technologies](https://www.ptsecurity.ru)
- *
- * Briefly, it is a 64-bit Hash Function:
- *  1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
- *     but portable and without penalties it can run on any 64-bit CPU.
- *  2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
- *     and all others portable hash-functions (which do not use specific
- *     hardware tricks).
- *  3. Not suitable for cryptography.
- *
- * The Future will Positive. Всё будет хорошо.
- *
- * ACKNOWLEDGEMENT:
- * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
- * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
- */
-
-#include "config.h"
-#include "t1ha_bits.h"
-
-#if defined(__ia32__) || defined(__e2k__)
-#include <x86intrin.h>
-#endif
-
-#if defined(__ia32__)
-#include <cpuid.h>
-#endif
-
-static __always_inline uint32_t tail32_le(const void *v, size_t tail) {
-  const uint8_t *p = (const uint8_t *)v;
-#ifdef can_read_underside
-  /* On some systems (e.g. x86) we can perform a 'oneshot' read, which
-   * is little bit faster. Thanks Marcin Żukowski
-   * for the reminder. */
-  const unsigned offset = (4 - tail) & 3;
-  const unsigned shift = offset << 3;
-  if (likely(can_read_underside(p, 4))) {
-    p -= offset;
-    return fetch32_le(p) >> shift;
-  }
-  return fetch32_le(p) & ((~UINT32_C(0)) >> shift);
-#endif /* 'oneshot' read */
-
-  uint32_t r = 0;
-  switch (tail & 3) {
-#if UNALIGNED_OK && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-  /* For most CPUs this code is better when not needed
-   * copying for alignment or byte reordering. */
-  case 0:
-    return fetch32_le(p);
-  case 3:
-    r = (uint32_t)p[2] << 16;
-  /* fall through */
-  case 2:
-    return r + fetch16_le(p);
-  case 1:
-    return p[0];
-#else
-  /* For most CPUs this code is better than a
-   * copying for alignment and/or byte reordering. */
-  case 0:
-    r += p[3];
-    r <<= 8;
-  /* fall through */
-  case 3:
-    r += p[2];
-    r <<= 8;
-  /* fall through */
-  case 2:
-    r += p[1];
-    r <<= 8;
-  /* fall through */
-  case 1:
-    return r + p[0];
-#endif
-  }
-  unreachable();
-}
-
-static __always_inline uint32_t tail32_be(const void *v, size_t tail) {
-  const uint8_t *p = (const uint8_t *)v;
-#ifdef can_read_underside
-  /* On some systems we can perform a 'oneshot' read, which is little bit
-   * faster. Thanks Marcin Żukowski for the
-   * reminder. */
-  const unsigned offset = (4 - tail) & 3;
-  const unsigned shift = offset << 3;
-  if (likely(can_read_underside(p, 4))) {
-    p -= offset;
-    return fetch32_be(p) & ((~UINT32_C(0)) >> shift);
-  }
-  return fetch32_be(p) >> shift;
-#endif /* 'oneshot' read */
-
-  switch (tail & 3) {
-#if UNALIGNED_OK && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-  /* For most CPUs this code is better when not needed
-   * copying for alignment or byte reordering. */
-  case 1:
-    return p[0];
-  case 2:
-    return fetch16_be(p);
-  case 3:
-    return fetch16_be(p) << 8 | p[2];
-  case 0:
-    return fetch32_be(p);
-#else
-  /* For most CPUs this code is better than a
-   * copying for alignment and/or byte reordering. */
-  case 1:
-    return p[0];
-  case 2:
-    return p[1] | (uint32_t)p[0] << 8;
-  case 3:
-    return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
-  case 0:
-    return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
-           (uint32_t)p[0] << 24;
-#endif
-  }
-  unreachable();
-}
-
-/***************************************************************************/
-
-#ifndef rot32
-static __maybe_unused __always_inline uint32_t rot32(uint32_t v, unsigned s) {
-  return (v >> s) | (v << (32 - s));
-}
-#endif /* rot32 */
-
-static __always_inline void mixup32(uint32_t *a, uint32_t *b, uint32_t v,
-                                    uint32_t prime) {
-  uint64_t l = mul_32x32_64(*b + v, prime);
-  *a ^= (uint32_t)l;
-  *b += (uint32_t)(l >> 32);
-}
-
-static __always_inline uint64_t final32(uint32_t a, uint32_t b) {
-  uint64_t l = (b ^ rot32(a, 13)) | (uint64_t)a << 32;
-  l *= prime_0;
-  l ^= l >> 41;
-  l *= prime_4;
-  l ^= l >> 47;
-  l *= prime_6;
-  return l;
-}
-
-/* 32-bit 'magic' primes */
-static const uint32_t prime32_0 = UINT32_C(0x92D78269);
-static const uint32_t prime32_1 = UINT32_C(0xCA9B4735);
-static const uint32_t prime32_2 = UINT32_C(0xA4ABA1C3);
-static const uint32_t prime32_3 = UINT32_C(0xF6499843);
-static const uint32_t prime32_4 = UINT32_C(0x86F0FD61);
-static const uint32_t prime32_5 = UINT32_C(0xCA2DA6FB);
-static const uint32_t prime32_6 = UINT32_C(0xC4BB3575);
-
-uint64_t t1ha0_32le(const void *data, size_t len, uint64_t seed) {
-  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
-  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);
-
-  const int need_align = (((uintptr_t)data) & 3) != 0 && !UNALIGNED_OK;
-  uint32_t align[4];
-
-  if (unlikely(len > 16)) {
-    uint32_t c = ~a;
-    uint32_t d = rot32(b, 5);
-    const void *detent = (const uint8_t *)data + len - 15;
-    do {
-      const uint32_t *v = (const uint32_t *)data;
-      if (unlikely(need_align))
-        v = (const uint32_t *)memcpy(&align, unaligned(v), 16);
-
-      uint32_t w0 = fetch32_le(v + 0);
-      uint32_t w1 = fetch32_le(v + 1);
-      uint32_t w2 = fetch32_le(v + 2);
-      uint32_t w3 = fetch32_le(v + 3);
-
-      uint32_t c02 = w0 ^ rot32(w2 + c, 11);
-      uint32_t d13 = w1 + rot32(w3 + d, 17);
-      c ^= rot32(b + w1, 7);
-      d ^= rot32(a + w0, 3);
-      b = prime32_1 * (c02 + w3);
-      a = prime32_0 * (d13 ^ w2);
-
-      data = (const uint32_t *)data + 4;
-    } while (likely(data < detent));
-
-    c += a;
-    d += b;
-    a ^= prime32_6 * (rot32(c, 16) + d);
-    b ^= prime32_5 * (c + rot32(d, 16));
-
-    len &= 15;
-  }
-
-  const uint8_t *v = (const uint8_t *)data;
-  if (unlikely(need_align) && len > 4)
-    v = (const uint8_t *)memcpy(&align, unaligned(v), len);
-
-  switch (len) {
-  default:
-    mixup32(&a, &b, fetch32_le(v), prime32_4);
-    v += 4;
-  /* fall through */
-  case 12:
-  case 11:
-  case 10:
-  case 9:
-    mixup32(&b, &a, fetch32_le(v), prime32_3);
-    v += 4;
-  /* fall through */
-  case 8:
-  case 7:
-  case 6:
-  case 5:
-    mixup32(&a, &b, fetch32_le(v), prime32_2);
-    v += 4;
-  /* fall through */
-  case 4:
-  case 3:
-  case 2:
-  case 1:
-    mixup32(&b, &a, tail32_le(v, len), prime32_1);
-  /* fall through */
-  case 0:
-    return final32(a, b);
-  }
-}
-
-uint64_t t1ha0_32be(const void *data, size_t len, uint64_t seed) {
-  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
-  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);
-
-  const int need_align = (((uintptr_t)data) & 3) != 0 && !UNALIGNED_OK;
-  uint32_t align[4];
-
-  if (unlikely(len > 16)) {
-    uint32_t c = ~a;
-    uint32_t d = rot32(b, 5);
-    const void *detent = (const uint8_t *)data + len - 15;
-    do {
-      const uint32_t *v = (const uint32_t *)data;
-      if (unlikely(need_align))
-        v = (const uint32_t *)memcpy(&align, unaligned(v), 16);
-
-      uint32_t w0 = fetch32_be(v + 0);
-      uint32_t w1 = fetch32_be(v + 1);
-      uint32_t w2 = fetch32_be(v + 2);
-      uint32_t w3 = fetch32_be(v + 3);
-
-      uint32_t c02 = w0 ^ rot32(w2 + c, 11);
-      uint32_t d13 = w1 + rot32(w3 + d, 17);
-      c ^= rot32(b + w1, 7);
-      d ^= rot32(a + w0, 3);
-      b = prime32_1 * (c02 + w3);
-      a = prime32_0 * (d13 ^ w2);
-
-      data = (const uint32_t *)data + 4;
-    } while (likely(data < detent));
-
-    c += a;
-    d += b;
-    a ^= prime32_6 * (rot32(c, 16) + d);
-    b ^= prime32_5 * (c + rot32(d, 16));
-
-    len &= 15;
-  }
-
-  const uint8_t *v = (const uint8_t *)data;
-  if (unlikely(need_align) && len > 4)
-    v = (const uint8_t *)memcpy(&align, unaligned(v), len);
-
-  switch (len) {
-  default:
-    mixup32(&a, &b, fetch32_be(v), prime32_4);
-    v += 4;
-  /* fall through */
-  case 12:
-  case 11:
-  case 10:
-  case 9:
-    mixup32(&b, &a, fetch32_be(v), prime32_3);
-    v += 4;
-  /* fall through */
-  case 8:
-  case 7:
-  case 6:
-  case 5:
-    mixup32(&a, &b, fetch32_be(v), prime32_2);
-    v += 4;
-  /* fall through */
-  case 4:
-  case 3:
-  case 2:
-  case 1:
-    mixup32(&b, &a, tail32_be(v, len), prime32_1);
-  /* fall through */
-  case 0:
-    return final32(a, b);
-  }
-}
-
-/***************************************************************************/
-
-#if T1HA0_RUNTIME_SELECT
-
-#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
-static uint64_t x86_cpu_features(void) {
-  uint32_t features = 0;
-  uint32_t extended = 0;
-#ifdef __GNUC__
-  uint32_t eax, ebx, ecx, edx;
-  const unsigned cpuid_max = __get_cpuid_max(0, NULL);
-  if (cpuid_max >= 1) {
-    __cpuid_count(1, 0, eax, ebx, features, edx);
-    if (cpuid_max >= 7)
-      __cpuid_count(7, 0, eax, extended, ecx, edx);
-  }
-#elif defined(_MSC_VER)
-  int info[4];
-  __cpuid(info, 0);
-  const unsigned cpuid_max = info[0];
-  if (cpuid_max >= 1) {
-    __cpuidex(info, 1, 0);
-    features = info[2];
-    if (cpuid_max >= 7) {
-      __cpuidex(info, 7, 0);
-      extended = info[1];
-    }
-  }
-#endif
-  return features | (uint64_t)extended << 32;
-}
-#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
-
-static
-#if __GNUC_PREREQ(4, 0) || __has_attribute(used)
-    __attribute__((used))
-#endif
-    uint64_t (*t1ha0_resolve(void))(const void *, size_t, uint64_t) {
-
-#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
-  uint64_t features = x86_cpu_features();
-  if (features & UINT32_C(0x02000000) /* check for AES-NI */) {
-    return t1ha0_ia32aes_noavx;
-  }
-#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
-
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
-  return t1ha1_be;
-#else
-  return t1ha0_32be;
-#endif
-#else /* __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
-#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
-  return t1ha1_le;
-#else
-  return t1ha0_32le;
-#endif
-#endif /* __BYTE_ORDER__ */
-}
-
-#ifdef __ELF__
-
-#if __has_attribute(ifunc)
-
-uint64_t t1ha0(const void *data, size_t len, uint64_t seed)
-    __attribute__((ifunc("t1ha0_resolve")));
-#else
-__asm("\t.globl\tt1ha0\n\t.type\tt1ha0, "
-      "%gnu_indirect_function\n\t.set\tt1ha0,t1ha0_resolve");
-#endif /* ifunc */
-
-#elif __GNUC_PREREQ(4, 0) || __has_attribute(constructor)
-
-uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t);
-
-static void __attribute__((constructor)) t1ha0_init(void) {
-  t1ha0_funcptr = t1ha0_resolve();
-}
-
-#else /* ELF */
-static uint64_t t1ha0_proxy(const void *data, size_t len, uint64_t seed) {
-  t1ha0_funcptr = t1ha0_resolve();
-  return t1ha0_funcptr(data, len, seed);
-}
-
-uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t) = t1ha0_proxy;
-
-#endif /* !ELF */
-#endif /* T1HA0_RUNTIME_SELECT */
diff --git a/contrib/t1ha/t1ha0_ia32aes_a.h b/contrib/t1ha/t1ha0_ia32aes_a.h
deleted file mode 100644
index 7399c15d1..000000000
--- a/contrib/t1ha/t1ha0_ia32aes_a.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
- * Fast Positive Hash.
- *
- * Portions Copyright (c) 2010-2018 Leonid Yuriev <leo@yuriev.ru>,
- * The 1Hippeus project (t1h).
- *
- * This software is provided 'as-is', without any express or implied
- * warranty. In no event will the authors be held liable for any damages
- * arising from the use of this software.
- *
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
- *
- * 1. The origin of this software must not be misrepresented; you must not
- *    claim that you wrote the original software. If you use this software
- *    in a product, an acknowledgement in the product documentation would be
- *    appreciated but is not required.
- * 2. Altered source versions must be plainly marked as such, and must not be
- *    misrepresented as being the original software.
- * 3. This notice may not be removed or altered from any source distribution.
- */
-
-/*
- * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
- * by [Positive Technologies](https://www.ptsecurity.ru)
- *
- * Briefly, it is a 64-bit Hash Function:
- *  1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
- *     but portable and without penalties it can run on any 64-bit CPU.
- *  2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
- *     and all others portable hash-functions (which do not use specific
- *     hardware tricks).
- *  3. Not suitable for cryptography.
- *
- * The Future will Positive. Всё будет хорошо.
- *
- * ACKNOWLEDGEMENT:
- * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
- * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
- */
-
-#include "t1ha_bits.h"
-
-
-#if T1HA0_AESNI_AVAILABLE
-
-#pragma GCC push_options
-#pragma GCC target("aes")
-#ifndef __SSE2__
-#define __SSE2__
-#endif
-#ifndef __SSE__
-#define __SSE__
-#endif
-#ifndef __AES__
-#define __AES__
-#endif
-#include <immintrin.h>
-#if defined(__ia32__) || defined(__e2k__)
-#include <x86intrin.h>
-#endif
-
-#if defined(__ia32__)
-#include <cpuid.h>
-#endif
-
-uint64_t T1HA_IA32AES_NAME(const void *data, size_t len, uint64_t seed) __attribute__((target("aes")));
-
-uint64_t T1HA_IA32AES_NAME(const void *data, size_t len, uint64_t seed) {
-  uint64_t a = seed;
-  uint64_t b = len;
-
-  if (unlikely(len > 32)) {
-    __m128i x = _mm_set_epi64x(a, b);
-    __m128i y = _mm_aesenc_si128(x, _mm_set_epi64x(prime_5, prime_6));
-
-    const __m128i *__restrict v = (const __m128i *)data;
-    const __m128i *__restrict const detent =
-        (const __m128i *)((const uint8_t *)data + len - 127);
-
-    while (v < detent) {
-      __m128i v0 = _mm_loadu_si128(v + 0);
-      __m128i v1 = _mm_loadu_si128(v + 1);
-      __m128i v2 = _mm_loadu_si128(v + 2);
-      __m128i v3 = _mm_loadu_si128(v + 3);
-      __m128i v4 = _mm_loadu_si128(v + 4);
-      __m128i v5 = _mm_loadu_si128(v + 5);
-      __m128i v6 = _mm_loadu_si128(v + 6);
-      __m128i v7 = _mm_loadu_si128(v + 7);
-
-      __m128i v0y = _mm_aesenc_si128(v0, y);
-      __m128i v2x6 = _mm_aesenc_si128(v2, _mm_xor_si128(x, v6));
-      __m128i v45_67 =
-          _mm_xor_si128(_mm_aesenc_si128(v4, v5), _mm_add_epi64(v6, v7));
-
-      __m128i v0y7_1 = _mm_aesdec_si128(_mm_sub_epi64(v7, v0y), v1);
-      __m128i v2x6_3 = _mm_aesenc_si128(v2x6, v3);
-
-      x = _mm_aesenc_si128(v45_67, _mm_add_epi64(x, y));
-      y = _mm_aesenc_si128(v2x6_3, _mm_xor_si128(v0y7_1, v5));
-      v += 8;
-    }
-
-    if (len & 64) {
-      __m128i v0y = _mm_add_epi64(y, _mm_loadu_si128(v++));
-      __m128i v1x = _mm_sub_epi64(x, _mm_loadu_si128(v++));
-      x = _mm_aesdec_si128(x, v0y);
-      y = _mm_aesdec_si128(y, v1x);
-
-      __m128i v2y = _mm_add_epi64(y, _mm_loadu_si128(v++));
-      __m128i v3x = _mm_sub_epi64(x, _mm_loadu_si128(v++));
-      x = _mm_aesdec_si128(x, v2y);
-      y = _mm_aesdec_si128(y, v3x);
-    }
-
-    if (len & 32) {
-      __m128i v0y = _mm_add_epi64(y, _mm_loadu_si128(v++));
-      __m128i v1x = _mm_sub_epi64(x, _mm_loadu_si128(v++));
-      x = _mm_aesdec_si128(x, v0y);
-      y = _mm_aesdec_si128(y, v1x);
-    }
-
-    if (len & 16) {
-      y = _mm_add_epi64(x, y);
-      x = _mm_aesdec_si128(x, _mm_loadu_si128(v++));
-    }
-
-    x = _mm_add_epi64(_mm_aesdec_si128(x, _mm_aesenc_si128(y, x)), y);
-#if defined(__x86_64__) || defined(_M_X64)
-#if defined(__SSE4_1__) || defined(__AVX__)
-    a = _mm_extract_epi64(x, 0);
-    b = _mm_extract_epi64(x, 1);
-#else
-    a = _mm_cvtsi128_si64(x);
-    b = _mm_cvtsi128_si64(_mm_unpackhi_epi64(x, x));
-#endif
-#else
-#if defined(__SSE4_1__) || defined(__AVX__)
-    a = (uint32_t)_mm_extract_epi32(x, 0) |
-        (uint64_t)_mm_extract_epi32(x, 1) << 32;
-    b = (uint32_t)_mm_extract_epi32(x, 2) |
-        (uint64_t)_mm_extract_epi32(x, 3) << 32;
-#else
-    a = (uint32_t)_mm_cvtsi128_si32(x);
-    a |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32;
-    x = _mm_unpackhi_epi64(x, x);
-    b = (uint32_t)_mm_cvtsi128_si32(x);
-    b |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32;
-#endif
-#endif
-#ifdef __AVX__
-    _mm256_zeroall();
-#elif !(defined(_X86_64_) || defined(__x86_64__) || defined(_M_X64))
-    _mm_empty();
-#endif
-    data = v;
-    len &= 15;
-  }
-
-  const uint64_t *v = (const uint64_t *)data;
-#ifdef __e2k__
-  const int need_align = (((uintptr_t)data) & 7) != 0 && !UNALIGNED_OK;
-  uint64_t align[4];
-  if (unlikely(need_align) && len > 8)
-    v = (const uint64_t *)memcpy(&align, unaligned(v), len);
-#endif /* __e2k__ */
-
-  switch (len) {
-  default:
-    mixup64(&a, &b, *v++, prime_4);
-  /* fall through */
-  case 24:
-  case 23:
-  case 22:
-  case 21:
-  case 20:
-  case 19:
-  case 18:
-  case 17:
-    mixup64(&b, &a, *v++, prime_3);
-  /* fall through */
-  case 16:
-  case 15:
-  case 14:
-  case 13:
-  case 12:
-  case 11:
-  case 10:
-  case 9:
-    mixup64(&a, &b, *v++, prime_2);
-  /* fall through */
-  case 8:
-  case 7:
-  case 6:
-  case 5:
-  case 4:
-  case 3:
-  case 2:
-  case 1:
-    mixup64(&b, &a, tail64_le(v, len), prime_1);
-  /* fall through */
-  case 0:
-    return final64(a, b);
-  }
-}
-
-#endif /* T1HA0_AESNI_AVAILABLE */
-#undef T1HA_IA32AES_NAME
diff --git a/contrib/t1ha/t1ha0_ia32aes_noavx.c b/contrib/t1ha/t1ha0_ia32aes_noavx.c
deleted file mode 100644
index ca4588de7..000000000
--- a/contrib/t1ha/t1ha0_ia32aes_noavx.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define T1HA_IA32AES_NAME t1ha0_ia32aes_noavx
-#include "t1ha0_ia32aes_a.h"
diff --git a/src/libcryptobox/cryptobox.c b/src/libcryptobox/cryptobox.c
index 5f8b2eca4..c0857b094 100644
--- a/src/libcryptobox/cryptobox.c
+++ b/src/libcryptobox/cryptobox.c
@@ -1511,7 +1511,7 @@ static inline guint64
 rspamd_cryptobox_fast_hash_machdep (const void *data,
 		gsize len, guint64 seed)
 {
-	return t1ha0 (data, len, seed);
+	return t1ha2_atonce (data, len, seed);
 }
 
 static inline guint64
-- 
2.39.5
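
Note on the final hunk: t1ha2_atonce() takes the same (data, length, seed)
arguments as the removed t1ha0(), so the cryptobox change is a drop-in swap.
Unlike t1ha0, t1ha2 is not dispatched at runtime per CPU, which sidesteps
exactly the caveat the deleted t1ha.h comment warns about: t1ha0 results may
differ between machines and library versions and must not be persisted. A
minimal usage sketch follows, assuming only the t1ha2_atonce() declaration
from contrib/t1ha/t1ha.h; the main() scaffolding, sample key, and zero seed
are illustrative, not part of the tree:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #include "t1ha.h" /* declares t1ha2_atonce(), still built from contrib/t1ha */

  int main(void) {
    const char key[] = "example key";

    /* Same calling convention as the removed t1ha0(), but the result no
     * longer depends on which implementation a runtime CPU probe picked
     * (AES-NI vs. 32-bit fallback), so equal inputs hash equally across
     * hosts running the same build. */
    uint64_t h = t1ha2_atonce(key, strlen(key), 0 /* seed */);

    printf("t1ha2_atonce = %016llx\n", (unsigned long long)h);
    return 0;
  }

The performance cost of the swap is modest: t1ha2 is slightly slower than the
AES-NI t1ha0 path on large inputs, but rspamd uses this hash for short keys,
where the difference is negligible compared to the portability gain.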