
t1ha0.c 11KB

/*
 *  Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
 *  Fast Positive Hash.
 *
 *  Portions Copyright (c) 2010-2018 Leonid Yuriev <leo@yuriev.ru>,
 *  The 1Hippeus project (t1h).
 *
 *  This software is provided 'as-is', without any express or implied
 *  warranty. In no event will the authors be held liable for any damages
 *  arising from the use of this software.
 *
 *  Permission is granted to anyone to use this software for any purpose,
 *  including commercial applications, and to alter it and redistribute it
 *  freely, subject to the following restrictions:
 *
 *  1. The origin of this software must not be misrepresented; you must not
 *     claim that you wrote the original software. If you use this software
 *     in a product, an acknowledgement in the product documentation would be
 *     appreciated but is not required.
 *  2. Altered source versions must be plainly marked as such, and must not be
 *     misrepresented as being the original software.
 *  3. This notice may not be removed or altered from any source distribution.
 */
/*
 * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
 * by [Positive Technologies](https://www.ptsecurity.ru)
 *
 * Briefly, it is a 64-bit Hash Function:
 *  1. Created for 64-bit little-endian platforms, predominantly for x86_64,
 *     but portable; it can run on any 64-bit CPU without penalties.
 *  2. In most cases up to 15% faster than City64, xxHash, mum-hash,
 *     metro-hash and all other portable hash functions (which do not use
 *     specific hardware tricks).
 *  3. Not suitable for cryptography.
 *
 * The Future will be Positive. Всё будет хорошо.
 *
 * ACKNOWLEDGEMENT:
 * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
 * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
 */
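
/* This translation unit provides the portable 32-bit flavours of t1ha0
 * (t1ha0_32le and t1ha0_32be) and, when T1HA0_RUNTIME_SELECT is enabled,
 * the dispatcher that binds the public entry point
 *     uint64_t t1ha0(const void *data, size_t len, uint64_t seed);
 * to the most suitable implementation for the current CPU. */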

#include "config.h"
#include "t1ha_bits.h"

static __always_inline uint32_t tail32_le(const void *v, size_t tail) {
  const uint8_t *p = (const uint8_t *)v;
#ifdef can_read_underside
  /* On some systems (e.g. x86) we can perform a 'oneshot' read, which is a
   * little bit faster. Thanks to Marcin Żukowski <marcin.zukowski@gmail.com>
   * for the reminder. */
  const unsigned offset = (4 - tail) & 3;
  const unsigned shift = offset << 3;
  if (likely(can_read_underside(p, 4))) {
    p -= offset;
    return fetch32_le(p) >> shift;
  }
  return fetch32_le(p) & ((~UINT32_C(0)) >> shift);
#endif /* 'oneshot' read */

  uint32_t r = 0;
  switch (tail & 3) {
#if UNALIGNED_OK && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  /* For most CPUs this code is better when copying for alignment or byte
   * reordering is not needed. */
  case 0:
    return fetch32_le(p);
  case 3:
    r = (uint32_t)p[2] << 16;
  /* fall through */
  case 2:
    return r + fetch16_le(p);
  case 1:
    return p[0];
#else
  /* For most CPUs this code is better than copying for alignment and/or
   * byte reordering. */
  case 0:
    r += p[3];
    r <<= 8;
  /* fall through */
  case 3:
    r += p[2];
    r <<= 8;
  /* fall through */
  case 2:
    r += p[1];
    r <<= 8;
  /* fall through */
  case 1:
    return r + p[0];
#endif
  }
  unreachable();
}

static __always_inline uint32_t tail32_be(const void *v, size_t tail) {
  const uint8_t *p = (const uint8_t *)v;
#ifdef can_read_underside
  /* On some systems we can perform a 'oneshot' read, which is a little bit
   * faster. Thanks to Marcin Żukowski <marcin.zukowski@gmail.com> for the
   * reminder. */
  const unsigned offset = (4 - tail) & 3;
  const unsigned shift = offset << 3;
  if (likely(can_read_underside(p, 4))) {
    p -= offset;
    return fetch32_be(p) & ((~UINT32_C(0)) >> shift);
  }
  return fetch32_be(p) >> shift;
#endif /* 'oneshot' read */

  switch (tail & 3) {
#if UNALIGNED_OK && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  /* For most CPUs this code is better when copying for alignment or byte
   * reordering is not needed. */
  case 1:
    return p[0];
  case 2:
    return fetch16_be(p);
  case 3:
    return fetch16_be(p) << 8 | p[2];
  case 0:
    return fetch32_be(p);
#else
  /* For most CPUs this code is better than copying for alignment and/or
   * byte reordering. */
  case 1:
    return p[0];
  case 2:
    return p[1] | (uint32_t)p[0] << 8;
  case 3:
    return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
  case 0:
    return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
           (uint32_t)p[0] << 24;
#endif
  }
  unreachable();
}

/***************************************************************************/

#ifndef rot32
static __maybe_unused __always_inline uint32_t rot32(uint32_t v, unsigned s) {
  return (v >> s) | (v << (32 - s));
}
#endif /* rot32 */

/* 32x32 -> 64 multiply-and-mix step: the low half of the product is xored
 * into *a, the high half is added into *b. */
static __always_inline void mixup32(uint32_t *a, uint32_t *b, uint32_t v,
                                    uint32_t prime) {
  uint64_t l = mul_32x32_64(*b + v, prime);
  *a ^= (uint32_t)l;
  *b += (uint32_t)(l >> 32);
}

/* Final avalanche: fold the two 32-bit state halves into 64 bits and mix
 * them by alternating multiplications (with the 64-bit primes prime_0,
 * prime_4 and prime_6 defined elsewhere) and xor-shifts. */
static __always_inline uint64_t final32(uint32_t a, uint32_t b) {
  uint64_t l = (b ^ rot32(a, 13)) | (uint64_t)a << 32;
  l *= prime_0;
  l ^= l >> 41;
  l *= prime_4;
  l ^= l >> 47;
  l *= prime_6;
  return l;
}

/* 32-bit 'magic' primes */
static const uint32_t prime32_0 = UINT32_C(0x92D78269);
static const uint32_t prime32_1 = UINT32_C(0xCA9B4735);
static const uint32_t prime32_2 = UINT32_C(0xA4ABA1C3);
static const uint32_t prime32_3 = UINT32_C(0xF6499843);
static const uint32_t prime32_4 = UINT32_C(0x86F0FD61);
static const uint32_t prime32_5 = UINT32_C(0xCA2DA6FB);
static const uint32_t prime32_6 = UINT32_C(0xC4BB3575);

uint64_t t1ha0_32le(const void *data, size_t len, uint64_t seed) {
  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);

  const int need_align = (((uintptr_t)data) & 3) != 0 && !UNALIGNED_OK;
  uint32_t align[4];

  if (unlikely(len > 16)) {
    uint32_t c = ~a;
    uint32_t d = rot32(b, 5);
    const void *detent = (const uint8_t *)data + len - 15;
    do {
      const uint32_t *v = (const uint32_t *)data;
      if (unlikely(need_align))
        v = (const uint32_t *)memcpy(&align, unaligned(v), 16);

      uint32_t w0 = fetch32_le(v + 0);
      uint32_t w1 = fetch32_le(v + 1);
      uint32_t w2 = fetch32_le(v + 2);
      uint32_t w3 = fetch32_le(v + 3);

      uint32_t c02 = w0 ^ rot32(w2 + c, 11);
      uint32_t d13 = w1 + rot32(w3 + d, 17);
      c ^= rot32(b + w1, 7);
      d ^= rot32(a + w0, 3);
      b = prime32_1 * (c02 + w3);
      a = prime32_0 * (d13 ^ w2);

      data = (const uint32_t *)data + 4;
    } while (likely(data < detent));

    c += a;
    d += b;
    a ^= prime32_6 * (rot32(c, 16) + d);
    b ^= prime32_5 * (c + rot32(d, 16));

    len &= 15;
  }

  const uint8_t *v = (const uint8_t *)data;
  if (unlikely(need_align) && len > 4)
    v = (const uint8_t *)memcpy(&align, unaligned(v), len);

  switch (len) {
  default:
    mixup32(&a, &b, fetch32_le(v), prime32_4);
    v += 4;
  /* fall through */
  case 12:
  case 11:
  case 10:
  case 9:
    mixup32(&b, &a, fetch32_le(v), prime32_3);
    v += 4;
  /* fall through */
  case 8:
  case 7:
  case 6:
  case 5:
    mixup32(&a, &b, fetch32_le(v), prime32_2);
    v += 4;
  /* fall through */
  case 4:
  case 3:
  case 2:
  case 1:
    mixup32(&b, &a, tail32_le(v, len), prime32_1);
  /* fall through */
  case 0:
    return final32(a, b);
  }
}

uint64_t t1ha0_32be(const void *data, size_t len, uint64_t seed) {
  uint32_t a = rot32((uint32_t)len, 17) + (uint32_t)seed;
  uint32_t b = (uint32_t)len ^ (uint32_t)(seed >> 32);

  const int need_align = (((uintptr_t)data) & 3) != 0 && !UNALIGNED_OK;
  uint32_t align[4];

  if (unlikely(len > 16)) {
    uint32_t c = ~a;
    uint32_t d = rot32(b, 5);
    const void *detent = (const uint8_t *)data + len - 15;
    do {
      const uint32_t *v = (const uint32_t *)data;
      if (unlikely(need_align))
        v = (const uint32_t *)memcpy(&align, unaligned(v), 16);

      uint32_t w0 = fetch32_be(v + 0);
      uint32_t w1 = fetch32_be(v + 1);
      uint32_t w2 = fetch32_be(v + 2);
      uint32_t w3 = fetch32_be(v + 3);

      uint32_t c02 = w0 ^ rot32(w2 + c, 11);
      uint32_t d13 = w1 + rot32(w3 + d, 17);
      c ^= rot32(b + w1, 7);
      d ^= rot32(a + w0, 3);
      b = prime32_1 * (c02 + w3);
      a = prime32_0 * (d13 ^ w2);

      data = (const uint32_t *)data + 4;
    } while (likely(data < detent));

    c += a;
    d += b;
    a ^= prime32_6 * (rot32(c, 16) + d);
    b ^= prime32_5 * (c + rot32(d, 16));

    len &= 15;
  }

  const uint8_t *v = (const uint8_t *)data;
  if (unlikely(need_align) && len > 4)
    v = (const uint8_t *)memcpy(&align, unaligned(v), len);

  switch (len) {
  default:
    mixup32(&a, &b, fetch32_be(v), prime32_4);
    v += 4;
  /* fall through */
  case 12:
  case 11:
  case 10:
  case 9:
    mixup32(&b, &a, fetch32_be(v), prime32_3);
    v += 4;
  /* fall through */
  case 8:
  case 7:
  case 6:
  case 5:
    mixup32(&a, &b, fetch32_be(v), prime32_2);
    v += 4;
  /* fall through */
  case 4:
  case 3:
  case 2:
  case 1:
    mixup32(&b, &a, tail32_be(v, len), prime32_1);
  /* fall through */
  case 0:
    return final32(a, b);
  }
}

/***************************************************************************/

#if T1HA0_RUNTIME_SELECT

#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
static uint64_t x86_cpu_features(void) {
  uint32_t features = 0;
  uint32_t extended = 0;
#ifdef __GNUC__
  uint32_t eax, ebx, ecx, edx;
  const unsigned cpuid_max = __get_cpuid_max(0, NULL);
  if (cpuid_max >= 1) {
    __cpuid_count(1, 0, eax, ebx, features, edx);
    if (cpuid_max >= 7)
      __cpuid_count(7, 0, eax, extended, ecx, edx);
  }
#elif defined(_MSC_VER)
  int info[4];
  __cpuid(info, 0);
  const unsigned cpuid_max = info[0];
  if (cpuid_max >= 1) {
    __cpuidex(info, 1, 0);
    features = info[2];
    if (cpuid_max >= 7) {
      __cpuidex(info, 7, 0);
      extended = info[1];
    }
  }
#endif
  return features | (uint64_t)extended << 32;
}
#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
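
/* The resolver below is evaluated once (at load time via ifunc, from a
 * constructor, or lazily through a proxy; see the bottom of this file).
 * It prefers the AES-NI based t1ha0_ia32aes_noavx when CPUID.1:ECX bit 25
 * (the 0x02000000 mask) reports AES-NI support, and otherwise falls back
 * to the 64-bit t1ha1_le/t1ha1_be or the 32-bit t1ha0_32le/t1ha0_32be
 * variants according to the target's word size and byte order. */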
static
#if __GNUC_PREREQ(4, 0) || __has_attribute(used)
    __attribute__((used))
#endif
    uint64_t (*t1ha0_resolve(void))(const void *, size_t, uint64_t) {

#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
  uint64_t features = x86_cpu_features();
  if (features & UINT32_C(0x02000000) /* check for AES-NI */) {
    return t1ha0_ia32aes_noavx;
  }
#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
  return t1ha1_be;
#else
  return t1ha0_32be;
#endif
#else /* __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
  return t1ha1_le;
#else
  return t1ha0_32le;
#endif
#endif /* __BYTE_ORDER__ */
}

#ifdef __ELF__

#if __has_attribute(ifunc)
uint64_t t1ha0(const void *data, size_t len, uint64_t seed)
    __attribute__((ifunc("t1ha0_resolve")));
#else
__asm("\t.globl\tt1ha0\n\t.type\tt1ha0, "
      "%gnu_indirect_function\n\t.set\tt1ha0,t1ha0_resolve");
#endif /* ifunc */

#elif __GNUC_PREREQ(4, 0) || __has_attribute(constructor)

uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t);

static void __attribute__((constructor)) t1ha0_init(void) {
  t1ha0_funcptr = t1ha0_resolve();
}

#else /* ELF */

static uint64_t t1ha0_proxy(const void *data, size_t len, uint64_t seed) {
  t1ha0_funcptr = t1ha0_resolve();
  return t1ha0_funcptr(data, len, seed);
}

uint64_t (*t1ha0_funcptr)(const void *, size_t, uint64_t) = t1ha0_proxy;

#endif /* !ELF */
#endif /* T1HA0_RUNTIME_SELECT */
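
For reference, a minimal caller sketch (not part of t1ha0.c). It assumes the library's public header is named t1ha.h and exposes the t1ha0() entry point resolved above, and that the program is linked against the t1ha library (e.g. something like "cc example.c -lt1ha"); the header name and build flags are illustrative, not taken from this file.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #include "t1ha.h" /* assumed public header exposing t1ha0() */

    int main(void) {
      const char msg[] = "The quick brown fox jumps over the lazy dog";
      /* On ELF targets t1ha0 is an ifunc bound to the implementation chosen
       * by t1ha0_resolve(); elsewhere the call is assumed to be routed
       * through the t1ha0_funcptr set up above. */
      uint64_t h = t1ha0(msg, strlen(msg), UINT64_C(42));
      printf("t1ha0 = %016llx\n", (unsigned long long)h);
      return 0;
    }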