author     Vsevolod Stakhov <vsevolod@highsecure.ru>   2018-05-22 14:15:47 +0100
committer  Vsevolod Stakhov <vsevolod@highsecure.ru>   2018-05-22 14:15:47 +0100
commit     b6156175b6e0e9a1d8063571f81c428f4aac570f (patch)
tree       6608e0f98d0807497ec9e6abb6e14e447b574231 /contrib/t1ha/t1ha2.c
parent     061d3d2e1da9acf6f17014cefab9c66af7afa1d5 (diff)
[Minor] Backport fixes from t1ha
Diffstat (limited to 'contrib/t1ha/t1ha2.c')
-rw-r--r--  contrib/t1ha/t1ha2.c | 95
1 file changed, 51 insertions, 44 deletions
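Note: the hunks below rework how the t1ha2 one-shot entry points handle unaligned input (compile-time dispatch via T1HA_CONFIG_UNALIGNED_ACCESS instead of copying into a stack buffer). For orientation only, here is a minimal, hypothetical caller of the two public functions whose bodies change in this diff; it assumes the contrib header is t1ha.h, and the payload and seed are arbitrary illustration values.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "t1ha.h" /* declares t1ha2_atonce() and t1ha2_atonce128() */

int main(void) {
  const char msg[] = "an example payload longer than 32 bytes, so T1HA2_LOOP runs";
  const uint64_t seed = 42; /* arbitrary seed, for illustration only */

  /* 64-bit one-shot hash; the changed code selects the aligned or unaligned
   * fetch variant instead of memcpy()ing data into a temporary buffer. */
  uint64_t h64 = t1ha2_atonce(msg, strlen(msg), seed);

  /* 128-bit one-shot hash; the other 64 bits of the result are written
   * through the extra_result pointer. */
  uint64_t extra = 0;
  uint64_t h128_lo = t1ha2_atonce128(&extra, msg, strlen(msg), seed);

  printf("t1ha2_atonce:    %016llx\n", (unsigned long long)h64);
  printf("t1ha2_atonce128: %016llx %016llx\n",
         (unsigned long long)h128_lo, (unsigned long long)extra);
  return 0;
}
```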
diff --git a/contrib/t1ha/t1ha2.c b/contrib/t1ha/t1ha2.c
index 95f646da4..4cb5281ee 100644
--- a/contrib/t1ha/t1ha2.c
+++ b/contrib/t1ha/t1ha2.c
@@ -56,7 +56,7 @@ static __always_inline void init_cd(t1ha_state256_t *s, uint64_t x,
   s->n.d = ~y + rot64(x, 19);
 }
 
-/* TODO C++ template in the next version */
+/* TODO: C++ template in the next version */
 #define T1HA2_UPDATE(ENDIANNES, ALIGNESS, state, v) \
   do { \
     t1ha_state256_t *const s = state; \
@@ -67,8 +67,8 @@ static __always_inline void init_cd(t1ha_state256_t *s, uint64_t x,
     \
     const uint64_t d02 = w0 + rot64(w2 + s->n.d, 56); \
     const uint64_t c13 = w1 + rot64(w3 + s->n.c, 19); \
-    s->n.d ^= s->n.b + rot64(w1, 38); \
     s->n.c ^= s->n.a + rot64(w0, 57); \
+    s->n.d ^= s->n.b + rot64(w1, 38); \
     s->n.b ^= prime_6 * (c13 + w2); \
     s->n.a ^= prime_5 * (d02 + w3); \
   } while (0)
@@ -78,26 +78,23 @@ static __always_inline void squash(t1ha_state256_t *s) {
   s->n.b ^= prime_5 * (rot64(s->n.c, 19) + s->n.d);
 }
 
-/* TODO C++ template in the next version */
-#define T1HA2_LOOP(ENDIANNES, ALIGNESS, BUFFER4COPY, state, data, len) \
+/* TODO: C++ template in the next version */
+#define T1HA2_LOOP(ENDIANNES, ALIGNESS, state, data, len) \
   do { \
     const void *detent = (const uint8_t *)data + len - 31; \
     do { \
       const uint64_t *v = (const uint64_t *)data; \
-      if (BUFFER4COPY != NULL) \
-        memcpy((void *)(v = BUFFER4COPY), data, 32); \
-      T1HA2_UPDATE(le, unaligned, state, v); \
       data = (const uint64_t *)data + 4; \
+      prefetch(data); \
+      T1HA2_UPDATE(le, ALIGNESS, state, v); \
     } while (likely(data < detent)); \
   } while (0)
 
-/* TODO C++ template in the next version */
-#define T1HA2_TAIL_AB(ENDIANNES, ALIGNESS, BUFFER4COPY, state, data, len) \
+/* TODO: C++ template in the next version */
+#define T1HA2_TAIL_AB(ENDIANNES, ALIGNESS, state, data, len) \
   do { \
     t1ha_state256_t *const s = state; \
     const uint64_t *v = (const uint64_t *)data; \
-    if (BUFFER4COPY != NULL) \
-      memcpy((void *)(v = BUFFER4COPY), data, len); \
     switch (len) { \
     default: \
       mixup64(&s->n.a, &s->n.b, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
@@ -141,13 +138,11 @@ static __always_inline void squash(t1ha_state256_t *s) {
     } \
   } while (0)
 
-/* TODO C++ template in the next version */
-#define T1HA2_TAIL_ABCD(ENDIANNES, ALIGNESS, BUFFER4COPY, state, data, len) \
+/* TODO: C++ template in the next version */
+#define T1HA2_TAIL_ABCD(ENDIANNES, ALIGNESS, state, data, len) \
   do { \
     t1ha_state256_t *const s = state; \
     const uint64_t *v = (const uint64_t *)data; \
-    if (BUFFER4COPY != NULL) \
-      memcpy((void *)(v = BUFFER4COPY), data, len); \
     switch (len) { \
     default: \
       mixup64(&s->n.a, &s->n.d, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
@@ -207,26 +202,34 @@ uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed) {
   t1ha_state256_t state;
   init_ab(&state, seed, length);
 
-  const bool need_copy4align =
-      (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0 && !UNALIGNED_OK;
-  if (need_copy4align) {
-    uint64_t buffer4align[4];
+#if T1HA_CONFIG_UNALIGNED_ACCESS == T1HA_CONFIG_UNALIGNED_ACCESS__EFFICIENT
+  if (unlikely(length > 32)) {
+    init_cd(&state, seed, length);
+    T1HA2_LOOP(le, unaligned, &state, data, length);
+    squash(&state);
+    length &= 31;
+  }
+  T1HA2_TAIL_AB(le, unaligned, &state, data, length);
+#else
+  const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+  if (misaligned) {
     if (unlikely(length > 32)) {
       init_cd(&state, seed, length);
-      T1HA2_LOOP(le, aligned, buffer4align, &state, data, length);
+      T1HA2_LOOP(le, unaligned, &state, data, length);
       squash(&state);
       length &= 31;
     }
-    T1HA2_TAIL_AB(le, aligned, buffer4align, &state, data, length);
+    T1HA2_TAIL_AB(le, unaligned, &state, data, length);
   } else {
     if (unlikely(length > 32)) {
       init_cd(&state, seed, length);
-      T1HA2_LOOP(le, unaligned, NULL, &state, data, length);
+      T1HA2_LOOP(le, aligned, &state, data, length);
       squash(&state);
       length &= 31;
     }
-    T1HA2_TAIL_AB(le, unaligned, NULL, &state, data, length);
+    T1HA2_TAIL_AB(le, aligned, &state, data, length);
   }
+#endif
 }
 
 uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result,
@@ -236,22 +239,28 @@ uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result,
   init_ab(&state, seed, length);
   init_cd(&state, seed, length);
 
-  const bool need_copy4align =
-      (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0 && !UNALIGNED_OK;
-  if (need_copy4align) {
-    uint64_t buffer4align[4];
+#if T1HA_CONFIG_UNALIGNED_ACCESS == T1HA_CONFIG_UNALIGNED_ACCESS__EFFICIENT
+  if (unlikely(length > 32)) {
+    T1HA2_LOOP(le, unaligned, &state, data, length);
+    length &= 31;
+  }
+  T1HA2_TAIL_ABCD(le, unaligned, &state, data, length);
+#else
+  const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+  if (misaligned) {
     if (unlikely(length > 32)) {
-      T1HA2_LOOP(le, aligned, buffer4align, &state, data, length);
+      T1HA2_LOOP(le, unaligned, &state, data, length);
       length &= 31;
     }
-    T1HA2_TAIL_ABCD(le, aligned, buffer4align, &state, data, length);
+    T1HA2_TAIL_ABCD(le, unaligned, &state, data, length);
   } else {
     if (unlikely(length > 32)) {
-      T1HA2_LOOP(le, unaligned, NULL, &state, data, length);
+      T1HA2_LOOP(le, aligned, &state, data, length);
       length &= 31;
     }
-    T1HA2_TAIL_ABCD(le, unaligned, NULL, &state, data, length);
+    T1HA2_TAIL_ABCD(le, aligned, &state, data, length);
   }
+#endif
 }
 
 //------------------------------------------------------------------------------
@@ -283,13 +292,16 @@ void t1ha2_update(t1ha_context_t *__restrict ctx, const void *__restrict data,
   }
 
   if (length >= 32) {
-    const bool need_copy4align =
-        (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0 && !UNALIGNED_OK;
-    if (need_copy4align) {
-      T1HA2_LOOP(le, aligned, ctx->buffer.u64, &ctx->state, data, length);
+#if T1HA_CONFIG_UNALIGNED_ACCESS == T1HA_CONFIG_UNALIGNED_ACCESS__EFFICIENT
+    T1HA2_LOOP(le, unaligned, &ctx->state, data, length);
+#else
+    const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+    if (misaligned) {
+      T1HA2_LOOP(le, unaligned, &ctx->state, data, length);
     } else {
-      T1HA2_LOOP(le, unaligned, NULL, &ctx->state, data, length);
+      T1HA2_LOOP(le, aligned, &ctx->state, data, length);
     }
+#endif
     length &= 31;
   }
 
@@ -307,13 +319,8 @@ uint64_t t1ha2_final(t1ha_context_t *__restrict ctx,
 
   if (likely(!extra_result)) {
     squash(&ctx->state);
-    T1HA2_TAIL_AB(le, aligned, NULL, &ctx->state, ctx->buffer.u64,
-                  ctx->partial);
-    return final64(ctx->state.n.a, ctx->state.n.b);
+    T1HA2_TAIL_AB(le, aligned, &ctx->state, ctx->buffer.u64, ctx->partial);
   }
 
-  T1HA2_TAIL_ABCD(le, aligned, NULL, &ctx->state, ctx->buffer.u64,
-                  ctx->partial);
-  return final128(ctx->state.n.a, ctx->state.n.b, ctx->state.n.c,
-                  ctx->state.n.d, extra_result);
-}
\ No newline at end of file
+  T1HA2_TAIL_ABCD(le, aligned, &ctx->state, ctx->buffer.u64, ctx->partial);
+}
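For the streaming path touched above (t1ha2_update()/t1ha2_final()), the following is a minimal sketch of typical usage. It assumes the usual upstream initializer t1ha2_init(ctx, seed_x, seed_y), which is declared in t1ha.h but does not appear in this diff, and the chunk size is an arbitrary illustration value.

```c
#include <stddef.h>
#include <stdint.h>

#include "t1ha.h" /* t1ha_context_t, t1ha2_init(), t1ha2_update(), t1ha2_final() */

/* Hash a buffer incrementally, as if it arrived from a stream. */
static uint64_t hash_stream(const uint8_t *data, size_t len, uint64_t seed) {
  t1ha_context_t ctx;
  t1ha2_init(&ctx, seed, 0); /* assumed upstream signature: two 64-bit seeds */

  const size_t chunk = 64; /* arbitrary chunk size for the example */
  while (len > 0) {
    const size_t n = (len < chunk) ? len : chunk;
    t1ha2_update(&ctx, data, n); /* partial 32-byte blocks are buffered in ctx */
    data += n;
    len -= n;
  }

  /* Passing NULL for extra_result selects the 64-bit (final64) path. */
  return t1ha2_final(&ctx, NULL);
}
```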