summaryrefslogtreecommitdiffstats
path: root/contrib
diff options
context:
space:
mode:
authorVsevolod Stakhov <vsevolod@highsecure.ru>2016-05-11 15:14:49 +0100
committerVsevolod Stakhov <vsevolod@highsecure.ru>2016-05-11 15:14:49 +0100
commitbcb5eaadd0b810b399fbdb133253bbc9ec7f060e (patch)
treec033bc4de05344b6003b420506cec1923f8746dd /contrib
parentec82bfc93c4142c2d8d4ce2591f414284c322505 (diff)
downloadrspamd-bcb5eaadd0b810b399fbdb133253bbc9ec7f060e.tar.gz
rspamd-bcb5eaadd0b810b399fbdb133253bbc9ec7f060e.zip
[Fix] Fix compilation issue
Diffstat (limited to 'contrib')
-rw-r--r--contrib/mumhash/mum.h8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/contrib/mumhash/mum.h b/contrib/mumhash/mum.h
index 9391de0a6..a9a661661 100644
--- a/contrib/mumhash/mum.h
+++ b/contrib/mumhash/mum.h
@@ -232,7 +232,7 @@ _mum_final (uint64_t h) {
}
#if defined(__x86_64__) && defined(__GNUC__)
-
+#if 0
/* We want to use AVX2 insn MULX instead of generic x86-64 MULQ where
it is possible. Although on modern Intel processors MULQ takes
3-cycles vs. 4 for MULX, MULX permits more freedom in insn
@@ -242,6 +242,7 @@ _mum_hash_avx2 (const void * key, size_t len, uint64_t seed) {
return _mum_final (_mum_hash_aligned (seed + len, key, len));
}
#endif
+#endif
#ifndef _MUM_UNALIGNED_ACCESS
#if defined(__x86_64__) || defined(__i386__) || defined(__PPC64__) \
@@ -350,11 +351,12 @@ mum_hash64 (uint64_t key, uint64_t seed) {
static inline uint64_t
mum_hash (const void *key, size_t len, uint64_t seed) {
#if defined(__x86_64__) && defined(__GNUC__)
- static int avx2_support = 0;
+#if 0
+ static int avx2_support = 0;
if (avx2_support > 0)
return _mum_hash_avx2 (key, len, seed);
-#if 0
+
else if (! avx2_support) {
__builtin_cpu_init ();
avx2_support = __builtin_cpu_supports ("avx2") ? 1 : -1;