Use PTHREAD_PROCESS_SHARED mutexes if possible.
author     Vsevolod Stakhov <vsevolod@highsecure.ru>
           Sat, 1 Nov 2014 22:11:15 +0000 (22:11 +0000)
committer  Vsevolod Stakhov <vsevolod@highsecure.ru>
           Sat, 1 Nov 2014 22:11:15 +0000 (22:11 +0000)
CMakeLists.txt
config.h.in
src/libutil/mem_pool.c
src/libutil/mem_pool.h
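
Before the diffs, a minimal standalone sketch of the technique this commit switches to (illustration only, not rspamd code): a pthread mutex is placed in an anonymous shared mapping and initialised with the PTHREAD_PROCESS_SHARED attribute, so a parent and a forked worker serialise on the same kernel-backed lock instead of busy-waiting on an atomic flag. Compile with -pthread; error handling is trimmed for brevity.

#define _GNU_SOURCE             /* for MAP_ANONYMOUS on some libcs */
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
	pthread_mutexattr_t mattr;
	pthread_mutex_t *mtx;
	int *counter;

	/* Anonymous shared mapping: visible to parent and child after fork() */
	mtx = mmap (NULL, sizeof (*mtx) + sizeof (*counter),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	counter = (int *)(mtx + 1);
	*counter = 0;

	/* Mark the mutex as usable from several processes, not just threads */
	pthread_mutexattr_init (&mattr);
	pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init (mtx, &mattr);
	pthread_mutexattr_destroy (&mattr);

	if (fork () == 0) {
		/* Child worker: takes the very same kernel-backed lock */
		pthread_mutex_lock (mtx);
		(*counter)++;
		pthread_mutex_unlock (mtx);
		_exit (0);
	}

	pthread_mutex_lock (mtx);
	(*counter)++;
	pthread_mutex_unlock (mtx);

	wait (NULL);
	printf ("counter = %d\n", *counter);    /* prints 2 */

	pthread_mutex_destroy (mtx);
	munmap (mtx, sizeof (*mtx) + sizeof (*counter));
	return 0;
}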

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 34e07940b5967877bd85306ec5f8daf457b8c4a1..a00d9cc483125cbab5a6a1dc2dad456c01d6e631 100644
@@ -748,6 +748,7 @@ CHECK_SYMBOL_EXISTS(_SC_NPROCESSORS_ONLN unistd.h HAVE_SC_NPROCESSORS_ONLN)
 CHECK_SYMBOL_EXISTS(setbit sys/param.h PARAM_H_HAS_BITSET)
 CHECK_SYMBOL_EXISTS(getaddrinfo "sys/types.h;sys/socket.h;netdb.h" HAVE_GETADDRINFO)
 CHECK_SYMBOL_EXISTS(sched_yield "sched.h" HAVE_SCHED_YIELD)
+CHECK_SYMBOL_EXISTS(PTHREAD_PROCESS_SHARED "pthread.h" HAVE_PTHREAD_PROCESS_SHARED)
 
 IF(NOT HAVE_GETADDRINFO)
        MESSAGE(FATAL_ERROR "Your system does not support getaddrinfo call, please consider upgrading it to run rspamd")
diff --git a/config.h.in b/config.h.in
index ba57103f851bf3cebdb1aea89ced454e7e87cf9b..158140c9cc23ca0d756b487cc51b03df7872fa91 100644
 
 #cmakedefine HAVE_CTYPE_H        1
 #cmakedefine HAVE_SCHED_YEILD    1
+#cmakedefine HAVE_PTHREAD_PROCESS_SHARED 1
 
 /* Configure allocator */
 #define uthash_malloc(sz) g_slice_alloc(sz)
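
For context on the build plumbing (a sketch, not part of the commit): the CHECK_SYMBOL_EXISTS probe added to CMakeLists.txt decides whether the #cmakedefine above expands to a real #define in the generated config.h, and the C sources then pick the lock backend purely at compile time, roughly like this:

#include <stdio.h>
#include "config.h"     /* generated by CMake from config.h.in */

int
main (void)
{
#ifdef HAVE_PTHREAD_PROCESS_SHARED
	puts ("locking backend: pthread mutexes with PTHREAD_PROCESS_SHARED");
#else
	puts ("locking backend: hand-rolled atomic spinlock emulation");
#endif
	return 0;
}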
diff --git a/src/libutil/mem_pool.c b/src/libutil/mem_pool.c
index 810c85d0ce72dca415eef6e2910a6547a4b861bc..6c9ee6f1637ab4cf3786bd73834eeb55d71f04ab 100644
@@ -483,50 +483,6 @@ memory_pool_find_pool (rspamd_mempool_t * pool, void *pointer)
        return NULL;
 }
 
-static inline gint
-__mutex_spin (rspamd_mempool_mutex_t * mutex)
-{
-       /* check spin count */
-       if (g_atomic_int_dec_and_test (&mutex->spin)) {
-               /* This may be deadlock, so check owner of this lock */
-               if (mutex->owner == getpid ()) {
-                       /* This mutex was locked by calling process, so it is just double lock and we can easily unlock it */
-                       g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
-                       return 0;
-               }
-               else if (kill (mutex->owner, 0) == -1) {
-                       /* Owner process was not found, so release lock */
-                       g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
-                       return 0;
-               }
-               /* Spin again */
-               g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
-       }
-
-#ifdef HAVE_SCHED_YIELD
-       (void)sched_yield ();
-#elif defined(HAVE_NANOSLEEP)
-       struct timespec ts;
-       ts.tv_sec = 0;
-       ts.tv_nsec = MUTEX_SLEEP_TIME;
-       /* Spin */
-       while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
-#else
-#       error No methods to spin are defined
-#endif
-       return 1;
-}
-
-static void
-memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
-{
-       while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
-               if (!__mutex_spin (mutex)) {
-                       return;
-               }
-       }
-}
-
 /* Simple implementation of spinlock */
 void
 rspamd_mempool_lock_shared (rspamd_mempool_t * pool, void *pointer)
@@ -718,6 +674,54 @@ rspamd_mempool_suggest_size (void)
 #endif
 }
 
+#ifndef HAVE_PTHREAD_PROCESS_SHARED
+/*
+ * Own emulation
+ */
+static inline gint
+__mutex_spin (rspamd_mempool_mutex_t * mutex)
+{
+       /* check spin count */
+       if (g_atomic_int_dec_and_test (&mutex->spin)) {
+               /* This may be deadlock, so check owner of this lock */
+               if (mutex->owner == getpid ()) {
+                       /* This mutex was locked by calling process, so it is just double lock and we can easily unlock it */
+                       g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
+                       return 0;
+               }
+               else if (kill (mutex->owner, 0) == -1) {
+                       /* Owner process was not found, so release lock */
+                       g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
+                       return 0;
+               }
+               /* Spin again */
+               g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
+       }
+
+#ifdef HAVE_SCHED_YIELD
+       (void)sched_yield ();
+#elif defined(HAVE_NANOSLEEP)
+       struct timespec ts;
+       ts.tv_sec = 0;
+       ts.tv_nsec = MUTEX_SLEEP_TIME;
+       /* Spin */
+       while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
+#else
+#       error No methods to spin are defined
+#endif
+       return 1;
+}
+
+static void
+memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
+{
+       while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
+               if (!__mutex_spin (mutex)) {
+                       return;
+               }
+       }
+}
+
 rspamd_mempool_mutex_t *
 rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
 {
@@ -798,6 +802,89 @@ rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
 {
        rspamd_mempool_unlock_mutex (lock->__w_lock);
 }
+#else
+
+/*
+ * Pthread-based shared mutexes
+ */
+rspamd_mempool_mutex_t *
+rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
+{
+       rspamd_mempool_mutex_t *res;
+       pthread_mutexattr_t mattr;
+
+       if (pool != NULL) {
+               res =
+                       rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
+
+               pthread_mutexattr_init (&mattr);
+               pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
+               pthread_mutex_init (res, &mattr);
+               rspamd_mempool_add_destructor (pool,
+                               (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
+
+               return res;
+       }
+       return NULL;
+}
+
+void
+rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
+{
+       pthread_mutex_lock (mutex);
+}
+
+void
+rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
+{
+       pthread_mutex_unlock (mutex);
+}
+
+rspamd_mempool_rwlock_t *
+rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
+{
+       rspamd_mempool_rwlock_t *res;
+       pthread_rwlockattr_t mattr;
+
+       if (pool != NULL) {
+               res =
+                       rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
+
+               pthread_rwlockattr_init (&mattr);
+               pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
+               pthread_rwlock_init (res, &mattr);
+               rspamd_mempool_add_destructor (pool,
+                               (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
+
+               return res;
+       }
+       return NULL;
+}
+
+void
+rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
+{
+       pthread_rwlock_rdlock (lock);
+}
+
+void
+rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
+{
+       pthread_rwlock_wrlock (lock);
+}
+
+void
+rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
+{
+       pthread_rwlock_unlock (lock);
+}
+
+void
+rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
+{
+       pthread_rwlock_unlock (lock);
+}
+#endif
 
 void
 rspamd_mempool_set_variable (rspamd_mempool_t *pool,
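
A hedged usage sketch of the pool-backed locks shown above (the demo_* names and the include are hypothetical; only the rspamd_mempool_* calls come from the diff): the mutex is allocated from the pool's shared region, so any worker that inherits the pointer after fork() contends on the same lock, and pthread_mutex_destroy is already registered as a pool destructor, so no explicit cleanup is needed.

#include "mem_pool.h"   /* src/libutil/mem_pool.h, as modified below */

/* Sketch only: demo_* is hypothetical */
static rspamd_mempool_mutex_t *demo_mtx;

static void
demo_init (rspamd_mempool_t *pool)
{
	/* Allocate one shared mutex for the pool's lifetime */
	demo_mtx = rspamd_mempool_get_mutex (pool);
}

static void
demo_bump (gint *shared_counter)
{
	rspamd_mempool_lock_mutex (demo_mtx);   /* safe across worker processes */
	(*shared_counter)++;
	rspamd_mempool_unlock_mutex (demo_mtx);
}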
diff --git a/src/libutil/mem_pool.h b/src/libutil/mem_pool.h
index 6d59e27c7979ca7859a9d0cc8316b85fe092b62e..7403ced29bec05a1893885af52fd0c40602d52c8 100644
@@ -13,7 +13,9 @@
 #define RSPAMD_MEM_POOL_H
 
 #include "config.h"
-
+#ifdef HAVE_PTHREAD_PROCESS_SHARED
+#include <pthread.h>
+#endif
 
 struct f_str_s;
 
@@ -29,11 +31,23 @@ typedef void (*rspamd_mempool_destruct_t)(void *ptr);
 /**
  * Pool mutex structure
  */
+#ifndef HAVE_PTHREAD_PROCESS_SHARED
 typedef struct memory_pool_mutex_s {
        gint lock;
        pid_t owner;
        guint spin;
 } rspamd_mempool_mutex_t;
+/**
+ * Rwlock for locking shared memory regions
+ */
+typedef struct memory_pool_rwlock_s {
+       rspamd_mempool_mutex_t *__r_lock;                           /**< read mutex (private)                                                           */
+       rspamd_mempool_mutex_t *__w_lock;                           /**< write mutex (private)                                                          */
+} rspamd_mempool_rwlock_t;
+#else
+typedef pthread_mutex_t rspamd_mempool_mutex_t;
+typedef pthread_rwlock_t rspamd_mempool_rwlock_t;
+#endif
 
 /**
  * Pool page structure
@@ -95,13 +109,7 @@ typedef struct memory_pool_stat_s {
        guint oversized_chunks;             /**< oversized chunks                                                                       */
 } rspamd_mempool_stat_t;
 
-/**
- * Rwlock for locking shared memory regions
- */
-typedef struct memory_pool_rwlock_s {
-       rspamd_mempool_mutex_t *__r_lock;                           /**< read mutex (private)                                                           */
-       rspamd_mempool_mutex_t *__w_lock;                           /**< write mutex (private)                                                          */
-} rspamd_mempool_rwlock_t;
+
 
 /**
  * Allocate new memory poll