/*
 * Concurrency helpers for the memory pool allocator.
 *
 * Pool statistics (mem_pool_stat) are updated with g_atomic_int_add() /
 * g_atomic_int_inc(), so the former pthread-based STAT_LOCK/STAT_UNLOCK
 * mutex is no longer needed; only the per-pool mutex remains.
 */
#define MUTEX_SLEEP_TIME 10000000L /* ns slept between lock attempts once spinning gives up */
#define MUTEX_SPIN_COUNT 100       /* spin iterations before yielding/sleeping */

/* Serialize access to a single pool's chain list; 'pool' must be in scope. */
#define POOL_MTX_LOCK() do { rspamd_mutex_lock (pool->mtx); } while (0)
#define POOL_MTX_UNLOCK() do { rspamd_mutex_unlock (pool->mtx); } while (0)
chain->pos = align_ptr (chain->begin, MEM_ALIGNMENT);
chain->len = size;
chain->next = NULL;
- STAT_LOCK ();
- mem_pool_stat->bytes_allocated += size;
- mem_pool_stat->chunks_allocated++;
- STAT_UNLOCK ();
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
+ g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
return chain;
}
chain->len = size;
chain->lock = NULL;
chain->next = NULL;
- STAT_LOCK ();
- mem_pool_stat->shared_chunks_allocated++;
- mem_pool_stat->bytes_allocated += size;
- STAT_UNLOCK ();
+
+ g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
return chain;
}
/* Attach new pool to chain */
cur->next = new;
new->pos += size;
- STAT_LOCK ();
- mem_pool_stat->bytes_allocated += size;
- STAT_UNLOCK ();
- POOL_MTX_UNLOCK ()
- ;
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
+
+ POOL_MTX_UNLOCK ();
return new->begin;
}
tmp = align_ptr (cur->pos, MEM_ALIGNMENT);
cur->pos = tmp + size;
- POOL_MTX_UNLOCK ()
- ;
+ POOL_MTX_UNLOCK ();
return tmp;
}
return NULL;
/* Spin again */
g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
}
-#ifdef HAVE_ASM_PAUSE
- __asm __volatile ("pause");
-#elif defined(HAVE_SCHED_YIELD)
- (void)sched_yield ();
-#endif
-#if defined(HAVE_NANOSLEEP)
+#ifdef HAVE_SCHED_YIELD
+ (void)sched_yield ();
+#elif defined(HAVE_NANOSLEEP)
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = MUTEX_SLEEP_TIME;
while (cur) {
tmp = cur;
cur = cur->next;
- STAT_LOCK ();
- mem_pool_stat->chunks_freed++;
- mem_pool_stat->bytes_allocated -= tmp->len;
- STAT_UNLOCK ();
+ g_atomic_int_inc (&mem_pool_stat->chunks_freed);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, -tmp->len);
g_slice_free1 (tmp->len, tmp->begin);
g_slice_free (struct _pool_chain, tmp);
}
while (cur) {
tmp = cur;
cur = cur->next;
- STAT_LOCK ();
- mem_pool_stat->chunks_freed++;
- mem_pool_stat->bytes_allocated -= tmp->len;
- STAT_UNLOCK ();
+ g_atomic_int_inc (&mem_pool_stat->chunks_freed);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, -tmp->len);
g_slice_free1 (tmp->len, tmp->begin);
g_slice_free (struct _pool_chain, tmp);
}
while (cur_shared) {
tmp_shared = cur_shared;
cur_shared = cur_shared->next;
- STAT_LOCK ();
- mem_pool_stat->chunks_freed++;
- mem_pool_stat->bytes_allocated -= tmp_shared->len;
- STAT_UNLOCK ();
+ g_atomic_int_inc (&mem_pool_stat->chunks_freed);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, -tmp_shared->len);
munmap ((void *)tmp_shared, tmp_shared->len +
sizeof (struct _pool_chain_shared));
}
g_hash_table_destroy (pool->variables);
}
- mem_pool_stat->pools_freed++;
+ g_atomic_int_inc (&mem_pool_stat->pools_freed);
POOL_MTX_UNLOCK ();
rspamd_mutex_free (pool->mtx);
g_slice_free (rspamd_mempool_t, pool);
while (cur) {
tmp = cur;
cur = cur->next;
- STAT_LOCK ();
- mem_pool_stat->chunks_freed++;
- mem_pool_stat->bytes_allocated -= tmp->len;
- STAT_UNLOCK ();
+ g_atomic_int_inc (&mem_pool_stat->chunks_freed);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, -tmp->len);
g_slice_free1 (tmp->len, tmp->begin);
g_slice_free (struct _pool_chain, tmp);
}
- mem_pool_stat->pools_freed++;
+ g_atomic_int_inc (&mem_pool_stat->pools_freed);
POOL_MTX_UNLOCK ();
}
* Statistics structure
*/
typedef struct memory_pool_stat_s {
- gsize pools_allocated; /**< total number of allocated pools */
- gsize pools_freed; /**< number of freed pools */
- gsize bytes_allocated; /**< bytes that are allocated with pool allocator */
- gsize chunks_allocated; /**< number of chunks that are allocated */
- gsize shared_chunks_allocated; /**< shared chunks allocated */
- gsize chunks_freed; /**< chunks freed */
- gsize oversized_chunks; /**< oversized chunks */
+ guint pools_allocated; /**< total number of allocated pools */
+ guint pools_freed; /**< number of freed pools */
+ guint bytes_allocated; /**< bytes that are allocated with pool allocator */
+ guint chunks_allocated; /**< number of chunks that are allocated */
+ guint shared_chunks_allocated; /**< shared chunks allocated */
+ guint chunks_freed; /**< chunks freed */
+ guint oversized_chunks; /**< oversized chunks */
} rspamd_mempool_stat_t;
/**