Diffstat (limited to 'mem_pool.c')
 mem_pool.c | 172 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 172 insertions(+), 0 deletions(-)
diff --git a/mem_pool.c b/mem_pool.c
index 00986c869..f720138dc 100644
--- a/mem_pool.c
+++ b/mem_pool.c
@@ -2,8 +2,23 @@
#include <glib.h>
#include <string.h>
#include <stdlib.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include "config.h"
+
+#ifdef HAVE_SCHED_YIELD
+#include <sched.h>
+#endif
+
+#ifdef HAVE_NANOSLEEP
+#include <time.h>
+#endif
+
#include "mem_pool.h"
+/* Time to sleep between spin lock attempts, in nanoseconds (10 ms) */
+#define MUTEX_SLEEP_TIME 10000000L
+
#ifdef _THREAD_SAFE
pthread_mutex_t stat_mtx = PTHREAD_MUTEX_INITIALIZER;
#define STAT_LOCK() do { pthread_mutex_lock (&stat_mtx); } while (0)
@@ -27,6 +42,7 @@ pthread_mutex_t stat_mtx = PTHREAD_MUTEX_INITIALIZER;
static size_t bytes_allocated = 0;
static size_t chunks_allocated = 0;
static size_t chunks_freed = 0;
+static size_t shared_chunks_allocated = 0;
static struct _pool_chain *
pool_chain_new (size_t size)
@@ -44,6 +60,43 @@ pool_chain_new (size_t size)
return chain;
}
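+/*
+ * Allocate a new chain element in shared memory. The chunk is created
+ * with mmap(MAP_SHARED), so it remains visible to child processes
+ * created by fork() after the allocation.
+ */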
+static struct _pool_chain_shared *
+pool_chain_new_shared (size_t size)
+{
+ struct _pool_chain_shared *chain;
+
+#if defined(HAVE_MMAP_ANON)
+ chain = mmap (NULL, size + sizeof (struct _pool_chain_shared), PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1, 0);
+ if (chain == MAP_FAILED) {
+ return NULL;
+ }
+ chain->begin = ((u_char *)chain) + sizeof (struct _pool_chain_shared);
+#elif defined(HAVE_MMAP_ZERO)
+ int fd;
+
+ fd = open ("/dev/zero", O_RDWR);
+ if (fd == -1) {
+ return NULL;
+ }
+ chain = mmap (NULL, size + sizeof (struct _pool_chain_shared), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ /* The mapping stays valid after the descriptor is closed */
+ close (fd);
+ if (chain == MAP_FAILED) {
+ return NULL;
+ }
+ chain->begin = ((u_char *)chain) + sizeof (struct _pool_chain_shared);
+#else
+# error No mmap methods are defined
+#endif
+ chain->len = size;
+ chain->pos = chain->begin;
+ chain->lock = 0;
+ chain->next = NULL;
+ STAT_LOCK ();
+ shared_chunks_allocated ++;
+ STAT_UNLOCK ();
+
+ return chain;
+}
+
memory_pool_t*
memory_pool_new (size_t size)
{
@@ -51,6 +104,7 @@ memory_pool_new (size_t size)
new = g_malloc (sizeof (memory_pool_t));
new->cur_pool = pool_chain_new (size);
+ new->shared_pool = NULL;
new->first_pool = new->cur_pool;
new->destructors = NULL;
@@ -126,6 +180,112 @@ memory_pool_strdup (memory_pool_t *pool, const char *src)
return newstr;
}
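+/*
+ * Allocate a block of the given size from the shared pool chain,
+ * growing the chain with a new shared chunk when no existing element
+ * has enough free space.
+ */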
+void *
+memory_pool_alloc_shared (memory_pool_t *pool, size_t size)
+{
+ u_char *tmp;
+ struct _pool_chain_shared *new, *cur;
+
+ if (pool) {
+ cur = pool->shared_pool;
+ if (!cur) {
+ cur = pool_chain_new_shared (pool->first_pool->len);
+ if (cur == NULL) {
+ return NULL;
+ }
+ pool->shared_pool = cur;
+ }
+
+ /* Find free space in pool chain */
+ while (memory_pool_free (cur) < size && cur->next) {
+ cur = cur->next;
+ }
+ if (cur->next == NULL && memory_pool_free (cur) < size) {
+ /* Allocate new pool */
+ if (cur->len >= size) {
+ new = pool_chain_new_shared (cur->len);
+ }
+ else {
+ new = pool_chain_new_shared (size + cur->len);
+ }
+ if (new == NULL) {
+ return NULL;
+ }
+ /* Attach new pool to chain */
+ cur->next = new;
+ new->pos += size;
+ STAT_LOCK ();
+ bytes_allocated += size;
+ STAT_UNLOCK ();
+ return new->begin;
+ }
+ tmp = cur->pos;
+ cur->pos += size;
+ STAT_LOCK ();
+ bytes_allocated += size;
+ STAT_UNLOCK ();
+ return tmp;
+ }
+ return NULL;
+}
+
+/* Find pool for a pointer, returns NULL if pointer is not in pool */
+static struct _pool_chain_shared *
+memory_pool_find_pool (memory_pool_t *pool, void *pointer)
+{
+ struct _pool_chain_shared *cur = pool->shared_pool;
+
+ while (cur) {
+ if ((u_char *)pointer >= cur->begin && (u_char *)pointer < (cur->begin + cur->len)) {
+ return cur;
+ }
+ cur = cur->next;
+ }
+
+ return NULL;
+}
+
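+/*
+ * Busy-wait until the chunk's lock is released: each failed
+ * compare-and-exchange is followed by a short sleep and/or a yield
+ * so other processes can make progress.
+ */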
+static void
+memory_pool_spin (struct _pool_chain_shared *chain)
+{
+ while (!g_atomic_int_compare_and_exchange (&chain->lock, 0, 1)) {
+ /* Lock is held by another process, wait before retrying */
+#ifdef HAVE_NANOSLEEP
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = MUTEX_SLEEP_TIME;
+ /* Sleep, resuming if interrupted by a signal */
+ while (nanosleep (&ts, &ts) == -1 && errno == EINTR);
+#endif
+#ifdef HAVE_SCHED_YIELD
+ (void)sched_yield ();
+#endif
+#if !defined(HAVE_NANOSLEEP) && !defined(HAVE_SCHED_YIELD)
+# error No methods to spin are defined
+#endif
+ }
+}
+
+/* Simple spinlock implementation: lock the shared chunk that owns the pointer */
+void
+memory_pool_lock_shared (memory_pool_t *pool, void *pointer)
+{
+ struct _pool_chain_shared *chain;
+
+ chain = memory_pool_find_pool (pool, pointer);
+ if (chain == NULL) {
+ return;
+ }
+
+ memory_pool_spin (chain);
+}
+
+void
+memory_pool_unlock_shared (memory_pool_t *pool, void *pointer)
+{
+ struct _pool_chain_shared *chain;
+
+ chain = memory_pool_find_pool (pool, pointer);
+ if (chain == NULL) {
+ return;
+ }
+
+ /* Release the lock by dropping the counter back to zero */
+ (void)g_atomic_int_dec_and_test (&chain->lock);
+}
+
void
memory_pool_add_destructor (memory_pool_t *pool, pool_destruct_func func, void *data)
{
@@ -144,6 +304,7 @@ void
memory_pool_delete (memory_pool_t *pool)
{
struct _pool_chain *cur = pool->first_pool, *tmp;
+ struct _pool_chain_shared *cur_shared = pool->shared_pool, *tmp_shared;
struct _pool_destructors *destructor = pool->destructors;
/* Call all pool destructors */
@@ -161,6 +322,16 @@ memory_pool_delete (memory_pool_t *pool)
chunks_freed ++;
STAT_UNLOCK ();
}
+ /* Unmap shared memory */
+ while (cur_shared) {
+ tmp_shared = cur_shared;
+ cur_shared = cur_shared->next;
+ munmap (tmp_shared, tmp_shared->len + sizeof (struct _pool_chain_shared));
+ STAT_LOCK ();
+ chunks_freed ++;
+ STAT_UNLOCK ();
+ }
+
g_free (pool);
}
@@ -169,6 +340,7 @@ memory_pool_stat (memory_pool_stat_t *st)
{
st->bytes_allocated = bytes_allocated;
st->chunks_allocated = chunks_allocated;
+ st->shared_chunks_allocated = shared_chunks_allocated;
st->chunks_freed = chunks_freed;
}
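
For illustration, a minimal usage sketch of the shared pool API added by this
patch. The pool size, the counter, and the fork/wait scaffolding are arbitrary
choices for the example, not part of the patch:

#include <unistd.h>
#include <sys/wait.h>
#include "mem_pool.h"

int
main (void)
{
    memory_pool_t *pool = memory_pool_new (1024);
    /* Allocated from an mmap'ed MAP_SHARED chunk, so the child sees it */
    int *counter = memory_pool_alloc_shared (pool, sizeof (int));

    *counter = 0;
    if (fork () == 0) {
        /* Child: serialize the update with the chunk's spin lock */
        memory_pool_lock_shared (pool, counter);
        (*counter)++;
        memory_pool_unlock_shared (pool, counter);
        _exit (0);
    }

    memory_pool_lock_shared (pool, counter);
    (*counter)++;
    memory_pool_unlock_shared (pool, counter);

    wait (NULL);
    /* Both increments are visible here: *counter == 2 */
    memory_pool_delete (pool);
    return 0;
}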