{
gint64 occupied = chain->pos - chain->begin + MEM_ALIGNMENT;
- return occupied < (gint64)chain->len ? chain->len - occupied : 0;
-}
-static gsize
-pool_chain_free_shared (struct _pool_chain_shared *chain)
-{
- gint64 occupied = chain->pos - chain->begin + MEM_ALIGNMENT;
-
- return occupied < (gint64)chain->len ? chain->len - occupied : 0;
+ return (occupied < (gint64)chain->len ?
+ chain->len - occupied : 0);
}
static struct _pool_chain *
-pool_chain_new (gsize size)
+rspamd_mempool_chain_new (gsize size, enum rspamd_mempool_chain_type pool_type)
{
struct _pool_chain *chain;
+ gpointer map;
g_return_val_if_fail (size > 0, NULL);
- chain = g_slice_alloc (sizeof (struct _pool_chain));
- chain->begin = g_slice_alloc (size);
+ if (pool_type == RSPAMD_MEMPOOL_SHARED) {
+#if defined(HAVE_MMAP_ANON)
+ map = mmap (NULL,
+ size + sizeof (struct _pool_chain),
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_SHARED,
+ -1,
+ 0);
+ if (map == MAP_FAILED) {
+ msg_err ("cannot allocate %z bytes of shared memory, aborting", size +
+ sizeof (struct _pool_chain));
+ abort ();
+ }
+ chain = map;
+ chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
+#elif defined(HAVE_MMAP_ZERO)
+ gint fd;
+
+ fd = open ("/dev/zero", O_RDWR);
+ if (fd == -1) {
+ return NULL;
+ }
+ map = mmap (NULL,
+ size + sizeof (struct _pool_chain),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED,
+ fd,
+ 0);
+ if (map == MAP_FAILED) {
+ msg_err ("cannot allocate %z bytes, aborting", size +
+ sizeof (struct _pool_chain));
+ abort ();
+ }
+ chain = map;
+ chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
+#else
+#error No mmap methods are defined
+#endif
+ g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
+ }
+ else {
+ chain = g_slice_alloc (sizeof (struct _pool_chain));
+ chain->begin = g_slice_alloc (size);
+ g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
+ g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
+ }
+
chain->pos = align_ptr (chain->begin, MEM_ALIGNMENT);
chain->len = size;
- chain->next = NULL;
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
- g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
+ chain->lock = NULL;
return chain;
}
-static struct _pool_chain_shared *
-pool_chain_new_shared (gsize size)
+static void
+rspamd_mempool_create_pool_type (rspamd_mempool_t * pool,
+ enum rspamd_mempool_chain_type pool_type)
{
- struct _pool_chain_shared *chain;
- gpointer map;
+ gsize preallocated_len;
+ switch (pool_type) {
+ case RSPAMD_MEMPOOL_NORMAL:
+ preallocated_len = 32;
+ break;
+ case RSPAMD_MEMPOOL_SHARED:
+ case RSPAMD_MEMPOOL_TMP:
+ default:
+ preallocated_len = 2;
+ break;
+ }
-#if defined(HAVE_MMAP_ANON)
- map = mmap (NULL,
- size + sizeof (struct _pool_chain_shared),
- PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_SHARED,
- -1,
- 0);
- if (map == MAP_FAILED) {
- msg_err ("cannot allocate %z bytes of shared memory, aborting", size +
- sizeof (struct _pool_chain));
- abort ();
- }
- chain = (struct _pool_chain_shared *)map;
- chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain_shared);
-#elif defined(HAVE_MMAP_ZERO)
- gint fd;
+ pool->pools[pool_type] = g_ptr_array_sized_new (preallocated_len);
+}
- fd = open ("/dev/zero", O_RDWR);
- if (fd == -1) {
- return NULL;
+/**
+ * Get the current pool of the specified type, creating the corresponding
+ * array if it's absent
+ * @param pool
+ * @param pool_type
+ * @return
+ */
+static struct _pool_chain *
+rspamd_mempool_get_chain (rspamd_mempool_t * pool,
+ enum rspamd_mempool_chain_type pool_type)
+{
+ gsize len;
+ g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
+
+ if (pool->pools[pool_type] == NULL) {
+ rspamd_mempool_create_pool_type (pool, pool_type);
}
- map = mmap (NULL,
- size + sizeof (struct _pool_chain_shared),
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fd,
- 0);
- if (map == MAP_FAILED) {
- msg_err ("cannot allocate %z bytes, aborting", size +
- sizeof (struct _pool_chain));
- abort ();
- }
- chain = (struct _pool_chain_shared *)map;
- chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain_shared);
-#else
-# error No mmap methods are defined
-#endif
- chain->pos = align_ptr (chain->begin, MEM_ALIGNMENT);
- chain->len = size;
- chain->lock = NULL;
- chain->next = NULL;
- g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, size);
+ len = pool->pools[pool_type]->len;
- return chain;
+ if (len == 0) {
+ return NULL;
+ }
+
+ return (g_ptr_array_index (pool->pools[pool_type], len - 1));
}
+static void
+rspamd_mempool_append_chain (rspamd_mempool_t * pool,
+ struct _pool_chain *chain,
+ enum rspamd_mempool_chain_type pool_type)
+{
+ g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
+ g_assert (chain != NULL);
+
+ if (pool->pools[pool_type] == NULL) {
+ rspamd_mempool_create_pool_type (pool, pool_type);
+ }
+
+ g_ptr_array_add (pool->pools[pool_type], chain);
+}
/**
* Allocate new memory poll
}
new = g_slice_alloc (sizeof (rspamd_mempool_t));
- new->cur_pool = pool_chain_new (size);
- new->shared_pool = NULL;
- new->cur_pool_tmp = NULL;
- new->destructors = NULL;
+ memset (new->pools, 0, sizeof (gpointer) * RSPAMD_MEMPOOL_MAX);
+ new->destructors = g_array_sized_new (FALSE, FALSE,
+ sizeof (struct _pool_destructors), 32);
+ rspamd_mempool_create_pool_type (new, RSPAMD_MEMPOOL_NORMAL);
/* Set it upon first call of set variable */
new->variables = NULL;
new->elt_len = size;
}
static void *
-memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size, gboolean is_tmp)
+memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
+ enum rspamd_mempool_chain_type pool_type)
{
guint8 *tmp;
struct _pool_chain *new, *cur;
return ptr;
}
- if (is_tmp) {
- cur = pool->cur_pool_tmp;
- }
- else {
- cur = pool->cur_pool;
- }
+
+ cur = rspamd_mempool_get_chain (pool, pool_type);
/* Find free space in pool chain */
if (cur) {
if (cur == NULL || free < size) {
/* Allocate new chain element */
if (pool->elt_len >= size + MEM_ALIGNMENT) {
- new = pool_chain_new (pool->elt_len);
+ new = rspamd_mempool_chain_new (pool->elt_len + MEM_ALIGNMENT,
+ pool_type);
}
else {
mem_pool_stat->oversized_chunks++;
- new = pool_chain_new (
- size + pool->elt_len + MEM_ALIGNMENT);
+ new = rspamd_mempool_chain_new (
+ size + pool->elt_len + MEM_ALIGNMENT, pool_type);
}
+
/* Connect to pool subsystem */
- if (is_tmp) {
- LL_PREPEND (pool->cur_pool_tmp, new);
- }
- else {
- LL_PREPEND (pool->cur_pool, new);
- }
+ rspamd_mempool_append_chain (pool, new, pool_type);
/* No need to align again */
tmp = new->pos;
new->pos = tmp + size;
/**
 * Allocate `size` bytes from the pool's normal chain;
 * memory lives until the pool itself is deleted.
 */
void *
rspamd_mempool_alloc (rspamd_mempool_t * pool, gsize size)
{
	return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_NORMAL);
}
/**
 * Allocate `size` bytes from the temporary chain; this memory is
 * released early by rspamd_mempool_cleanup_tmp, before pool deletion.
 */
void *
rspamd_mempool_alloc_tmp (rspamd_mempool_t * pool, gsize size)
{
	return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_TMP);
}
void *
/**
 * Allocate `size` bytes from the shared (mmap-backed) chain so the
 * memory is visible across forked processes.
 */
void *
rspamd_mempool_alloc_shared (rspamd_mempool_t * pool, gsize size)
{
	/* All bookkeeping is unified in the common allocator path */
	return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_SHARED);
}
newstr = rspamd_mempool_alloc (pool, len + 1);
memcpy (newstr, src, len);
newstr[len] = '\0';
+
return newstr;
}
newstr = rspamd_mempool_alloc (pool, src->len + 1);
memcpy (newstr, src->str, src->len);
newstr[src->len] = '\0';
+
return newstr;
}
newstr = rspamd_mempool_alloc (pool, src->len + 1);
memcpy (newstr, src->begin, src->len);
newstr[src->len] = '\0';
+
return newstr;
}
newstr = rspamd_mempool_alloc_shared (pool, len + 1);
memcpy (newstr, src, len);
newstr[len] = '\0';
- return newstr;
-}
-
-/* Find pool for a pointer, returns NULL if pointer is not in pool */
-static struct _pool_chain_shared *
-memory_pool_find_pool (rspamd_mempool_t * pool, void *pointer)
-{
- struct _pool_chain_shared *cur = pool->shared_pool;
-
- while (cur) {
- if ((guint8 *) pointer >= cur->begin && (guint8 *) pointer <=
- (cur->begin + cur->len)) {
- return cur;
- }
- cur = cur->next;
- }
-
- return NULL;
-}
-
-/* Simple implementation of spinlock */
-void
-rspamd_mempool_lock_shared (rspamd_mempool_t * pool, void *pointer)
-{
- struct _pool_chain_shared *chain;
- chain = memory_pool_find_pool (pool, pointer);
- if (chain == NULL) {
- return;
- }
- if (chain->lock == NULL) {
- chain->lock = rspamd_mempool_get_mutex (pool);
- }
- rspamd_mempool_lock_mutex (chain->lock);
-}
-
-void
-rspamd_mempool_unlock_shared (rspamd_mempool_t * pool, void *pointer)
-{
- struct _pool_chain_shared *chain;
-
- chain = memory_pool_find_pool (pool, pointer);
- if (chain == NULL) {
- return;
- }
- if (chain->lock == NULL) {
- chain->lock = rspamd_mempool_get_mutex (pool);
- return;
- }
-
- rspamd_mempool_unlock_mutex (chain->lock);
+ return newstr;
}
void
const gchar *function,
const gchar *line)
{
- struct _pool_destructors *cur;
+ struct _pool_destructors cur;
- if (always_malloc) {
- /* Ugly workaround */
- always_malloc = FALSE;
- cur = rspamd_mempool_alloc (pool, sizeof (struct _pool_destructors));
- always_malloc = TRUE;
- }
- else {
- cur = rspamd_mempool_alloc (pool, sizeof (struct _pool_destructors));
- }
- if (cur) {
- POOL_MTX_LOCK ();
- cur->func = func;
- cur->data = data;
- cur->function = function;
- cur->loc = line;
- cur->prev = pool->destructors;
- pool->destructors = cur;
- POOL_MTX_UNLOCK ();
- }
+ POOL_MTX_LOCK ();
+ cur.func = func;
+ cur.data = data;
+ cur.function = function;
+ cur.loc = line;
+
+ g_array_append_val (pool->destructors, cur);
+ POOL_MTX_UNLOCK ();
}
void
void *new_data)
{
struct _pool_destructors *tmp;
+ guint i;
+
+ for (i = 0; i < pool->destructors->len; i ++) {
+ tmp = &g_array_index (pool->destructors, struct _pool_destructors, i);
- tmp = pool->destructors;
- while (tmp) {
if (tmp->func == func && tmp->data == old_data) {
tmp->func = func;
tmp->data = new_data;
break;
}
- tmp = tmp->prev;
}
-
}
void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
- struct _pool_chain *cur, *tmp;
- struct _pool_chain_shared *cur_shared, *tmp_shared;
- struct _pool_destructors *destructor = pool->destructors;
+ struct _pool_chain *cur;
+ struct _pool_destructors *destructor;
+ guint i, j;
POOL_MTX_LOCK ();
+
/* Call all pool destructors */
- while (destructor) {
+ for (i = 0; i < pool->destructors->len; i ++) {
+ destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
/* Avoid calling destructors for NULL pointers */
if (destructor->data != NULL) {
destructor->func (destructor->data);
}
- destructor = destructor->prev;
}
- LL_FOREACH_SAFE (pool->cur_pool, cur, tmp) {
- g_atomic_int_inc (&mem_pool_stat->chunks_freed);
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, -cur->len);
- g_slice_free1 (cur->len, cur->begin);
- g_slice_free (struct _pool_chain, cur);
- }
- /* Clean temporary pools */
- LL_FOREACH_SAFE (pool->cur_pool_tmp, cur, tmp) {
- g_atomic_int_inc (&mem_pool_stat->chunks_freed);
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, -cur->len);
- g_slice_free1 (cur->len, cur->begin);
- g_slice_free (struct _pool_chain, cur);
- }
- /* Unmap shared memory */
- LL_FOREACH_SAFE (pool->shared_pool, cur_shared, tmp_shared) {
- g_atomic_int_inc (&mem_pool_stat->chunks_freed);
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, -cur_shared->len);
- munmap ((void *)cur_shared, cur_shared->len +
- sizeof (struct _pool_chain_shared));
+ g_array_free (pool->destructors, TRUE);
+
+ for (i = 0; i < G_N_ELEMENTS (pool->pools); i ++) {
+ if (pool->pools[i]) {
+ for (j = 0; j < pool->pools[i]->len; j++) {
+ cur = g_ptr_array_index (pool->pools[i], j);
+
+ if (i == RSPAMD_MEMPOOL_SHARED) {
+ munmap ((void *)cur, cur->len +
+ sizeof (struct _pool_chain));
+ }
+ else {
+ g_slice_free1 (sizeof (*cur), cur);
+ }
+ }
+
+ g_ptr_array_free (pool->pools[i], TRUE);
+ }
}
if (pool->variables) {
void
rspamd_mempool_cleanup_tmp (rspamd_mempool_t * pool)
{
- struct _pool_chain *cur, *tmp;
+ struct _pool_chain *cur;
+ guint i;
POOL_MTX_LOCK ();
- LL_FOREACH_SAFE (pool->cur_pool_tmp, cur, tmp) {
- g_atomic_int_inc (&mem_pool_stat->chunks_freed);
- g_atomic_int_add (&mem_pool_stat->bytes_allocated, -cur->len);
- g_slice_free1 (cur->len, cur->begin);
- g_slice_free (struct _pool_chain, cur);
+ if (pool->pools[RSPAMD_MEMPOOL_TMP]) {
+ for (i = 0; i < pool->pools[RSPAMD_MEMPOOL_TMP]->len; i++) {
+ cur = g_ptr_array_index (pool->pools[RSPAMD_MEMPOOL_TMP], i);
+ g_slice_free1 (sizeof (*cur), cur);
+ }
+
+ g_ptr_array_free (pool->pools[RSPAMD_MEMPOOL_TMP], TRUE);
+ pool->pools[RSPAMD_MEMPOOL_TMP] = NULL;
}
g_atomic_int_inc (&mem_pool_stat->pools_freed);