
mem_pool.c 31KB

/*-
 * Copyright 2016 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"
#include "contrib/uthash/utlist.h"
#include "mem_pool_internal.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() do { } while (0)
#define POOL_MTX_UNLOCK() do { } while (0)

/*
 * This define specifies whether we should check all pools for free space for a new object
 * or just scan from the current (most recently attached) pool.
 * If MEMORY_GREEDY is defined, we scan all pools to find free space (more CPU usage and slower,
 * but requires less memory). If it is not defined, only the current pool is checked, and if the
 * object is too large to fit there a new pool is allocated (this may also cause high CPU usage
 * in some cases, but is generally faster than the greedy method).
 */
#undef MEMORY_GREEDY

static inline uint32_t
rspamd_entry_hash (const char *str)
{
    return (guint)rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
}

static inline int
rspamd_entry_equal (const char *k1, const char *k2)
{
    return strcmp (k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
        1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistic */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;

/**
 * Returns the free space in a pool page
 * @param chain pool page struct
 */
static gsize
pool_chain_free (struct _pool_chain *chain)
{
    gint64 occupied = chain->pos - chain->begin + MIN_MEM_ALIGNMENT;

    return (occupied < (gint64)chain->slice_size ?
            chain->slice_size - occupied : 0);
}

/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new (const gchar *loc)
{
    struct rspamd_mempool_entry_point **pentry, *entry;
    gint r;
    khiter_t k;

    k = kh_put (mempool_entry, mempool_entries, loc, &r);

    if (r >= 0) {
        pentry = &kh_value (mempool_entries, k);
        entry = g_malloc0 (sizeof (*entry));
        *pentry = entry;
        memset (entry, 0, sizeof (*entry));
        rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
#ifdef HAVE_GETPAGESIZE
        entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
#else
        entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
    }
    else {
        g_assert_not_reached ();
    }

    return entry;
}

RSPAMD_CONSTRUCTOR (rspamd_mempool_entries_ctor)
{
    if (mempool_entries == NULL) {
        mempool_entries = kh_init (mempool_entry);
    }
}

RSPAMD_DESTRUCTOR (rspamd_mempool_entries_dtor)
{
    struct rspamd_mempool_entry_point *elt;

    kh_foreach_value (mempool_entries, elt, {
        g_free (elt);
    });

    kh_destroy (mempool_entry, mempool_entries);
    mempool_entries = NULL;
}

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry (const gchar *loc)
{
    khiter_t k;
    struct rspamd_mempool_entry_point *elt;

    if (G_UNLIKELY (!mempool_entries)) {
        rspamd_mempool_entries_ctor();
    }

    k = kh_get (mempool_entry, mempool_entries, loc);

    if (k != kh_end (mempool_entries)) {
        elt = kh_value (mempool_entries, k);

        return elt;
    }

    return rspamd_mempool_entry_new(loc);
}

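/*
 * Allocate a new chain: one contiguous slab holding the struct _pool_chain header
 * followed by its payload. Shared chains are backed by mmap so they can be used
 * across processes; normal chains come from posix_memalign and, when
 * HAVE_MALLOC_SIZE is available, are rounded up to the allocator's real chunk
 * size to avoid internal waste.
 */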
static struct _pool_chain *
rspamd_mempool_chain_new (gsize size, gsize alignment, enum rspamd_mempool_chain_type pool_type)
{
    struct _pool_chain *chain;
    gsize total_size = size + sizeof (struct _pool_chain) + alignment,
            optimal_size = 0;
    gpointer map;

    g_assert (size > 0);

    if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                total_size,
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);
        if (map == MAP_FAILED) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }
        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        if (fd == -1) {
            return NULL;
        }
        map = mmap (NULL,
                size + sizeof (struct _pool_chain),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting", size +
                    sizeof (struct _pool_chain));
            abort ();
        }
        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#else
#error No mmap methods are defined
#endif
        g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
    }
    else {
#ifdef HAVE_MALLOC_SIZE
        optimal_size = sys_alloc_size (total_size);
#endif
        total_size = MAX (total_size, optimal_size);
        gint ret = posix_memalign (&map, alignment, total_size);

        if (ret != 0 || map == NULL) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
                    G_STRLOC, total_size, ret, strerror (errno));
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
        g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
    }

    chain->pos = align_ptr (chain->begin, alignment);
    chain->slice_size = total_size - sizeof (struct _pool_chain);

    return chain;
}

/**
 * Get the current chain of the specified type (may be NULL if no chain
 * of this type has been attached yet)
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

    return pool->priv->pools[pool_type];
}

static void
rspamd_mempool_append_chain (rspamd_mempool_t * pool,
        struct _pool_chain *chain,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
    g_assert (chain != NULL);

    LL_PREPEND (pool->priv->pools[pool_type], chain);
}

/**
 * Allocate a new memory pool
 * @param size size of the pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_ (gsize size, const gchar *tag, gint flags, const gchar *loc)
{
    rspamd_mempool_t *new_pool;
    gpointer map;
    unsigned char uidbuf[10];
    const gchar hexdigits[] = "0123456789abcdef";
    unsigned i;

    /* Allocate statistic structure if it is not allocated before */
    if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }
        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        g_assert (fd != -1);
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }
        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#else
# error No mmap methods are defined
#endif
        memset (map, 0, sizeof (rspamd_mempool_stat_t));
    }

    if (!env_checked) {
        /* Check the VALGRIND environment variable to force plain malloc for pool debugging */
        const char *g_slice;

        g_slice = getenv ("VALGRIND");
        if (g_slice != NULL) {
            always_malloc = TRUE;
        }
        env_checked = TRUE;
    }

    struct rspamd_mempool_entry_point *entry = rspamd_mempool_get_entry (loc);
    gsize total_size;

    if (size == 0 && entry) {
        size = entry->cur_suggestion;
    }

    total_size = sizeof (rspamd_mempool_t) +
            sizeof (struct rspamd_mempool_specific) +
            MIN_MEM_ALIGNMENT +
            sizeof (struct _pool_chain) +
            size;

    if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
        total_size += sizeof (GHashTable *);
    }
    /*
     * Memory layout:
     * struct rspamd_mempool_t
     * <optional debug hash table>
     * struct rspamd_mempool_specific
     * struct _pool_chain
     * alignment (if needed)
     * memory chunk
     */

    guchar *mem_chunk;
    gint ret = posix_memalign ((void **)&mem_chunk, MIN_MEM_ALIGNMENT,
            total_size);
    gsize priv_offset;

    if (ret != 0 || mem_chunk == NULL) {
        g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
                G_STRLOC, total_size, ret, strerror (errno));
        abort ();
    }

    /* Set memory layout */
    new_pool = (rspamd_mempool_t *)mem_chunk;
    if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
        /* Allocate debug table */
        GHashTable *debug_tbl;

        debug_tbl = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
        memcpy (mem_chunk + sizeof (rspamd_mempool_t), &debug_tbl,
                sizeof (GHashTable *));
        priv_offset = sizeof (rspamd_mempool_t) + sizeof (GHashTable *);
    }
    else {
        priv_offset = sizeof (rspamd_mempool_t);
    }

    new_pool->priv = (struct rspamd_mempool_specific *)(mem_chunk +
            priv_offset);
    /* Zero memory for specific and for the first chain */
    memset (new_pool->priv, 0, sizeof (struct rspamd_mempool_specific) +
            sizeof (struct _pool_chain));
    new_pool->priv->entry = entry;
    new_pool->priv->elt_len = size;
    new_pool->priv->flags = flags;

    if (tag) {
        rspamd_strlcpy (new_pool->tag.tagname, tag, sizeof (new_pool->tag.tagname));
    }
    else {
        new_pool->tag.tagname[0] = '\0';
    }

    /* Generate new uid */
    ottery_rand_bytes (uidbuf, sizeof (uidbuf));
    for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
        new_pool->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
        new_pool->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
    }
    new_pool->tag.uid[19] = '\0';

    mem_pool_stat->pools_allocated++;

    /* Now we can attach one chunk to speed up simple allocations */
    struct _pool_chain *nchain;

    nchain = (struct _pool_chain *)
            (mem_chunk +
             priv_offset +
             sizeof (struct rspamd_mempool_specific));

    guchar *unaligned = mem_chunk +
            priv_offset +
            sizeof (struct rspamd_mempool_specific) +
            sizeof (struct _pool_chain);

    nchain->slice_size = size;
    nchain->begin = unaligned;
    nchain->pos = align_ptr (unaligned, MIN_MEM_ALIGNMENT);
    new_pool->priv->pools[RSPAMD_MEMPOOL_NORMAL] = nchain;
    new_pool->priv->used_memory = size;

    /* Adjust stats */
    g_atomic_int_add (&mem_pool_stat->bytes_allocated,
            (gint)size);
    g_atomic_int_add (&mem_pool_stat->chunks_allocated, 1);

    return new_pool;
}

static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size, gsize alignment,
        enum rspamd_mempool_chain_type pool_type,
        const gchar *loc)
    RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MIN_MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;

void
rspamd_mempool_notify_alloc_ (rspamd_mempool_t *pool, gsize size, const gchar *loc)
{
    if (pool && G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
        GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool + sizeof (*pool)));
        gpointer ptr;

        ptr = g_hash_table_lookup (debug_tbl, loc);

        if (ptr) {
            ptr = GSIZE_TO_POINTER (GPOINTER_TO_SIZE (ptr) + size);
        }
        else {
            ptr = GSIZE_TO_POINTER (size);
        }

        g_hash_table_insert (debug_tbl, (gpointer) loc, ptr);
    }
}

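/*
 * Common allocation path: if the "always malloc" (valgrind) mode is enabled, each
 * allocation falls back to g_malloc and is remembered in the trash stack; otherwise
 * the request is served from the current chain, and a new chain (sized from elt_len,
 * or oversized for large requests) is attached only when the current one does not
 * have enough room.
 */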
static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size, gsize alignment,
        enum rspamd_mempool_chain_type pool_type, const gchar *loc)
{
    guint8 *tmp;
    struct _pool_chain *new, *cur;
    gsize free = 0;

    if (pool) {
        POOL_MTX_LOCK ();
        pool->priv->used_memory += size;

        if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
            rspamd_mempool_notify_alloc_ (pool, size, loc);
        }

        if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
            void *ptr;

            if (alignment <= G_MEM_ALIGN) {
                ptr = g_malloc(size);
            }
            else {
                ptr = g_malloc(size + alignment);
                ptr = align_ptr(ptr, alignment);
            }
            POOL_MTX_UNLOCK ();

            if (pool->priv->trash_stack == NULL) {
                pool->priv->trash_stack = g_ptr_array_sized_new (128);
            }

            g_ptr_array_add (pool->priv->trash_stack, ptr);

            return ptr;
        }

        cur = rspamd_mempool_get_chain (pool, pool_type);

        /* Find free space in pool chain */
        if (cur) {
            free = pool_chain_free (cur);
        }

        if (cur == NULL || free < size + alignment) {
            if (free < size) {
                pool->priv->wasted_memory += free;
            }

            /* Allocate new chain element */
            if (pool->priv->elt_len >= size + alignment) {
                pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += size;
                new = rspamd_mempool_chain_new (pool->priv->elt_len, alignment,
                        pool_type);
            }
            else {
                mem_pool_stat->oversized_chunks++;
                g_atomic_int_add (&mem_pool_stat->fragmented_size,
                        free);
                pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += free;
                new = rspamd_mempool_chain_new (size + pool->priv->elt_len, alignment,
                        pool_type);
            }

            /* Connect to pool subsystem */
            rspamd_mempool_append_chain (pool, new, pool_type);
            /* No need to align again, aligned by rspamd_mempool_chain_new */
            tmp = new->pos;
            new->pos = tmp + size;
            POOL_MTX_UNLOCK ();

            return tmp;
        }

        /* No need to allocate page */
        tmp = align_ptr (cur->pos, alignment);
        cur->pos = tmp + size;
        POOL_MTX_UNLOCK ();

        return tmp;
    }

    abort ();
}

void *
rspamd_mempool_alloc_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
{
    return memory_pool_alloc_common (pool, size, alignment, RSPAMD_MEMPOOL_NORMAL, loc);
}

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW (1UL << (sizeof(gsize) * 4))
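/*
 * Worked example: on an LP64 platform sizeof(gsize) == 8, so MUL_NO_OVERFLOW is
 * 1UL << 32; any nmemb * size where both factors are below 2^32 cannot overflow
 * a 64-bit gsize, so the division check is only needed when one factor is large.
 */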
void *
rspamd_mempool_alloc_array_ (rspamd_mempool_t * pool, gsize nmemb, gsize size, gsize alignment, const gchar *loc)
{
    if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
            nmemb > 0 && G_MAXSIZE / nmemb < size) {
        g_error("alloc_array: overflow %"G_GSIZE_FORMAT" * %"G_GSIZE_FORMAT"",
                nmemb, size);
        g_abort();
    }
    return memory_pool_alloc_common (pool, nmemb * size, alignment, RSPAMD_MEMPOOL_NORMAL, loc);
}

void *
rspamd_mempool_alloc0_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
{
    void *pointer = rspamd_mempool_alloc_ (pool, size, alignment, loc);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc0_shared_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
{
    void *pointer = rspamd_mempool_alloc_shared_ (pool, size, alignment, loc);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc_shared_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
{
    return memory_pool_alloc_common (pool, size, alignment, RSPAMD_MEMPOOL_SHARED, loc);
}

gchar *
rspamd_mempool_strdup_ (rspamd_mempool_t * pool, const gchar *src, const gchar *loc)
{
    gsize len;
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    len = strlen (src);
    newstr = rspamd_mempool_alloc_ (pool, len + 1, MIN_MEM_ALIGNMENT, loc);
    memcpy (newstr, src, len);
    newstr[len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_ftokdup_ (rspamd_mempool_t *pool, const rspamd_ftok_t *src,
        const gchar *loc)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc_ (pool, src->len + 1, MIN_MEM_ALIGNMENT, loc);
    memcpy (newstr, src->begin, src->len);
    newstr[src->len] = '\0';

    return newstr;
}

void
rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *data,
        const gchar *function,
        const gchar *line)
{
    struct _pool_destructors *cur;

    POOL_MTX_LOCK ();
    cur = rspamd_mempool_alloc_ (pool, sizeof (*cur),
            RSPAMD_ALIGNOF(struct _pool_destructors), line);
    cur->func = func;
    cur->data = data;
    cur->function = function;
    cur->loc = line;
    cur->next = NULL;

    if (pool->priv->dtors_tail) {
        pool->priv->dtors_tail->next = cur;
        pool->priv->dtors_tail = cur;
    }
    else {
        pool->priv->dtors_head = cur;
        pool->priv->dtors_tail = cur;
    }

    POOL_MTX_UNLOCK ();
}

void
rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *old_data,
        void *new_data)
{
    struct _pool_destructors *tmp;

    LL_FOREACH (pool->priv->dtors_head, tmp) {
        if (tmp->func == func && tmp->data == old_data) {
            tmp->func = func;
            tmp->data = new_data;
            break;
        }
    }
}

static gint
cmp_int (gconstpointer a, gconstpointer b)
{
    gint i1 = *(const gint *)a, i2 = *(const gint *)b;

    return i1 - i2;
}

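/*
 * Adjust the suggested pool size for a call site from the recent history kept in
 * e->elts: each finished pool records how many bytes were allocated beyond the
 * first chain (fragmentation) and how many were left unused (leftover). The deltas
 * are sorted and two jittered quantiles are compared to decide whether to shrink
 * or grow cur_suggestion, which is then clamped to the [1KiB, 10MiB] range.
 */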
static void
rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
{
    gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
    guint i, jitter;

    for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
        sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
    }

    qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
    jitter = rspamd_random_uint64_fast () % 10;
    /*
     * Take stochastic quantiles
     */
    sel_pos = sz[50 + jitter];
    sel_neg = sz[4 + jitter];

    if (-sel_neg > sel_pos) {
        /* We need to reduce current suggestion */
        e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
    }
    else {
        /* We still want to grow */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }

    /* Apply sane limits given the mempool architecture */
    if (e->cur_suggestion < 1024) {
        e->cur_suggestion = 1024;
    }
    else if (e->cur_suggestion > 1024 * 1024 * 10) {
        e->cur_suggestion = 1024 * 1024 * 10;
    }

    memset (e->elts, 0, sizeof (e->elts));
}

static void
rspamd_mempool_variables_cleanup (rspamd_mempool_t * pool)
{
    if (pool->priv->variables) {
        struct rspamd_mempool_variable *var;

        kh_foreach_value_ptr (pool->priv->variables, var, {
            if (var->dtor) {
                var->dtor (var->data);
            }
        });

        if (pool->priv->entry && pool->priv->entry->cur_vars <
                kh_size (pool->priv->variables)) {
            /*
             * Increase the preallocated size:
             * 1) if our previous guess was zero, take the current count;
             * 2) if the current count is more than twice the previous guess, jump
             *    straight to the current count, otherwise just double the guess;
             * 3) in all cases cap the guess at a hard limit.
             */
            static const guint max_preallocated_vars = 512;

            guint cur_size = kh_size (pool->priv->variables);
            guint old_guess = pool->priv->entry->cur_vars;
            guint new_guess;

            if (old_guess == 0) {
                new_guess = MIN (cur_size, max_preallocated_vars);
            }
            else {
                if (old_guess * 2 < cur_size) {
                    new_guess = MIN (cur_size, max_preallocated_vars);
                }
                else {
                    /* Too large step */
                    new_guess = MIN (old_guess * 2, max_preallocated_vars);
                }
            }

            pool->priv->entry->cur_vars = new_guess;
        }

        kh_destroy (rspamd_mempool_vars_hash, pool->priv->variables);
        pool->priv->variables = NULL;
    }
}

void
rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
{
    struct _pool_destructors *destructor;

    POOL_MTX_LOCK ();

    LL_FOREACH (pool->priv->dtors_head, destructor) {
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    pool->priv->dtors_head = pool->priv->dtors_tail = NULL;

    rspamd_mempool_variables_cleanup (pool);

    POOL_MTX_UNLOCK ();
}

struct mempool_debug_elt {
    gsize sz;
    const gchar *loc;
};

static gint
rspamd_mempool_debug_elt_cmp (const void *a, const void *b)
{
    const struct mempool_debug_elt *e1 = a, *e2 = b;

    /* Inverse order */
    return (gint)((gssize)e2->sz) - ((gssize)e1->sz);
}

void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur, *tmp;
    struct _pool_destructors *destructor;
    gpointer ptr;
    guint i;
    gsize len;

    POOL_MTX_LOCK ();

    cur = pool->priv->pools[RSPAMD_MEMPOOL_NORMAL];

    if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
        GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool) + sizeof (*pool));
        /* Show debug info */
        gsize ndtor = 0;
        LL_COUNT (pool->priv->dtors_head, destructor, ndtor);
        msg_info_pool ("destructing of the memory pool %p; elt size = %z; "
                "used memory = %Hz; wasted memory = %Hd; "
                "vars = %z; destructors = %z",
                pool,
                pool->priv->elt_len,
                pool->priv->used_memory,
                pool->priv->wasted_memory,
                pool->priv->variables ? (gsize)kh_size (pool->priv->variables) : (gsize)0,
                ndtor);

        GHashTableIter it;
        gpointer k, v;
        GArray *sorted_debug_size = g_array_sized_new (FALSE, FALSE,
                sizeof (struct mempool_debug_elt),
                g_hash_table_size (debug_tbl));

        g_hash_table_iter_init (&it, debug_tbl);

        while (g_hash_table_iter_next (&it, &k, &v)) {
            struct mempool_debug_elt e;
            e.loc = (const gchar *)k;
            e.sz = GPOINTER_TO_SIZE (v);
            g_array_append_val (sorted_debug_size, e);
        }

        g_array_sort (sorted_debug_size, rspamd_mempool_debug_elt_cmp);

        for (guint _i = 0; _i < sorted_debug_size->len; _i ++) {
            struct mempool_debug_elt *e;

            e = &g_array_index (sorted_debug_size, struct mempool_debug_elt, _i);
            msg_info_pool ("allocated %Hz from %s", e->sz, e->loc);
        }

        g_array_free (sorted_debug_size, TRUE);
        g_hash_table_unref (debug_tbl);
    }

    if (cur && mempool_entries) {
        pool->priv->entry->elts[pool->priv->entry->cur_elts].leftover =
                pool_chain_free (cur);

        pool->priv->entry->cur_elts = (pool->priv->entry->cur_elts + 1) %
                G_N_ELEMENTS (pool->priv->entry->elts);

        if (pool->priv->entry->cur_elts == 0) {
            rspamd_mempool_adjust_entry (pool->priv->entry);
        }
    }

    /* Call all pool destructors */
    LL_FOREACH (pool->priv->dtors_head, destructor) {
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    rspamd_mempool_variables_cleanup (pool);

    if (pool->priv->trash_stack) {
        for (i = 0; i < pool->priv->trash_stack->len; i++) {
            ptr = g_ptr_array_index (pool->priv->trash_stack, i);
            g_free (ptr);
        }

        g_ptr_array_free (pool->priv->trash_stack, TRUE);
    }

    for (i = 0; i < G_N_ELEMENTS (pool->priv->pools); i ++) {
        if (pool->priv->pools[i]) {
            LL_FOREACH_SAFE (pool->priv->pools[i], cur, tmp) {
                g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                        -((gint)cur->slice_size));
                g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
                len = cur->slice_size + sizeof (struct _pool_chain);

                if (i == RSPAMD_MEMPOOL_SHARED) {
                    munmap ((void *)cur, len);
                }
                else {
                    /* The last pool is special, it is a part of the initial chunk */
                    if (cur->next != NULL) {
                        free (cur); /* Not g_free as we use system allocator */
                    }
                }
            }
        }
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
    free (pool); /* allocated by posix_memalign */
}

void
rspamd_mempool_stat (rspamd_mempool_stat_t * st)
{
    if (mem_pool_stat != NULL) {
        st->pools_allocated = mem_pool_stat->pools_allocated;
        st->pools_freed = mem_pool_stat->pools_freed;
        st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
        st->bytes_allocated = mem_pool_stat->bytes_allocated;
        st->chunks_allocated = mem_pool_stat->chunks_allocated;
        st->chunks_freed = mem_pool_stat->chunks_freed;
        st->oversized_chunks = mem_pool_stat->oversized_chunks;
    }
}

void
rspamd_mempool_stat_reset (void)
{
    if (mem_pool_stat != NULL) {
        memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
    }
}

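/*
 * Always returns 0: the actual size suggestion comes from the per-call-site entry
 * (entry->cur_suggestion) when rspamd_mempool_new_ is invoked with size == 0, so
 * this function presumably remains only for API compatibility (assumption).
 */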
gsize
rspamd_mempool_suggest_size_ (const char *loc)
{
    return 0;
}

#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
static inline gint
__mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    /* check spin count */
    if (g_atomic_int_dec_and_test (&mutex->spin)) {
        /* This may be deadlock, so check owner of this lock */
        if (mutex->owner == getpid ()) {
            /* This mutex was locked by calling process, so it is just double lock and we can easily unlock it */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        else if (kill (mutex->owner, 0) == -1) {
            /* Owner process was not found, so release lock */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        /* Spin again */
        g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
    }

#ifdef HAVE_SCHED_YIELD
    (void)sched_yield ();
#elif defined(HAVE_NANOSLEEP)
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = MUTEX_SLEEP_TIME;
    /* Spin */
    while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
#else
#error No methods to spin are defined
#endif

    return 1;
}

static void
memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
        if (!__mutex_spin (mutex)) {
            return;
        }
    }
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
        res->lock = 0;
        res->owner = 0;
        res->spin = MUTEX_SPIN_COUNT;

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    memory_pool_mutex_spin (mutex);
    mutex->owner = getpid ();
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    mutex->owner = 0;
    (void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *lock;

    lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
    lock->__r_lock = rspamd_mempool_get_mutex (pool);
    lock->__w_lock = rspamd_mempool_get_mutex (pool);

    return lock;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock */
    while (g_atomic_int_get (&lock->__w_lock->lock)) {
        if (!__mutex_spin (lock->__w_lock)) {
            break;
        }
    }

    g_atomic_int_inc (&lock->__r_lock->lock);
    lock->__r_lock->owner = getpid ();
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock first */
    rspamd_mempool_lock_mutex (lock->__w_lock);
    /* Now we have write lock set up */
    /* Wait all readers */
    while (g_atomic_int_get (&lock->__r_lock->lock)) {
        __mutex_spin (lock->__r_lock);
    }
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    if (g_atomic_int_get (&lock->__r_lock->lock)) {
        (void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
    }
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    rspamd_mempool_unlock_mutex (lock->__w_lock);
}
#else
/*
 * Pthread-based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;
    pthread_mutexattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));

        pthread_mutexattr_init (&mattr);
        pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
        pthread_mutexattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_lock (mutex);
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_unlock (mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *res;
    pthread_rwlockattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));

        pthread_rwlockattr_init (&mattr);
        pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_rwlock_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
        pthread_rwlockattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_rdlock (lock);
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_wrlock (lock);
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}
#endif

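/*
 * Pool "variables" are stored in a khash keyed by a 64-bit fast hash of the
 * variable name (the name itself is not kept), with an optional destructor
 * that is invoked on replacement, explicit removal, and pool cleanup.
 */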
#define RSPAMD_MEMPOOL_VARS_HASH_SEED 0xb32ad7c55eb2e647ULL

void
rspamd_mempool_set_variable (rspamd_mempool_t *pool,
        const gchar *name,
        gpointer value,
        rspamd_mempool_destruct_t destructor)
{
    if (pool->priv->variables == NULL) {
        pool->priv->variables = kh_init (rspamd_mempool_vars_hash);

        if (pool->priv->entry->cur_vars > 0) {
            /* Preallocate */
            kh_resize (rspamd_mempool_vars_hash,
                    pool->priv->variables,
                    pool->priv->entry->cur_vars);
        }
    }

    gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
            RSPAMD_MEMPOOL_VARS_HASH_SEED);
    khiter_t it;
    gint r;

    it = kh_put (rspamd_mempool_vars_hash, pool->priv->variables, hv, &r);

    if (it == kh_end (pool->priv->variables)) {
        g_assert_not_reached ();
    }
    else {
        struct rspamd_mempool_variable *pvar;

        if (r == 0) {
            /* Existing entry, maybe need cleanup */
            pvar = &kh_val (pool->priv->variables, it);

            if (pvar->dtor) {
                pvar->dtor (pvar->data);
            }
        }

        pvar = &kh_val (pool->priv->variables, it);
        pvar->data = value;
        pvar->dtor = destructor;
    }
}

gpointer
rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->priv->variables == NULL) {
        return NULL;
    }

    khiter_t it;
    gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
            RSPAMD_MEMPOOL_VARS_HASH_SEED);

    it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);

    if (it != kh_end (pool->priv->variables)) {
        struct rspamd_mempool_variable *pvar;

        pvar = &kh_val (pool->priv->variables, it);

        return pvar->data;
    }

    return NULL;
}

void
rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->priv->variables != NULL) {
        khiter_t it;
        gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
                RSPAMD_MEMPOOL_VARS_HASH_SEED);

        it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);

        if (it != kh_end (pool->priv->variables)) {
            struct rspamd_mempool_variable *pvar;

            pvar = &kh_val (pool->priv->variables, it);

            if (pvar->dtor) {
                pvar->dtor (pvar->data);
            }

            kh_del (rspamd_mempool_vars_hash, pool->priv->variables, it);
        }
    }
}

GList *
rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->prev = NULL;
    cell->data = p;

    if (l == NULL) {
        cell->next = NULL;
    }
    else {
        cell->next = l;
        l->prev = cell;
    }

    return cell;
}

GList *
rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell, *cur;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->next = NULL;
    cell->data = p;

    if (l) {
        for (cur = l; cur->next != NULL; cur = cur->next) {}
        cur->next = cell;
        cell->prev = cur;
    }
    else {
        l = cell;
        l->prev = NULL;
    }

    return l;
}

gsize
rspamd_mempool_get_used_size (rspamd_mempool_t *pool)
{
    return pool->priv->used_memory;
}

gsize
rspamd_mempool_get_wasted_size (rspamd_mempool_t *pool)
{
    return pool->priv->wasted_memory;
}
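
A minimal usage sketch of the pool API implemented above. It assumes the usual convenience wrappers declared in mem_pool.h (rspamd_mempool_new, rspamd_mempool_alloc, rspamd_mempool_strdup, rspamd_mempool_add_destructor), which forward the call-site location to the *_ functions defined in this file; the example function and close_fd helper are hypothetical.

#include <fcntl.h>
#include <unistd.h>
#include "mem_pool.h"

/* Hypothetical destructor: invoked by rspamd_mempool_delete before memory is released */
static void
close_fd (void *p)
{
    close (*(int *)p);
}

static void
example (void)
{
    /* Size 0 lets the pool pick entry->cur_suggestion for this call site */
    rspamd_mempool_t *pool = rspamd_mempool_new (0, "example", 0);

    /* Both allocations live until the pool is deleted; no individual free */
    char *copy = rspamd_mempool_strdup (pool, "hello");
    int *fd = rspamd_mempool_alloc (pool, sizeof (*fd));

    *fd = open ("/dev/null", O_RDONLY);
    rspamd_mempool_add_destructor (pool, close_fd, fd);

    /* NULL destructor: the value is plain pool memory, nothing extra to free */
    rspamd_mempool_set_variable (pool, "greeting", copy, NULL);

    /* Runs destructors, then releases all chains in one pass */
    rspamd_mempool_delete (pool);
}

Everything allocated from the pool is released in a single pass by rspamd_mempool_delete; only objects that need extra teardown (such as the file descriptor here) register a destructor.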