
/*-
 * Copyright 2016 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"
#include "contrib/uthash/utlist.h"
#include "mem_pool_internal.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() do { } while (0)
#define POOL_MTX_UNLOCK() do { } while (0)

/*
 * This define specifies whether we should check all pools for free space when placing
 * a new object, or only scan the current (most recently attached) pool.
 * If MEMORY_GREEDY is defined, all pools are scanned to find free space (more CPU usage,
 * slower, but requires less memory). If it is not defined, only the current pool is
 * checked, and if the object does not fit there a new pool is allocated (this may also
 * cause high CPU usage in some cases, but is generally faster than the greedy method).
 */
#undef MEMORY_GREEDY

static inline uint32_t
rspamd_entry_hash (const char *str)
{
	return (guint)rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
}

static inline int
rspamd_entry_equal (const char *k1, const char *k2)
{
	return strcmp (k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
		1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistic */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;

/**
 * Function that returns free space in a pool page
 * @param chain pool page struct
 */
static gsize
pool_chain_free (struct _pool_chain *chain)
{
	gint64 occupied = chain->pos - chain->begin + MIN_MEM_ALIGNMENT;

	return (occupied < (gint64)chain->slice_size ?
			chain->slice_size - occupied : 0);
}

/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096
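
/*
 * Entry points are bookkeeping records keyed by the allocation location
 * (the caller's file:line passed as `loc`). Each entry tracks the recent
 * fragmentation/leftover history and a suggested initial pool size for
 * pools created from that location.
 */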
static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new (const gchar *loc)
{
	struct rspamd_mempool_entry_point **pentry, *entry;
	gint r;
	khiter_t k;

	k = kh_put (mempool_entry, mempool_entries, loc, &r);

	if (r >= 0) {
		pentry = &kh_value (mempool_entries, k);
		entry = g_malloc0 (sizeof (*entry));
		*pentry = entry;
		memset (entry, 0, sizeof (*entry));
		rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
#ifdef HAVE_GETPAGESIZE
		entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
#else
		entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
	}
	else {
		g_assert_not_reached ();
	}

	return entry;
}

RSPAMD_CONSTRUCTOR (rspamd_mempool_entries_ctor)
{
	mempool_entries = kh_init (mempool_entry);
}

RSPAMD_DESTRUCTOR (rspamd_mempool_entries_dtor)
{
	struct rspamd_mempool_entry_point *elt;

	kh_foreach_value (mempool_entries, elt, {
		g_free (elt);
	});

	kh_destroy (mempool_entry, mempool_entries);
	mempool_entries = NULL;
}

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry (const gchar *loc)
{
	khiter_t k;
	struct rspamd_mempool_entry_point *elt;

	k = kh_get (mempool_entry, mempool_entries, loc);

	if (k != kh_end (mempool_entries)) {
		elt = kh_value (mempool_entries, k);

		return elt;
	}

	return rspamd_mempool_entry_new (loc);
}
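
/*
 * Allocate a new chain element (memory page) providing at least `size` usable
 * bytes. Shared chains are backed by an anonymous (or /dev/zero) MAP_SHARED
 * mapping so the memory remains usable across fork(); normal chains come from
 * posix_memalign. The chain header is followed by the usable slice.
 */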
static struct _pool_chain *
rspamd_mempool_chain_new (gsize size, enum rspamd_mempool_chain_type pool_type)
{
	struct _pool_chain *chain;
	gsize total_size = size + sizeof (struct _pool_chain) + MIN_MEM_ALIGNMENT,
			optimal_size = 0;
	gpointer map;

	g_assert (size > 0);

	if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
		map = mmap (NULL,
				total_size,
				PROT_READ | PROT_WRITE,
				MAP_ANON | MAP_SHARED,
				-1,
				0);

		if (map == MAP_FAILED) {
			g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
					G_STRLOC, total_size);
			abort ();
		}

		chain = map;
		chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
		gint fd;

		fd = open ("/dev/zero", O_RDWR);
		if (fd == -1) {
			return NULL;
		}
		map = mmap (NULL,
				size + sizeof (struct _pool_chain),
				PROT_READ | PROT_WRITE,
				MAP_SHARED,
				fd,
				0);
		if (map == MAP_FAILED) {
			msg_err ("cannot allocate %z bytes, aborting", size +
					sizeof (struct _pool_chain));
			abort ();
		}
		chain = map;
		chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#else
#error No mmap methods are defined
#endif
		g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
		g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
	}
	else {
#ifdef HAVE_MALLOC_SIZE
		optimal_size = sys_alloc_size (total_size);
#endif
		total_size = MAX (total_size, optimal_size);
		gint ret = posix_memalign (&map, MIN_MEM_ALIGNMENT, total_size);

		if (ret != 0 || map == NULL) {
			g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
					G_STRLOC, total_size, ret, strerror (errno));
			abort ();
		}

		chain = map;
		chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
		g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
		g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
	}

	chain->pos = align_ptr (chain->begin, MIN_MEM_ALIGNMENT);
	chain->slice_size = total_size - sizeof (struct _pool_chain);

	return chain;
}

/**
 * Get the current pool of the specified type, creating the corresponding
 * array if it's absent
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain (rspamd_mempool_t * pool,
		enum rspamd_mempool_chain_type pool_type)
{
	g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

	return pool->priv->pools[pool_type];
}

static void
rspamd_mempool_append_chain (rspamd_mempool_t * pool,
		struct _pool_chain *chain,
		enum rspamd_mempool_chain_type pool_type)
{
	g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
	g_assert (chain != NULL);

	LL_PREPEND (pool->priv->pools[pool_type], chain);
}
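
/*
 * Typical usage sketch (the rspamd_mempool_new/strdup/add_destructor wrappers
 * are macros from mem_pool.h that pass the caller location; names below are
 * illustrative only):
 *
 *   rspamd_mempool_t *pool = rspamd_mempool_new (rspamd_mempool_suggest_size (), "example", 0);
 *   gchar *copy = rspamd_mempool_strdup (pool, "some string");
 *   rspamd_mempool_add_destructor (pool, (rspamd_mempool_destruct_t)g_hash_table_unref, tbl);
 *   ...
 *   rspamd_mempool_delete (pool); // runs destructors, then releases all chains at once
 */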

/**
 * Allocate new memory pool
 * @param size size of pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_ (gsize size, const gchar *tag, gint flags, const gchar *loc)
{
	rspamd_mempool_t *new_pool;
	gpointer map;
	unsigned char uidbuf[10];
	const gchar hexdigits[] = "0123456789abcdef";
	unsigned i;

	/* Allocate statistic structure if it is not allocated before */
	if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
		map = mmap (NULL,
				sizeof (rspamd_mempool_stat_t),
				PROT_READ | PROT_WRITE,
				MAP_ANON | MAP_SHARED,
				-1,
				0);
		if (map == MAP_FAILED) {
			msg_err ("cannot allocate %z bytes, aborting",
					sizeof (rspamd_mempool_stat_t));
			abort ();
		}
		mem_pool_stat = (rspamd_mempool_stat_t *)map;
#elif defined(HAVE_MMAP_ZERO)
		gint fd;

		fd = open ("/dev/zero", O_RDWR);
		g_assert (fd != -1);
		map = mmap (NULL,
				sizeof (rspamd_mempool_stat_t),
				PROT_READ | PROT_WRITE,
				MAP_SHARED,
				fd,
				0);
		if (map == MAP_FAILED) {
			msg_err ("cannot allocate %z bytes, aborting",
					sizeof (rspamd_mempool_stat_t));
			abort ();
		}
		mem_pool_stat = (rspamd_mempool_stat_t *)map;
#else
#	error No mmap methods are defined
#endif
		memset (map, 0, sizeof (rspamd_mempool_stat_t));
	}

	if (!env_checked) {
		/* Check the VALGRIND environment variable to enable always-malloc mode for memory pool debugging */
		const char *g_slice;

		g_slice = getenv ("VALGRIND");

		if (g_slice != NULL) {
			always_malloc = TRUE;
		}

		env_checked = TRUE;
	}

	struct rspamd_mempool_entry_point *entry = rspamd_mempool_get_entry (loc);
	gsize total_size;

	if (size == 0 && entry) {
		size = entry->cur_suggestion;
	}

	total_size = sizeof (rspamd_mempool_t) +
			sizeof (struct rspamd_mempool_specific) +
			MIN_MEM_ALIGNMENT +
			sizeof (struct _pool_chain) +
			size;

	if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
		total_size += sizeof (GHashTable *);
	}

	/*
	 * Memory layout:
	 * struct rspamd_mempool_t
	 * <optional debug hash table>
	 * struct rspamd_mempool_specific
	 * struct _pool_chain
	 * alignment (if needed)
	 * memory chunk
	 */
	guchar *mem_chunk;
	gint ret = posix_memalign ((void **)&mem_chunk, MIN_MEM_ALIGNMENT,
			total_size);
	gsize priv_offset;

	if (ret != 0 || mem_chunk == NULL) {
		g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
				G_STRLOC, total_size, ret, strerror (errno));
		abort ();
	}

	/* Set memory layout */
	new_pool = (rspamd_mempool_t *)mem_chunk;
	if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
		/* Allocate debug table */
		GHashTable *debug_tbl;

		debug_tbl = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
		memcpy (mem_chunk + sizeof (rspamd_mempool_t), &debug_tbl,
				sizeof (GHashTable *));
		priv_offset = sizeof (rspamd_mempool_t) + sizeof (GHashTable *);
	}
	else {
		priv_offset = sizeof (rspamd_mempool_t);
	}

	new_pool->priv = (struct rspamd_mempool_specific *)(mem_chunk +
			priv_offset);
	/* Zero memory for specific and for the first chain */
	memset (new_pool->priv, 0, sizeof (struct rspamd_mempool_specific) +
			sizeof (struct _pool_chain));
	new_pool->priv->entry = entry;
	new_pool->priv->elt_len = size;
	new_pool->priv->flags = flags;

	if (tag) {
		rspamd_strlcpy (new_pool->tag.tagname, tag, sizeof (new_pool->tag.tagname));
	}
	else {
		new_pool->tag.tagname[0] = '\0';
	}

	/* Generate new uid */
	ottery_rand_bytes (uidbuf, sizeof (uidbuf));
	for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
		new_pool->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
		new_pool->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
	}
	new_pool->tag.uid[19] = '\0';

	mem_pool_stat->pools_allocated++;

	/* Now we can attach one chunk to speed up simple allocations */
	struct _pool_chain *nchain;

	nchain = (struct _pool_chain *)
			(mem_chunk +
			priv_offset +
			sizeof (struct rspamd_mempool_specific));

	guchar *unaligned = mem_chunk +
			priv_offset +
			sizeof (struct rspamd_mempool_specific) +
			sizeof (struct _pool_chain);

	nchain->slice_size = size;
	nchain->begin = unaligned;
	nchain->pos = align_ptr (unaligned, MIN_MEM_ALIGNMENT);
	new_pool->priv->pools[RSPAMD_MEMPOOL_NORMAL] = nchain;
	new_pool->priv->used_memory = size;

	/* Adjust stats */
	g_atomic_int_add (&mem_pool_stat->bytes_allocated,
			(gint)size);
	g_atomic_int_add (&mem_pool_stat->chunks_allocated, 1);

	return new_pool;
}

static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
		enum rspamd_mempool_chain_type pool_type,
		const gchar *loc)
		RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MIN_MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;

void
rspamd_mempool_notify_alloc_ (rspamd_mempool_t *pool, gsize size, const gchar *loc)
{
	if (pool && G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
		GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool + sizeof (*pool)));
		gpointer ptr;

		ptr = g_hash_table_lookup (debug_tbl, loc);

		if (ptr) {
			ptr = GSIZE_TO_POINTER (GPOINTER_TO_SIZE (ptr) + size);
		}
		else {
			ptr = GSIZE_TO_POINTER (size);
		}

		g_hash_table_insert (debug_tbl, (gpointer) loc, ptr);
	}
}
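
/*
 * Core allocation path: bump-allocate from the current chain of the requested
 * type; if there is not enough room, attach a fresh chain (of elt_len bytes,
 * or larger for oversized requests) and allocate from it. When always_malloc
 * is set (VALGRIND mode), allocations are redirected to g_malloc and tracked
 * in the trash stack so they can be freed on pool destruction.
 */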
static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
		enum rspamd_mempool_chain_type pool_type, const gchar *loc)
{
	guint8 *tmp;
	struct _pool_chain *new, *cur;
	gsize free = 0;

	if (pool) {
		POOL_MTX_LOCK ();
		pool->priv->used_memory += size;

		if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
			rspamd_mempool_notify_alloc_ (pool, size, loc);
		}

		if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
			void *ptr;

			ptr = g_malloc (size);
			POOL_MTX_UNLOCK ();

			if (pool->priv->trash_stack == NULL) {
				pool->priv->trash_stack = g_ptr_array_sized_new (128);
			}

			g_ptr_array_add (pool->priv->trash_stack, ptr);

			return ptr;
		}

		cur = rspamd_mempool_get_chain (pool, pool_type);

		/* Find free space in pool chain */
		if (cur) {
			free = pool_chain_free (cur);
		}

		if (cur == NULL || free < size) {
			if (free < size) {
				pool->priv->wasted_memory += free;
			}

			/* Allocate new chain element */
			if (pool->priv->elt_len >= size + MIN_MEM_ALIGNMENT) {
				pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += size;
				new = rspamd_mempool_chain_new (pool->priv->elt_len,
						pool_type);
			}
			else {
				mem_pool_stat->oversized_chunks++;
				g_atomic_int_add (&mem_pool_stat->fragmented_size,
						free);
				pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += free;
				new = rspamd_mempool_chain_new (size + pool->priv->elt_len, pool_type);
			}

			/* Connect to pool subsystem */
			rspamd_mempool_append_chain (pool, new, pool_type);
			/* No need to align again, aligned by rspamd_mempool_chain_new */
			tmp = new->pos;
			new->pos = tmp + size;
			POOL_MTX_UNLOCK ();

			return tmp;
		}

		/* No need to allocate page */
		tmp = align_ptr (cur->pos, MIN_MEM_ALIGNMENT);
		cur->pos = tmp + size;
		POOL_MTX_UNLOCK ();

		return tmp;
	}

	abort ();
}

void *
rspamd_mempool_alloc_ (rspamd_mempool_t * pool, gsize size, const gchar *loc)
{
	return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_NORMAL, loc);
}

void *
rspamd_mempool_alloc0_ (rspamd_mempool_t * pool, gsize size, const gchar *loc)
{
	void *pointer = rspamd_mempool_alloc_ (pool, size, loc);

	memset (pointer, 0, size);

	return pointer;
}

void *
rspamd_mempool_alloc0_shared_ (rspamd_mempool_t * pool, gsize size, const gchar *loc)
{
	void *pointer = rspamd_mempool_alloc_shared (pool, size);

	memset (pointer, 0, size);

	return pointer;
}

void *
rspamd_mempool_alloc_shared_ (rspamd_mempool_t * pool, gsize size, const gchar *loc)
{
	return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_SHARED, loc);
}

gchar *
rspamd_mempool_strdup_ (rspamd_mempool_t * pool, const gchar *src, const gchar *loc)
{
	gsize len;
	gchar *newstr;

	if (src == NULL) {
		return NULL;
	}

	len = strlen (src);
	newstr = rspamd_mempool_alloc_ (pool, len + 1, loc);
	memcpy (newstr, src, len);
	newstr[len] = '\0';

	return newstr;
}

gchar *
rspamd_mempool_fstrdup_ (rspamd_mempool_t * pool, const struct f_str_s *src,
		const gchar *loc)
{
	gchar *newstr;

	if (src == NULL) {
		return NULL;
	}

	newstr = rspamd_mempool_alloc_ (pool, src->len + 1, loc);
	memcpy (newstr, src->str, src->len);
	newstr[src->len] = '\0';

	return newstr;
}

gchar *
rspamd_mempool_ftokdup_ (rspamd_mempool_t *pool, const rspamd_ftok_t *src,
		const gchar *loc)
{
	gchar *newstr;

	if (src == NULL) {
		return NULL;
	}

	newstr = rspamd_mempool_alloc_ (pool, src->len + 1, loc);
	memcpy (newstr, src->begin, src->len);
	newstr[src->len] = '\0';

	return newstr;
}
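
/*
 * Destructors are kept as a singly-linked list allocated from the pool itself
 * and are executed in registration (FIFO) order when the pool is destroyed or
 * when rspamd_mempool_destructors_enforce() is called explicitly.
 */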
void
rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
		rspamd_mempool_destruct_t func,
		void *data,
		const gchar *function,
		const gchar *line)
{
	struct _pool_destructors *cur;

	POOL_MTX_LOCK ();
	cur = rspamd_mempool_alloc_ (pool, sizeof (*cur), line);
	cur->func = func;
	cur->data = data;
	cur->function = function;
	cur->loc = line;
	cur->next = NULL;

	if (pool->priv->dtors_tail) {
		pool->priv->dtors_tail->next = cur;
		pool->priv->dtors_tail = cur;
	}
	else {
		pool->priv->dtors_head = cur;
		pool->priv->dtors_tail = cur;
	}

	POOL_MTX_UNLOCK ();
}

void
rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
		rspamd_mempool_destruct_t func,
		void *old_data,
		void *new_data)
{
	struct _pool_destructors *tmp;

	LL_FOREACH (pool->priv->dtors_head, tmp) {
		if (tmp->func == func && tmp->data == old_data) {
			tmp->func = func;
			tmp->data = new_data;
			break;
		}
	}
}

static gint
cmp_int (gconstpointer a, gconstpointer b)
{
	gint i1 = *(const gint *)a, i2 = *(const gint *)b;

	return i1 - i2;
}
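
/*
 * Recompute the suggested initial size for an entry point once its history
 * ring (elts) has wrapped around: sort the fragmentation-minus-leftover
 * samples, pick jittered high/low quantiles and grow or shrink cur_suggestion
 * accordingly, clamping the result to the [1KiB, 10MiB] range.
 */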
static void
rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
{
	gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
	guint i, jitter;

	for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
		sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
	}

	qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
	jitter = rspamd_random_uint64_fast () % 10;
	/*
	 * Take stochastic quantiles
	 */
	sel_pos = sz[50 + jitter];
	sel_neg = sz[4 + jitter];

	if (-sel_neg > sel_pos) {
		/* We need to reduce current suggestion */
		e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
	}
	else {
		/* We still want to grow */
		e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
	}

	/* Apply some sane limits given the mempool architecture */
	if (e->cur_suggestion < 1024) {
		e->cur_suggestion = 1024;
	}
	else if (e->cur_suggestion > 1024 * 1024 * 10) {
		e->cur_suggestion = 1024 * 1024 * 10;
	}

	memset (e->elts, 0, sizeof (e->elts));
}
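
/*
 * Run the destructors of all pool variables and drop the variables hash.
 * The per-entry cur_vars guess is also updated so that future pools created
 * from the same location preallocate a hash of roughly the right size.
 */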
static void
rspamd_mempool_variables_cleanup (rspamd_mempool_t * pool)
{
	if (pool->priv->variables) {
		struct rspamd_mempool_variable *var;

		kh_foreach_value_ptr (pool->priv->variables, var, {
			if (var->dtor) {
				var->dtor (var->data);
			}
		});

		if (pool->priv->entry && pool->priv->entry->cur_vars <
				kh_size (pool->priv->variables)) {
			/*
			 * Increase the preallocated size when:
			 * 1) Our previous guess was zero
			 * 2) Our new variables count is not more than twice larger than
			 *    the previous count
			 * 3) Our variables count is less than some hard limit
			 */
			static const guint max_preallocated_vars = 512;

			guint cur_size = kh_size (pool->priv->variables);
			guint old_guess = pool->priv->entry->cur_vars;
			guint new_guess;

			if (old_guess == 0) {
				new_guess = MIN (cur_size, max_preallocated_vars);
			}
			else {
				if (old_guess * 2 < cur_size) {
					new_guess = MIN (cur_size, max_preallocated_vars);
				}
				else {
					/* Too large step */
					new_guess = MIN (old_guess * 2, max_preallocated_vars);
				}
			}

			pool->priv->entry->cur_vars = new_guess;
		}

		kh_destroy (rspamd_mempool_vars_hash, pool->priv->variables);
		pool->priv->variables = NULL;
	}
}
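
/*
 * Run all registered destructors and variable destructors immediately,
 * without freeing the pool's memory chains. After this call the destructor
 * list and the variables hash are empty.
 */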
void
rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
{
	struct _pool_destructors *destructor;

	POOL_MTX_LOCK ();

	LL_FOREACH (pool->priv->dtors_head, destructor) {
		/* Avoid calling destructors for NULL pointers */
		if (destructor->data != NULL) {
			destructor->func (destructor->data);
		}
	}

	pool->priv->dtors_head = pool->priv->dtors_tail = NULL;

	rspamd_mempool_variables_cleanup (pool);

	POOL_MTX_UNLOCK ();
}

struct mempool_debug_elt {
	gsize sz;
	const gchar *loc;
};

static gint
rspamd_mempool_debug_elt_cmp (const void *a, const void *b)
{
	const struct mempool_debug_elt *e1 = a, *e2 = b;

	/* Inverse order */
	return (gint)((gssize)e2->sz) - ((gssize)e1->sz);
}
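
/*
 * Destroy a pool: dump per-location allocation statistics in debug mode,
 * record the leftover of the current chain for adaptive sizing, run all
 * destructors, free trash-stack allocations, then unmap/free every chain.
 * The first (embedded) chain is released together with the pool header.
 */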
void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
	struct _pool_chain *cur, *tmp;
	struct _pool_destructors *destructor;
	gpointer ptr;
	guint i;
	gsize len;

	POOL_MTX_LOCK ();

	cur = pool->priv->pools[RSPAMD_MEMPOOL_NORMAL];

	if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
		GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool) + sizeof (*pool));
		/* Show debug info */
		gsize ndtor = 0;
		LL_COUNT (pool->priv->dtors_head, destructor, ndtor);
		msg_info_pool ("destructing of the memory pool %p; elt size = %z; "
				"used memory = %Hz; wasted memory = %Hd; "
				"vars = %z; destructors = %z",
				pool,
				pool->priv->elt_len,
				pool->priv->used_memory,
				pool->priv->wasted_memory,
				pool->priv->variables ? (gsize)kh_size (pool->priv->variables) : (gsize)0,
				ndtor);

		GHashTableIter it;
		gpointer k, v;
		GArray *sorted_debug_size = g_array_sized_new (FALSE, FALSE,
				sizeof (struct mempool_debug_elt),
				g_hash_table_size (debug_tbl));

		g_hash_table_iter_init (&it, debug_tbl);

		while (g_hash_table_iter_next (&it, &k, &v)) {
			struct mempool_debug_elt e;
			e.loc = (const gchar *)k;
			e.sz = GPOINTER_TO_SIZE (v);
			g_array_append_val (sorted_debug_size, e);
		}

		g_array_sort (sorted_debug_size, rspamd_mempool_debug_elt_cmp);

		for (guint _i = 0; _i < sorted_debug_size->len; _i ++) {
			struct mempool_debug_elt *e;
			e = &g_array_index (sorted_debug_size, struct mempool_debug_elt, _i);
			msg_info_pool ("allocated %Hz from %s", e->sz, e->loc);
		}

		g_array_free (sorted_debug_size, TRUE);
		g_hash_table_unref (debug_tbl);
	}

	if (cur && mempool_entries) {
		pool->priv->entry->elts[pool->priv->entry->cur_elts].leftover =
				pool_chain_free (cur);

		pool->priv->entry->cur_elts = (pool->priv->entry->cur_elts + 1) %
				G_N_ELEMENTS (pool->priv->entry->elts);

		if (pool->priv->entry->cur_elts == 0) {
			rspamd_mempool_adjust_entry (pool->priv->entry);
		}
	}

	/* Call all pool destructors */
	LL_FOREACH (pool->priv->dtors_head, destructor) {
		/* Avoid calling destructors for NULL pointers */
		if (destructor->data != NULL) {
			destructor->func (destructor->data);
		}
	}

	rspamd_mempool_variables_cleanup (pool);

	if (pool->priv->trash_stack) {
		for (i = 0; i < pool->priv->trash_stack->len; i++) {
			ptr = g_ptr_array_index (pool->priv->trash_stack, i);
			g_free (ptr);
		}

		g_ptr_array_free (pool->priv->trash_stack, TRUE);
	}

	for (i = 0; i < G_N_ELEMENTS (pool->priv->pools); i ++) {
		if (pool->priv->pools[i]) {
			LL_FOREACH_SAFE (pool->priv->pools[i], cur, tmp) {
				g_atomic_int_add (&mem_pool_stat->bytes_allocated,
						-((gint)cur->slice_size));
				g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
				len = cur->slice_size + sizeof (struct _pool_chain);

				if (i == RSPAMD_MEMPOOL_SHARED) {
					munmap ((void *)cur, len);
				}
				else {
					/* The last pool is special, it is a part of the initial chunk */
					if (cur->next != NULL) {
						free (cur); /* Not g_free as we use system allocator */
					}
				}
			}
		}
	}

	g_atomic_int_inc (&mem_pool_stat->pools_freed);
	POOL_MTX_UNLOCK ();
	free (pool); /* allocated by posix_memalign */
}

void
rspamd_mempool_stat (rspamd_mempool_stat_t * st)
{
	if (mem_pool_stat != NULL) {
		st->pools_allocated = mem_pool_stat->pools_allocated;
		st->pools_freed = mem_pool_stat->pools_freed;
		st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
		st->bytes_allocated = mem_pool_stat->bytes_allocated;
		st->chunks_allocated = mem_pool_stat->chunks_allocated;
		st->chunks_freed = mem_pool_stat->chunks_freed;
		st->oversized_chunks = mem_pool_stat->oversized_chunks;
	}
}

void
rspamd_mempool_stat_reset (void)
{
	if (mem_pool_stat != NULL) {
		memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
	}
}

gsize
rspamd_mempool_suggest_size_ (const char *loc)
{
	return 0;
}

#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
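
/*
 * Spin once on a contended lock: decrement the spin counter and, once it is
 * exhausted, check whether the lock owner still exists (kill with signal 0)
 * so that locks abandoned by dead processes can be recovered; otherwise yield
 * or nanosleep. Returns 0 when the lock may be taken over, 1 to keep spinning.
 */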
static inline gint
__mutex_spin (rspamd_mempool_mutex_t * mutex)
{
	/* check spin count */
	if (g_atomic_int_dec_and_test (&mutex->spin)) {
		/* This may be deadlock, so check owner of this lock */
		if (mutex->owner == getpid ()) {
			/* This mutex was locked by the calling process, so it is just a double lock and we can easily unlock it */
			g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
			return 0;
		}
		else if (kill (mutex->owner, 0) == -1) {
			/* Owner process was not found, so release lock */
			g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
			return 0;
		}
		/* Spin again */
		g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
	}

#ifdef HAVE_SCHED_YIELD
	(void)sched_yield ();
#elif defined(HAVE_NANOSLEEP)
	struct timespec ts;
	ts.tv_sec = 0;
	ts.tv_nsec = MUTEX_SLEEP_TIME;
	/* Spin */
	while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
#else
#error No methods to spin are defined
#endif

	return 1;
}

static void
memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
{
	while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
		if (!__mutex_spin (mutex)) {
			return;
		}
	}
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
	rspamd_mempool_mutex_t *res;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
		res->lock = 0;
		res->owner = 0;
		res->spin = MUTEX_SPIN_COUNT;

		return res;
	}

	return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
	memory_pool_mutex_spin (mutex);
	mutex->owner = getpid ();
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
	mutex->owner = 0;
	(void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
	rspamd_mempool_rwlock_t *lock;

	lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
	lock->__r_lock = rspamd_mempool_get_mutex (pool);
	lock->__w_lock = rspamd_mempool_get_mutex (pool);

	return lock;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	/* Spin on write lock */
	while (g_atomic_int_get (&lock->__w_lock->lock)) {
		if (!__mutex_spin (lock->__w_lock)) {
			break;
		}
	}

	g_atomic_int_inc (&lock->__r_lock->lock);
	lock->__r_lock->owner = getpid ();
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	/* Spin on write lock first */
	rspamd_mempool_lock_mutex (lock->__w_lock);
	/* Now we have the write lock set up */
	/* Wait for all readers */
	while (g_atomic_int_get (&lock->__r_lock->lock)) {
		__mutex_spin (lock->__r_lock);
	}
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	if (g_atomic_int_get (&lock->__r_lock->lock)) {
		(void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
	}
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	rspamd_mempool_unlock_mutex (lock->__w_lock);
}

#else
/*
 * Pthread-based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
	rspamd_mempool_mutex_t *res;
	pthread_mutexattr_t mattr;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));

		pthread_mutexattr_init (&mattr);
		pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
		pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
		pthread_mutex_init (res, &mattr);
		rspamd_mempool_add_destructor (pool,
				(rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
		pthread_mutexattr_destroy (&mattr);

		return res;
	}

	return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
	pthread_mutex_lock (mutex);
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
	pthread_mutex_unlock (mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
	rspamd_mempool_rwlock_t *res;
	pthread_rwlockattr_t mattr;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));

		pthread_rwlockattr_init (&mattr);
		pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
		pthread_rwlock_init (res, &mattr);
		rspamd_mempool_add_destructor (pool,
				(rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
		pthread_rwlockattr_destroy (&mattr);

		return res;
	}

	return NULL;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	pthread_rwlock_rdlock (lock);
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	pthread_rwlock_wrlock (lock);
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	pthread_rwlock_unlock (lock);
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
	pthread_rwlock_unlock (lock);
}
#endif

#define RSPAMD_MEMPOOL_VARS_HASH_SEED 0xb32ad7c55eb2e647ULL
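
/*
 * Pool variables are stored in a khash keyed by a hash of the variable name
 * (the name string itself is not stored). Each variable may carry an optional
 * destructor that is invoked when the variable is removed or when the pool
 * is destroyed.
 */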
void
rspamd_mempool_set_variable (rspamd_mempool_t *pool,
		const gchar *name,
		gpointer value,
		rspamd_mempool_destruct_t destructor)
{
	if (pool->priv->variables == NULL) {
		pool->priv->variables = kh_init (rspamd_mempool_vars_hash);

		if (pool->priv->entry->cur_vars > 0) {
			/* Preallocate */
			kh_resize (rspamd_mempool_vars_hash,
					pool->priv->variables,
					pool->priv->entry->cur_vars);
		}
	}

	gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
			RSPAMD_MEMPOOL_VARS_HASH_SEED);
	khiter_t it;
	gint r;

	it = kh_put (rspamd_mempool_vars_hash, pool->priv->variables, hv, &r);

	if (it == kh_end (pool->priv->variables)) {
		g_assert_not_reached ();
	}
	else {
		struct rspamd_mempool_variable *pvar;

		if (r == 0) {
			/* Existing entry, maybe need cleanup */
			pvar = &kh_val (pool->priv->variables, it);

			if (pvar->dtor) {
				pvar->dtor (pvar->data);
			}
		}

		pvar = &kh_val (pool->priv->variables, it);
		pvar->data = value;
		pvar->dtor = destructor;
	}
}

gpointer
rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
{
	if (pool->priv->variables == NULL) {
		return NULL;
	}

	khiter_t it;
	gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
			RSPAMD_MEMPOOL_VARS_HASH_SEED);

	it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);

	if (it != kh_end (pool->priv->variables)) {
		struct rspamd_mempool_variable *pvar;

		pvar = &kh_val (pool->priv->variables, it);

		return pvar->data;
	}

	return NULL;
}

void
rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
{
	if (pool->priv->variables != NULL) {
		khiter_t it;
		gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
				RSPAMD_MEMPOOL_VARS_HASH_SEED);

		it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);

		if (it != kh_end (pool->priv->variables)) {
			struct rspamd_mempool_variable *pvar;

			pvar = &kh_val (pool->priv->variables, it);

			if (pvar->dtor) {
				pvar->dtor (pvar->data);
			}

			kh_del (rspamd_mempool_vars_hash, pool->priv->variables, it);
		}
	}
}
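
/*
 * GList helpers that allocate list cells from the pool instead of the GLib
 * slice allocator; such lists must never be freed with g_list_free(), they
 * are released together with the pool.
 */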
GList *
rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
{
	GList *cell;

	cell = rspamd_mempool_alloc (pool, sizeof (*cell));
	cell->prev = NULL;
	cell->data = p;

	if (l == NULL) {
		cell->next = NULL;
	}
	else {
		cell->next = l;
		l->prev = cell;
	}

	return cell;
}

GList *
rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
{
	GList *cell, *cur;

	cell = rspamd_mempool_alloc (pool, sizeof (*cell));
	cell->next = NULL;
	cell->data = p;

	if (l) {
		for (cur = l; cur->next != NULL; cur = cur->next) {}
		cur->next = cell;
		cell->prev = cur;
	}
	else {
		l = cell;
		l->prev = NULL;
	}

	return l;
}

gsize
rspamd_mempool_get_used_size (rspamd_mempool_t *pool)
{
	return pool->priv->used_memory;
}

gsize
rspamd_mempool_get_wasted_size (rspamd_mempool_t *pool)
{
	return pool->priv->wasted_memory;
}