
mem_pool.c 31KB

  1. /*-
  2. * Copyright 2016 Vsevolod Stakhov
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "config.h"
  17. #include "mem_pool.h"
  18. #include "fstring.h"
  19. #include "logger.h"
  20. #include "ottery.h"
  21. #include "unix-std.h"
  22. #include "khash.h"
  23. #include "cryptobox.h"
  24. #include "contrib/uthash/utlist.h"
  25. #include "mem_pool_internal.h"
  26. #ifdef WITH_JEMALLOC
  27. #include <jemalloc/jemalloc.h>
  28. #if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
  29. #define HAVE_MALLOC_SIZE 1
  30. #define sys_alloc_size(sz) nallocx(sz, 0)
  31. #endif
  32. #elif defined(__APPLE__)
  33. #include <malloc/malloc.h>
  34. #define HAVE_MALLOC_SIZE 1
  35. #define sys_alloc_size(sz) malloc_good_size(sz)
  36. #endif
  37. #ifdef HAVE_SCHED_YIELD
  38. #include <sched.h>
  39. #endif
  40. /* Sleep time for spin lock in nanoseconds */
  41. #define MUTEX_SLEEP_TIME 10000000L
  42. #define MUTEX_SPIN_COUNT 100
  43. #define POOL_MTX_LOCK() do { } while (0)
  44. #define POOL_MTX_UNLOCK() do { } while (0)
  45. /*
  46. * This define specifies whether we should check all pools for free space for a new object
  47. * or just begin scanning from the current (most recently attached) pool.
  48. * If MEMORY_GREEDY is defined, we scan all pools to find free space (more CPU usage, slower,
  49. * but requires less memory). If it is not defined, only the current pool is checked, and if the
  50. * object is too large to fit there a new pool is allocated (this may also cause high CPU usage
  51. * in some cases, but is generally faster than the greedy method).
  52. */
  53. #undef MEMORY_GREEDY
  54. static inline uint32_t
  55. rspamd_entry_hash (const char *str)
  56. {
  57. return (guint)rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
  58. }
  59. static inline int
  60. rspamd_entry_equal (const char *k1, const char *k2)
  61. {
  62. return strcmp (k1, k2) == 0;
  63. }
  64. KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
  65. 1, rspamd_entry_hash, rspamd_entry_equal)
  66. static khash_t(mempool_entry) *mempool_entries = NULL;
  67. /* Internal statistics */
  68. static rspamd_mempool_stat_t *mem_pool_stat = NULL;
  69. /* Environment variable */
  70. static gboolean env_checked = FALSE;
  71. static gboolean always_malloc = FALSE;
  72. /**
  73. * Function that returns the free space in a pool page
  74. * @param chain pool page struct
  75. */
  76. static gsize
  77. pool_chain_free (struct _pool_chain *chain)
  78. {
  79. gint64 occupied = chain->pos - chain->begin + MIN_MEM_ALIGNMENT;
  80. return (occupied < (gint64)chain->slice_size ?
  81. chain->slice_size - occupied : 0);
  82. }
  83. /* By default allocate 4Kb chunks of memory */
  84. #define FIXED_POOL_SIZE 4096
  85. static inline struct rspamd_mempool_entry_point *
  86. rspamd_mempool_entry_new (const gchar *loc)
  87. {
  88. struct rspamd_mempool_entry_point **pentry, *entry;
  89. gint r;
  90. khiter_t k;
  91. k = kh_put (mempool_entry, mempool_entries, loc, &r);
  92. if (r >= 0) {
  93. pentry = &kh_value (mempool_entries, k);
  94. entry = g_malloc0 (sizeof (*entry));
  95. *pentry = entry;
  96. memset (entry, 0, sizeof (*entry));
  97. rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
  98. #ifdef HAVE_GETPAGESIZE
  99. entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
  100. #else
  101. entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
  102. #endif
  103. }
  104. else {
  105. g_assert_not_reached ();
  106. }
  107. return entry;
  108. }
  109. RSPAMD_CONSTRUCTOR (rspamd_mempool_entries_ctor)
  110. {
  111. if (mempool_entries == NULL) {
  112. mempool_entries = kh_init (mempool_entry);
  113. }
  114. }
  115. RSPAMD_DESTRUCTOR (rspamd_mempool_entries_dtor)
  116. {
  117. struct rspamd_mempool_entry_point *elt;
  118. kh_foreach_value (mempool_entries, elt, {
  119. g_free (elt);
  120. });
  121. kh_destroy (mempool_entry, mempool_entries);
  122. mempool_entries = NULL;
  123. }
  124. static inline struct rspamd_mempool_entry_point *
  125. rspamd_mempool_get_entry (const gchar *loc)
  126. {
  127. khiter_t k;
  128. struct rspamd_mempool_entry_point *elt;
  129. if (G_UNLIKELY (!mempool_entries)) {
  130. rspamd_mempool_entries_ctor();
  131. }
  132. k = kh_get (mempool_entry, mempool_entries, loc);
  133. if (k != kh_end (mempool_entries)) {
  134. elt = kh_value (mempool_entries, k);
  135. return elt;
  136. }
  137. return rspamd_mempool_entry_new(loc);
  138. }
  139. static struct _pool_chain *
  140. rspamd_mempool_chain_new (gsize size, gsize alignment, enum rspamd_mempool_chain_type pool_type)
  141. {
  142. struct _pool_chain *chain;
  143. gsize total_size = size + sizeof (struct _pool_chain) + alignment,
  144. optimal_size = 0;
  145. gpointer map;
  146. g_assert (size > 0);
  147. if (pool_type == RSPAMD_MEMPOOL_SHARED) {
  148. #if defined(HAVE_MMAP_ANON)
  149. map = mmap (NULL,
  150. total_size,
  151. PROT_READ | PROT_WRITE,
  152. MAP_ANON | MAP_SHARED,
  153. -1,
  154. 0);
  155. if (map == MAP_FAILED) {
  156. g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
  157. G_STRLOC, total_size);
  158. abort ();
  159. }
  160. chain = map;
  161. chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
  162. #elif defined(HAVE_MMAP_ZERO)
  163. gint fd;
  164. fd = open ("/dev/zero", O_RDWR);
  165. if (fd == -1) {
  166. return NULL;
  167. }
  168. map = mmap (NULL,
  169. size + sizeof (struct _pool_chain),
  170. PROT_READ | PROT_WRITE,
  171. MAP_SHARED,
  172. fd,
  173. 0);
  174. if (map == MAP_FAILED) {
  175. msg_err ("cannot allocate %z bytes, aborting", size +
  176. sizeof (struct _pool_chain));
  177. abort ();
  178. }
  179. chain = map;
  180. chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
  181. #else
  182. #error No mmap methods are defined
  183. #endif
  184. g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
  185. g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
  186. }
  187. else {
  188. #ifdef HAVE_MALLOC_SIZE
  189. optimal_size = sys_alloc_size (total_size);
  190. #endif
  191. total_size = MAX (total_size, optimal_size);
  192. gint ret = posix_memalign (&map, alignment, total_size);
  193. if (ret != 0 || map == NULL) {
  194. g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
  195. G_STRLOC, total_size, ret, strerror (errno));
  196. abort ();
  197. }
  198. chain = map;
  199. chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
  200. g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
  201. g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
  202. }
  203. chain->pos = align_ptr (chain->begin, alignment);
  204. chain->slice_size = total_size - sizeof (struct _pool_chain);
  205. return chain;
  206. }
  207. /**
  208. * Get the current pool chain of the specified type for the given pool
  209. * (returns NULL if no chain of that type has been attached yet)
  210. * @param pool
  211. * @param pool_type
  212. * @return
  213. */
  214. static struct _pool_chain *
  215. rspamd_mempool_get_chain (rspamd_mempool_t * pool,
  216. enum rspamd_mempool_chain_type pool_type)
  217. {
  218. g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
  219. return pool->priv->pools[pool_type];
  220. }
  221. static void
  222. rspamd_mempool_append_chain (rspamd_mempool_t * pool,
  223. struct _pool_chain *chain,
  224. enum rspamd_mempool_chain_type pool_type)
  225. {
  226. g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
  227. g_assert (chain != NULL);
  228. LL_PREPEND (pool->priv->pools[pool_type], chain);
  229. }
  230. /**
  231. * Allocate a new memory pool
  232. * @param size size of pool's page
  233. * @return new memory pool object
  234. */
  235. rspamd_mempool_t *
  236. rspamd_mempool_new_ (gsize size, const gchar *tag, gint flags, const gchar *loc)
  237. {
  238. rspamd_mempool_t *new_pool;
  239. gpointer map;
  240. unsigned char uidbuf[10];
  241. const gchar hexdigits[] = "0123456789abcdef";
  242. unsigned i;
  243. /* Allocate the statistics structure if it has not been allocated yet */
  244. if (mem_pool_stat == NULL) {
  245. #if defined(HAVE_MMAP_ANON)
  246. map = mmap (NULL,
  247. sizeof (rspamd_mempool_stat_t),
  248. PROT_READ | PROT_WRITE,
  249. MAP_ANON | MAP_SHARED,
  250. -1,
  251. 0);
  252. if (map == MAP_FAILED) {
  253. msg_err ("cannot allocate %z bytes, aborting",
  254. sizeof (rspamd_mempool_stat_t));
  255. abort ();
  256. }
  257. mem_pool_stat = (rspamd_mempool_stat_t *)map;
  258. #elif defined(HAVE_MMAP_ZERO)
  259. gint fd;
  260. fd = open ("/dev/zero", O_RDWR);
  261. g_assert (fd != -1);
  262. map = mmap (NULL,
  263. sizeof (rspamd_mempool_stat_t),
  264. PROT_READ | PROT_WRITE,
  265. MAP_SHARED,
  266. fd,
  267. 0);
  268. if (map == MAP_FAILED) {
  269. msg_err ("cannot allocate %z bytes, aborting",
  270. sizeof (rspamd_mempool_stat_t));
  271. abort ();
  272. }
  273. mem_pool_stat = (rspamd_mempool_stat_t *)map;
  274. #else
  275. # error No mmap methods are defined
  276. #endif
  277. memset (map, 0, sizeof (rspamd_mempool_stat_t));
  278. }
  279. if (!env_checked) {
  280. /* Check the VALGRIND environment variable to force plain malloc for memory pool debugging */
  281. const char *g_slice;
  282. g_slice = getenv ("VALGRIND");
  283. if (g_slice != NULL) {
  284. always_malloc = TRUE;
  285. }
  286. env_checked = TRUE;
  287. }
  288. struct rspamd_mempool_entry_point *entry = rspamd_mempool_get_entry (loc);
  289. gsize total_size;
  290. if (size == 0 && entry) {
  291. size = entry->cur_suggestion;
  292. }
  293. total_size = sizeof (rspamd_mempool_t) +
  294. sizeof (struct rspamd_mempool_specific) +
  295. MIN_MEM_ALIGNMENT +
  296. sizeof (struct _pool_chain) +
  297. size;
  298. if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
  299. total_size += sizeof (GHashTable *);
  300. }
  301. /*
  302. * Memory layout:
  303. * struct rspamd_mempool_t
  304. * <optional debug hash table>
  305. * struct rspamd_mempool_specific
  306. * struct _pool_chain
  307. * alignment (if needed)
  308. * memory chunk
  309. */
  310. guchar *mem_chunk;
  311. gint ret = posix_memalign ((void **)&mem_chunk, MIN_MEM_ALIGNMENT,
  312. total_size);
  313. gsize priv_offset;
  314. if (ret != 0 || mem_chunk == NULL) {
  315. g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes: %d - %s",
  316. G_STRLOC, total_size, ret, strerror (errno));
  317. abort ();
  318. }
  319. /* Set memory layout */
  320. new_pool = (rspamd_mempool_t *)mem_chunk;
  321. if (G_UNLIKELY (flags & RSPAMD_MEMPOOL_DEBUG)) {
  322. /* Allocate debug table */
  323. GHashTable *debug_tbl;
  324. debug_tbl = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
  325. memcpy (mem_chunk + sizeof (rspamd_mempool_t), &debug_tbl,
  326. sizeof (GHashTable *));
  327. priv_offset = sizeof (rspamd_mempool_t) + sizeof (GHashTable *);
  328. }
  329. else {
  330. priv_offset = sizeof (rspamd_mempool_t);
  331. }
  332. new_pool->priv = (struct rspamd_mempool_specific *)(mem_chunk +
  333. priv_offset);
  334. /* Zero memory for specific and for the first chain */
  335. memset (new_pool->priv, 0, sizeof (struct rspamd_mempool_specific) +
  336. sizeof (struct _pool_chain));
  337. new_pool->priv->entry = entry;
  338. new_pool->priv->elt_len = size;
  339. new_pool->priv->flags = flags;
  340. if (tag) {
  341. rspamd_strlcpy (new_pool->tag.tagname, tag, sizeof (new_pool->tag.tagname));
  342. }
  343. else {
  344. new_pool->tag.tagname[0] = '\0';
  345. }
  346. /* Generate new uid */
  347. ottery_rand_bytes (uidbuf, sizeof (uidbuf));
  348. for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
  349. new_pool->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
  350. new_pool->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
  351. }
  352. new_pool->tag.uid[19] = '\0';
  353. mem_pool_stat->pools_allocated++;
  354. /* Now we can attach one chunk to speed up simple allocations */
  355. struct _pool_chain *nchain;
  356. nchain = (struct _pool_chain *)
  357. (mem_chunk +
  358. priv_offset +
  359. sizeof (struct rspamd_mempool_specific));
  360. guchar *unaligned = mem_chunk +
  361. priv_offset +
  362. sizeof (struct rspamd_mempool_specific) +
  363. sizeof (struct _pool_chain);
  364. nchain->slice_size = size;
  365. nchain->begin = unaligned;
  366. nchain->slice_size = size;
  367. nchain->pos = align_ptr (unaligned, MIN_MEM_ALIGNMENT);
  368. new_pool->priv->pools[RSPAMD_MEMPOOL_NORMAL] = nchain;
  369. new_pool->priv->used_memory = size;
  370. /* Adjust stats */
  371. g_atomic_int_add (&mem_pool_stat->bytes_allocated,
  372. (gint)size);
  373. g_atomic_int_add (&mem_pool_stat->chunks_allocated, 1);
  374. return new_pool;
  375. }
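
The constructor above is normally reached through the convenience macros in mem_pool.h; the following minimal sketch assumes rspamd_mempool_new(), rspamd_mempool_alloc() and rspamd_mempool_alloc0() expand to the corresponding *_ functions with G_STRLOC as the allocation site. It only illustrates the intended lifecycle: create a pool, carve objects out of it, release everything at once.

#include "mem_pool.h"

static void
example_pool_lifecycle (void)
{
	/* size == 0 lets the per-callsite entry point suggest a page size */
	rspamd_mempool_t *pool = rspamd_mempool_new (0, "example", 0);

	/* Plain allocation: carved from the current chain, no individual free */
	struct item { int id; double weight; };
	struct item *it = rspamd_mempool_alloc (pool, sizeof (*it));
	it->id = 1;
	it->weight = 0.5;

	/* Zero-filled allocation */
	unsigned char *buf = rspamd_mempool_alloc0 (pool, 128);
	(void)buf;

	/* Destructors run, chains are freed, statistics are adjusted */
	rspamd_mempool_delete (pool);
}
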
  376. static void *
  377. memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size, gsize alignment,
  378. enum rspamd_mempool_chain_type pool_type,
  379. const gchar *loc)
  380. RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MIN_MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;
  381. void
  382. rspamd_mempool_notify_alloc_ (rspamd_mempool_t *pool, gsize size, const gchar *loc)
  383. {
  384. if (pool && G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
  385. GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool + sizeof (*pool)));
  386. gpointer ptr;
  387. ptr = g_hash_table_lookup (debug_tbl, loc);
  388. if (ptr) {
  389. ptr = GSIZE_TO_POINTER (GPOINTER_TO_SIZE (ptr) + size);
  390. }
  391. else {
  392. ptr = GSIZE_TO_POINTER (size);
  393. }
  394. g_hash_table_insert (debug_tbl, (gpointer) loc, ptr);
  395. }
  396. }
  397. static void *
  398. memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size, gsize alignment,
  399. enum rspamd_mempool_chain_type pool_type, const gchar *loc)
  400. {
  401. guint8 *tmp;
  402. struct _pool_chain *new, *cur;
  403. gsize free = 0;
  404. if (pool) {
  405. POOL_MTX_LOCK ();
  406. pool->priv->used_memory += size;
  407. if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
  408. rspamd_mempool_notify_alloc_ (pool, size, loc);
  409. }
  410. if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
  411. void *ptr;
  412. if (alignment <= G_MEM_ALIGN) {
  413. ptr = g_malloc(size);
  414. }
  415. else {
  416. ptr = g_malloc(size + alignment);
  417. ptr = align_ptr(ptr, alignment);
  418. }
  419. POOL_MTX_UNLOCK ();
  420. if (pool->priv->trash_stack == NULL) {
  421. pool->priv->trash_stack = g_ptr_array_sized_new (128);
  422. }
  423. g_ptr_array_add (pool->priv->trash_stack, ptr);
  424. return ptr;
  425. }
  426. cur = rspamd_mempool_get_chain (pool, pool_type);
  427. /* Find free space in pool chain */
  428. if (cur) {
  429. free = pool_chain_free (cur);
  430. }
  431. if (cur == NULL || free < size + alignment) {
  432. if (free < size) {
  433. pool->priv->wasted_memory += free;
  434. }
  435. /* Allocate new chain element */
  436. if (pool->priv->elt_len >= size + alignment) {
  437. pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += size;
  438. new = rspamd_mempool_chain_new (pool->priv->elt_len, alignment,
  439. pool_type);
  440. }
  441. else {
  442. mem_pool_stat->oversized_chunks++;
  443. g_atomic_int_add (&mem_pool_stat->fragmented_size,
  444. free);
  445. pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += free;
  446. new = rspamd_mempool_chain_new (size + pool->priv->elt_len, alignment,
  447. pool_type);
  448. }
  449. /* Connect to pool subsystem */
  450. rspamd_mempool_append_chain (pool, new, pool_type);
  451. /* No need to align again, aligned by rspamd_mempool_chain_new */
  452. tmp = new->pos;
  453. new->pos = tmp + size;
  454. POOL_MTX_UNLOCK ();
  455. return tmp;
  456. }
  457. /* No need to allocate page */
  458. tmp = align_ptr (cur->pos, alignment);
  459. cur->pos = tmp + size;
  460. POOL_MTX_UNLOCK ();
  461. return tmp;
  462. }
  463. abort ();
  464. }
  465. void *
  466. rspamd_mempool_alloc_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
  467. {
  468. return memory_pool_alloc_common (pool, size, alignment, RSPAMD_MEMPOOL_NORMAL, loc);
  469. }
  470. void *
  471. rspamd_mempool_alloc0_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
  472. {
  473. void *pointer = rspamd_mempool_alloc_ (pool, size, alignment, loc);
  474. memset (pointer, 0, size);
  475. return pointer;
  476. }
  477. void *
  478. rspamd_mempool_alloc0_shared_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
  479. {
  480. void *pointer = rspamd_mempool_alloc_shared_ (pool, size, alignment, loc);
  481. memset (pointer, 0, size);
  482. return pointer;
  483. }
  484. void *
  485. rspamd_mempool_alloc_shared_ (rspamd_mempool_t * pool, gsize size, gsize alignment, const gchar *loc)
  486. {
  487. return memory_pool_alloc_common (pool, size, alignment, RSPAMD_MEMPOOL_SHARED, loc);
  488. }
  489. gchar *
  490. rspamd_mempool_strdup_ (rspamd_mempool_t * pool, const gchar *src, const gchar *loc)
  491. {
  492. gsize len;
  493. gchar *newstr;
  494. if (src == NULL) {
  495. return NULL;
  496. }
  497. len = strlen (src);
  498. newstr = rspamd_mempool_alloc_ (pool, len + 1, MIN_MEM_ALIGNMENT, loc);
  499. memcpy (newstr, src, len);
  500. newstr[len] = '\0';
  501. return newstr;
  502. }
  503. gchar *
  504. rspamd_mempool_fstrdup_ (rspamd_mempool_t * pool, const struct f_str_s *src,
  505. const gchar *loc)
  506. {
  507. gchar *newstr;
  508. if (src == NULL) {
  509. return NULL;
  510. }
  511. newstr = rspamd_mempool_alloc_ (pool, src->len + 1, MIN_MEM_ALIGNMENT, loc);
  512. memcpy (newstr, src->str, src->len);
  513. newstr[src->len] = '\0';
  514. return newstr;
  515. }
  516. gchar *
  517. rspamd_mempool_ftokdup_ (rspamd_mempool_t *pool, const rspamd_ftok_t *src,
  518. const gchar *loc)
  519. {
  520. gchar *newstr;
  521. if (src == NULL) {
  522. return NULL;
  523. }
  524. newstr = rspamd_mempool_alloc_ (pool, src->len + 1, MIN_MEM_ALIGNMENT, loc);
  525. memcpy (newstr, src->begin, src->len);
  526. newstr[src->len] = '\0';
  527. return newstr;
  528. }
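
For illustration, a short sketch of the duplication helpers above, assuming the rspamd_mempool_strdup()/rspamd_mempool_ftokdup() macros from mem_pool.h forward the caller location to the *_ functions; every copy lives exactly as long as the pool.

#include "mem_pool.h"
#include "fstring.h"

static const gchar *
example_dup (rspamd_mempool_t *pool, const rspamd_ftok_t *tok)
{
	/* NUL-terminated copy of a C string, owned by the pool */
	gchar *copy = rspamd_mempool_strdup (pool, "persistent value");

	/* NUL-terminated copy of a (begin, len) token, owned by the pool */
	gchar *tok_copy = rspamd_mempool_ftokdup (pool, tok);

	return tok_copy ? tok_copy : copy;
}
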
  529. void
  530. rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
  531. rspamd_mempool_destruct_t func,
  532. void *data,
  533. const gchar *function,
  534. const gchar *line)
  535. {
  536. struct _pool_destructors *cur;
  537. POOL_MTX_LOCK ();
  538. cur = rspamd_mempool_alloc_ (pool, sizeof (*cur),
  539. RSPAMD_ALIGNOF(struct _pool_destructors), line);
  540. cur->func = func;
  541. cur->data = data;
  542. cur->function = function;
  543. cur->loc = line;
  544. cur->next = NULL;
  545. if (pool->priv->dtors_tail) {
  546. pool->priv->dtors_tail->next = cur;
  547. pool->priv->dtors_tail = cur;
  548. }
  549. else {
  550. pool->priv->dtors_head = cur;
  551. pool->priv->dtors_tail = cur;
  552. }
  553. POOL_MTX_UNLOCK ();
  554. }
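
A minimal sketch of registering a cleanup callback through the rspamd_mempool_add_destructor() wrapper (assumed to pass G_STRFUNC and G_STRLOC to the function above); resources allocated outside the pool are then released together with it, in registration order.

#include "mem_pool.h"
#include <glib.h>

static void
example_register_dtor (rspamd_mempool_t *pool)
{
	GHashTable *tbl = g_hash_table_new (g_str_hash, g_str_equal);

	/* The table is destroyed by rspamd_mempool_delete() or
	 * rspamd_mempool_destructors_enforce(), whichever runs first */
	rspamd_mempool_add_destructor (pool,
			(rspamd_mempool_destruct_t)g_hash_table_unref, tbl);
}
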
  555. void
  556. rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
  557. rspamd_mempool_destruct_t func,
  558. void *old_data,
  559. void *new_data)
  560. {
  561. struct _pool_destructors *tmp;
  562. LL_FOREACH (pool->priv->dtors_head, tmp) {
  563. if (tmp->func == func && tmp->data == old_data) {
  564. tmp->func = func;
  565. tmp->data = new_data;
  566. break;
  567. }
  568. }
  569. }
  570. static gint
  571. cmp_int (gconstpointer a, gconstpointer b)
  572. {
  573. gint i1 = *(const gint *)a, i2 = *(const gint *)b;
  574. return i1 - i2;
  575. }
  576. static void
  577. rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
  578. {
  579. gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
  580. guint i, jitter;
  581. for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
  582. sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
  583. }
  584. qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
  585. jitter = rspamd_random_uint64_fast () % 10;
  586. /*
  587. * Take stochastic quantiles
  588. */
  589. sel_pos = sz[50 + jitter];
  590. sel_neg = sz[4 + jitter];
  591. if (-sel_neg > sel_pos) {
  592. /* We need to reduce current suggestion */
  593. e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
  594. }
  595. else {
  596. /* We still want to grow */
  597. e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
  598. }
  599. /* Some sane limits, considering the mempool architecture */
  600. if (e->cur_suggestion < 1024) {
  601. e->cur_suggestion = 1024;
  602. }
  603. else if (e->cur_suggestion > 1024 * 1024 * 10) {
  604. e->cur_suggestion = 1024 * 1024 * 10;
  605. }
  606. memset (e->elts, 0, sizeof (e->elts));
  607. }
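
As a worked example of the growth branch above: with cur_suggestion = 4096 and a positive quantile sel_pos = 2048, the new suggestion becomes 4096 * (1 + 2048/4096) * 1.5 = 9216 bytes, which stays within the 1024-byte to 10 MiB clamp applied at the end of the function; the shrink branch divides by the symmetric factor when the negative quantile dominates.
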
  608. static void
  609. rspamd_mempool_variables_cleanup (rspamd_mempool_t * pool)
  610. {
  611. if (pool->priv->variables) {
  612. struct rspamd_mempool_variable *var;
  613. kh_foreach_value_ptr (pool->priv->variables, var, {
  614. if (var->dtor) {
  615. var->dtor (var->data);
  616. }
  617. });
  618. if (pool->priv->entry && pool->priv->entry->cur_vars <
  619. kh_size (pool->priv->variables)) {
  620. /*
  621. * Increase the preallocated size in the following cases:
  622. * 1) Our previous guess was zero
  623. * 2) Our new variables count is not more than twice larger than
  624. * the previous count
  625. * 3) Our variables count is less than some hard limit
  626. */
  627. static const guint max_preallocated_vars = 512;
  628. guint cur_size = kh_size (pool->priv->variables);
  629. guint old_guess = pool->priv->entry->cur_vars;
  630. guint new_guess;
  631. if (old_guess == 0) {
  632. new_guess = MIN (cur_size, max_preallocated_vars);
  633. }
  634. else {
  635. if (old_guess * 2 < cur_size) {
  636. new_guess = MIN (cur_size, max_preallocated_vars);
  637. }
  638. else {
  639. /* Too large step */
  640. new_guess = MIN (old_guess * 2, max_preallocated_vars);
  641. }
  642. }
  643. pool->priv->entry->cur_vars = new_guess;
  644. }
  645. kh_destroy (rspamd_mempool_vars_hash, pool->priv->variables);
  646. pool->priv->variables = NULL;
  647. }
  648. }
  649. void
  650. rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
  651. {
  652. struct _pool_destructors *destructor;
  653. POOL_MTX_LOCK ();
  654. LL_FOREACH (pool->priv->dtors_head, destructor) {
  655. /* Avoid calling destructors for NULL pointers */
  656. if (destructor->data != NULL) {
  657. destructor->func (destructor->data);
  658. }
  659. }
  660. pool->priv->dtors_head = pool->priv->dtors_tail = NULL;
  661. rspamd_mempool_variables_cleanup (pool);
  662. POOL_MTX_UNLOCK ();
  663. }
  664. struct mempool_debug_elt {
  665. gsize sz;
  666. const gchar *loc;
  667. };
  668. static gint
  669. rspamd_mempool_debug_elt_cmp (const void *a, const void *b)
  670. {
  671. const struct mempool_debug_elt *e1 = a, *e2 = b;
  672. /* Inverse order */
  673. return (gint)((gssize)e2->sz) - ((gssize)e1->sz);
  674. }
  675. void
  676. rspamd_mempool_delete (rspamd_mempool_t * pool)
  677. {
  678. struct _pool_chain *cur, *tmp;
  679. struct _pool_destructors *destructor;
  680. gpointer ptr;
  681. guint i;
  682. gsize len;
  683. POOL_MTX_LOCK ();
  684. cur = pool->priv->pools[RSPAMD_MEMPOOL_NORMAL];
  685. if (G_UNLIKELY (pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
  686. GHashTable *debug_tbl = *(GHashTable **)(((guchar *)pool) + sizeof (*pool));
  687. /* Show debug info */
  688. gsize ndtor = 0;
  689. LL_COUNT (pool->priv->dtors_head, destructor, ndtor);
  690. msg_info_pool ("destructing of the memory pool %p; elt size = %z; "
  691. "used memory = %Hz; wasted memory = %Hd; "
  692. "vars = %z; destructors = %z",
  693. pool,
  694. pool->priv->elt_len,
  695. pool->priv->used_memory,
  696. pool->priv->wasted_memory,
  697. pool->priv->variables ? (gsize)kh_size (pool->priv->variables) : (gsize)0,
  698. ndtor);
  699. GHashTableIter it;
  700. gpointer k, v;
  701. GArray *sorted_debug_size = g_array_sized_new (FALSE, FALSE,
  702. sizeof (struct mempool_debug_elt),
  703. g_hash_table_size (debug_tbl));
  704. g_hash_table_iter_init (&it, debug_tbl);
  705. while (g_hash_table_iter_next (&it, &k, &v)) {
  706. struct mempool_debug_elt e;
  707. e.loc = (const gchar *)k;
  708. e.sz = GPOINTER_TO_SIZE (v);
  709. g_array_append_val (sorted_debug_size, e);
  710. }
  711. g_array_sort (sorted_debug_size, rspamd_mempool_debug_elt_cmp);
  712. for (guint _i = 0; _i < sorted_debug_size->len; _i ++) {
  713. struct mempool_debug_elt *e;
  714. e = &g_array_index (sorted_debug_size, struct mempool_debug_elt, _i);
  715. msg_info_pool ("allocated %Hz from %s", e->sz, e->loc);
  716. }
  717. g_array_free (sorted_debug_size, TRUE);
  718. g_hash_table_unref (debug_tbl);
  719. }
  720. if (cur && mempool_entries) {
  721. pool->priv->entry->elts[pool->priv->entry->cur_elts].leftover =
  722. pool_chain_free (cur);
  723. pool->priv->entry->cur_elts = (pool->priv->entry->cur_elts + 1) %
  724. G_N_ELEMENTS (pool->priv->entry->elts);
  725. if (pool->priv->entry->cur_elts == 0) {
  726. rspamd_mempool_adjust_entry (pool->priv->entry);
  727. }
  728. }
  729. /* Call all pool destructors */
  730. LL_FOREACH (pool->priv->dtors_head, destructor) {
  731. /* Avoid calling destructors for NULL pointers */
  732. if (destructor->data != NULL) {
  733. destructor->func (destructor->data);
  734. }
  735. }
  736. rspamd_mempool_variables_cleanup (pool);
  737. if (pool->priv->trash_stack) {
  738. for (i = 0; i < pool->priv->trash_stack->len; i++) {
  739. ptr = g_ptr_array_index (pool->priv->trash_stack, i);
  740. g_free (ptr);
  741. }
  742. g_ptr_array_free (pool->priv->trash_stack, TRUE);
  743. }
  744. for (i = 0; i < G_N_ELEMENTS (pool->priv->pools); i ++) {
  745. if (pool->priv->pools[i]) {
  746. LL_FOREACH_SAFE (pool->priv->pools[i], cur, tmp) {
  747. g_atomic_int_add (&mem_pool_stat->bytes_allocated,
  748. -((gint)cur->slice_size));
  749. g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
  750. len = cur->slice_size + sizeof (struct _pool_chain);
  751. if (i == RSPAMD_MEMPOOL_SHARED) {
  752. munmap ((void *)cur, len);
  753. }
  754. else {
  755. /* The last pool is special, it is a part of the initial chunk */
  756. if (cur->next != NULL) {
  757. free (cur); /* Not g_free as we use system allocator */
  758. }
  759. }
  760. }
  761. }
  762. }
  763. g_atomic_int_inc (&mem_pool_stat->pools_freed);
  764. POOL_MTX_UNLOCK ();
  765. free (pool); /* allocated by posix_memalign */
  766. }
  767. void
  768. rspamd_mempool_stat (rspamd_mempool_stat_t * st)
  769. {
  770. if (mem_pool_stat != NULL) {
  771. st->pools_allocated = mem_pool_stat->pools_allocated;
  772. st->pools_freed = mem_pool_stat->pools_freed;
  773. st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
  774. st->bytes_allocated = mem_pool_stat->bytes_allocated;
  775. st->chunks_allocated = mem_pool_stat->chunks_allocated;
  776. st->chunks_freed = mem_pool_stat->chunks_freed;
  777. st->oversized_chunks = mem_pool_stat->oversized_chunks;
  778. }
  779. }
  780. void
  781. rspamd_mempool_stat_reset (void)
  782. {
  783. if (mem_pool_stat != NULL) {
  784. memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
  785. }
  786. }
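
A small sketch of reading the process-wide counters via rspamd_mempool_stat(); note that the function above copies nothing when the shared statistics page has not been mapped yet, so the destination should be zeroed first.

#include "mem_pool.h"
#include <stdio.h>
#include <string.h>

static void
example_print_stats (void)
{
	rspamd_mempool_stat_t st;

	memset (&st, 0, sizeof (st));
	rspamd_mempool_stat (&st);

	printf ("pools: %lu allocated, %lu freed; %lu bytes in %lu chunks\n",
			(unsigned long)st.pools_allocated,
			(unsigned long)st.pools_freed,
			(unsigned long)st.bytes_allocated,
			(unsigned long)st.chunks_allocated);
}
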
  787. gsize
  788. rspamd_mempool_suggest_size_ (const char *loc)
  789. {
  790. return 0;
  791. }
  792. #if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
  793. /*
  794. * Own emulation of process-shared locks
  795. */
  796. static inline gint
  797. __mutex_spin (rspamd_mempool_mutex_t * mutex)
  798. {
  799. /* check spin count */
  800. if (g_atomic_int_dec_and_test (&mutex->spin)) {
  801. /* This may be a deadlock, so check the owner of this lock */
  802. if (mutex->owner == getpid ()) {
  803. /* This mutex was locked by the calling process, so it is just a double lock and we can safely unlock it */
  804. g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
  805. return 0;
  806. }
  807. else if (kill (mutex->owner, 0) == -1) {
  808. /* Owner process was not found, so release lock */
  809. g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
  810. return 0;
  811. }
  812. /* Spin again */
  813. g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
  814. }
  815. #ifdef HAVE_SCHED_YIELD
  816. (void)sched_yield ();
  817. #elif defined(HAVE_NANOSLEEP)
  818. struct timespec ts;
  819. ts.tv_sec = 0;
  820. ts.tv_nsec = MUTEX_SLEEP_TIME;
  821. /* Spin */
  822. while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
  823. #else
  824. #error No methods to spin are defined
  825. #endif
  826. return 1;
  827. }
  828. static void
  829. memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
  830. {
  831. while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
  832. if (!__mutex_spin (mutex)) {
  833. return;
  834. }
  835. }
  836. }
  837. rspamd_mempool_mutex_t *
  838. rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
  839. {
  840. rspamd_mempool_mutex_t *res;
  841. if (pool != NULL) {
  842. res =
  843. rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
  844. res->lock = 0;
  845. res->owner = 0;
  846. res->spin = MUTEX_SPIN_COUNT;
  847. return res;
  848. }
  849. return NULL;
  850. }
  851. void
  852. rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
  853. {
  854. memory_pool_mutex_spin (mutex);
  855. mutex->owner = getpid ();
  856. }
  857. void
  858. rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
  859. {
  860. mutex->owner = 0;
  861. (void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
  862. }
  863. rspamd_mempool_rwlock_t *
  864. rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
  865. {
  866. rspamd_mempool_rwlock_t *lock;
  867. lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
  868. lock->__r_lock = rspamd_mempool_get_mutex (pool);
  869. lock->__w_lock = rspamd_mempool_get_mutex (pool);
  870. return lock;
  871. }
  872. void
  873. rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
  874. {
  875. /* Spin on write lock */
  876. while (g_atomic_int_get (&lock->__w_lock->lock)) {
  877. if (!__mutex_spin (lock->__w_lock)) {
  878. break;
  879. }
  880. }
  881. g_atomic_int_inc (&lock->__r_lock->lock);
  882. lock->__r_lock->owner = getpid ();
  883. }
  884. void
  885. rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
  886. {
  887. /* Spin on write lock first */
  888. rspamd_mempool_lock_mutex (lock->__w_lock);
  889. /* Now we have write lock set up */
  890. /* Wait for all readers */
  891. while (g_atomic_int_get (&lock->__r_lock->lock)) {
  892. __mutex_spin (lock->__r_lock);
  893. }
  894. }
  895. void
  896. rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
  897. {
  898. if (g_atomic_int_get (&lock->__r_lock->lock)) {
  899. (void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
  900. }
  901. }
  902. void
  903. rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
  904. {
  905. rspamd_mempool_unlock_mutex (lock->__w_lock);
  906. }
  907. #else
  908. /*
  909. * Pthread-based shared mutexes
  910. */
  911. rspamd_mempool_mutex_t *
  912. rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
  913. {
  914. rspamd_mempool_mutex_t *res;
  915. pthread_mutexattr_t mattr;
  916. if (pool != NULL) {
  917. res =
  918. rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
  919. pthread_mutexattr_init (&mattr);
  920. pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
  921. pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
  922. pthread_mutex_init (res, &mattr);
  923. rspamd_mempool_add_destructor (pool,
  924. (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
  925. pthread_mutexattr_destroy (&mattr);
  926. return res;
  927. }
  928. return NULL;
  929. }
  930. void
  931. rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
  932. {
  933. pthread_mutex_lock (mutex);
  934. }
  935. void
  936. rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
  937. {
  938. pthread_mutex_unlock (mutex);
  939. }
  940. rspamd_mempool_rwlock_t *
  941. rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
  942. {
  943. rspamd_mempool_rwlock_t *res;
  944. pthread_rwlockattr_t mattr;
  945. if (pool != NULL) {
  946. res =
  947. rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
  948. pthread_rwlockattr_init (&mattr);
  949. pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
  950. pthread_rwlock_init (res, &mattr);
  951. rspamd_mempool_add_destructor (pool,
  952. (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
  953. pthread_rwlockattr_destroy (&mattr);
  954. return res;
  955. }
  956. return NULL;
  957. }
  958. void
  959. rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
  960. {
  961. pthread_rwlock_rdlock (lock);
  962. }
  963. void
  964. rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
  965. {
  966. pthread_rwlock_wrlock (lock);
  967. }
  968. void
  969. rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
  970. {
  971. pthread_rwlock_unlock (lock);
  972. }
  973. void
  974. rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
  975. {
  976. pthread_rwlock_unlock (lock);
  977. }
  978. #endif
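
Whichever branch of the #if above is compiled, the locking API is used identically; a minimal sketch guarding a value shared between forked workers (in real code the mutex would be created once, before fork(), and reused).

#include "mem_pool.h"

static void
example_locking (rspamd_mempool_t *pool, int *shared_counter)
{
	/* Allocated from the shared pool area, so it survives fork() */
	rspamd_mempool_mutex_t *mtx = rspamd_mempool_get_mutex (pool);

	rspamd_mempool_lock_mutex (mtx);
	(*shared_counter)++;
	rspamd_mempool_unlock_mutex (mtx);
}
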
  979. #define RSPAMD_MEMPOOL_VARS_HASH_SEED 0xb32ad7c55eb2e647ULL
  980. void
  981. rspamd_mempool_set_variable (rspamd_mempool_t *pool,
  982. const gchar *name,
  983. gpointer value,
  984. rspamd_mempool_destruct_t destructor)
  985. {
  986. if (pool->priv->variables == NULL) {
  987. pool->priv->variables = kh_init (rspamd_mempool_vars_hash);
  988. if (pool->priv->entry->cur_vars > 0) {
  989. /* Preallocate */
  990. kh_resize (rspamd_mempool_vars_hash,
  991. pool->priv->variables,
  992. pool->priv->entry->cur_vars);
  993. }
  994. }
  995. gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
  996. RSPAMD_MEMPOOL_VARS_HASH_SEED);
  997. khiter_t it;
  998. gint r;
  999. it = kh_put (rspamd_mempool_vars_hash, pool->priv->variables, hv, &r);
  1000. if (it == kh_end (pool->priv->variables)) {
  1001. g_assert_not_reached ();
  1002. }
  1003. else {
  1004. struct rspamd_mempool_variable *pvar;
  1005. if (r == 0) {
  1006. /* Existing entry, maybe need cleanup */
  1007. pvar = &kh_val (pool->priv->variables, it);
  1008. if (pvar->dtor) {
  1009. pvar->dtor (pvar->data);
  1010. }
  1011. }
  1012. pvar = &kh_val (pool->priv->variables, it);
  1013. pvar->data = value;
  1014. pvar->dtor = destructor;
  1015. }
  1016. }
  1017. gpointer
  1018. rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
  1019. {
  1020. if (pool->priv->variables == NULL) {
  1021. return NULL;
  1022. }
  1023. khiter_t it;
  1024. gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
  1025. RSPAMD_MEMPOOL_VARS_HASH_SEED);
  1026. it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);
  1027. if (it != kh_end (pool->priv->variables)) {
  1028. struct rspamd_mempool_variable *pvar;
  1029. pvar = &kh_val (pool->priv->variables, it);
  1030. return pvar->data;
  1031. }
  1032. return NULL;
  1033. }
  1034. void
  1035. rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
  1036. {
  1037. if (pool->priv->variables != NULL) {
  1038. khiter_t it;
  1039. gint hv = rspamd_cryptobox_fast_hash (name, strlen (name),
  1040. RSPAMD_MEMPOOL_VARS_HASH_SEED);
  1041. it = kh_get (rspamd_mempool_vars_hash, pool->priv->variables, hv);
  1042. if (it != kh_end (pool->priv->variables)) {
  1043. struct rspamd_mempool_variable *pvar;
  1044. pvar = &kh_val (pool->priv->variables, it);
  1045. if (pvar->dtor) {
  1046. pvar->dtor (pvar->data);
  1047. }
  1048. kh_del (rspamd_mempool_vars_hash, pool->priv->variables, it);
  1049. }
  1050. }
  1051. }
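
A short sketch of the pool variable API defined above: values are keyed by the hash of the name, and the optional destructor runs on overwrite, explicit removal, or pool destruction. The example_state_dtor helper is introduced purely for illustration.

#include "mem_pool.h"
#include <glib.h>

static void
example_state_dtor (void *p)
{
	g_string_free ((GString *)p, TRUE);
}

static void
example_variables (rspamd_mempool_t *pool)
{
	GString *state = g_string_new ("per-task state");

	rspamd_mempool_set_variable (pool, "example_state", state,
			example_state_dtor);

	GString *found = rspamd_mempool_get_variable (pool, "example_state");
	g_assert (found == state);

	/* Runs example_state_dtor and drops the entry immediately */
	rspamd_mempool_remove_variable (pool, "example_state");
}
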
  1052. GList *
  1053. rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
  1054. {
  1055. GList *cell;
  1056. cell = rspamd_mempool_alloc (pool, sizeof (*cell));
  1057. cell->prev = NULL;
  1058. cell->data = p;
  1059. if (l == NULL) {
  1060. cell->next = NULL;
  1061. }
  1062. else {
  1063. cell->next = l;
  1064. l->prev = cell;
  1065. }
  1066. return cell;
  1067. }
  1068. GList *
  1069. rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
  1070. {
  1071. GList *cell, *cur;
  1072. cell = rspamd_mempool_alloc (pool, sizeof (*cell));
  1073. cell->next = NULL;
  1074. cell->data = p;
  1075. if (l) {
  1076. for (cur = l; cur->next != NULL; cur = cur->next) {}
  1077. cur->next = cell;
  1078. cell->prev = cur;
  1079. }
  1080. else {
  1081. l = cell;
  1082. l->prev = NULL;
  1083. }
  1084. return l;
  1085. }
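
A brief sketch of building a pool-backed GList with the two helpers above; the cells are carved from the pool, so the resulting list must never be passed to g_list_free().

#include "mem_pool.h"
#include <glib.h>

static GList *
example_pool_list (rspamd_mempool_t *pool)
{
	GList *l = NULL;

	l = rspamd_mempool_glist_append (pool, l, "first");
	l = rspamd_mempool_glist_append (pool, l, "second");
	l = rspamd_mempool_glist_prepend (pool, l, "head");

	/* "head" -> "first" -> "second"; the cells die with the pool */
	return l;
}
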
  1086. gsize
  1087. rspamd_mempool_get_used_size (rspamd_mempool_t *pool)
  1088. {
  1089. return pool->priv->used_memory;
  1090. }
  1091. gsize
  1092. rspamd_mempool_get_wasted_size (rspamd_mempool_t *pool)
  1093. {
  1094. return pool->priv->wasted_memory;
  1095. }