
mem_pool.c 32KB

/*
 * Copyright 2024 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"
#include "contrib/uthash/utlist.h"
#include "mem_pool_internal.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() \
	do {                \
	} while (0)
#define POOL_MTX_UNLOCK() \
	do {                  \
	} while (0)

/*
 * This define specifies whether we should check all pools for free space when
 * placing a new object, or only scan from the current (most recently attached) pool.
 * If MEMORY_GREEDY is defined, all pools are scanned to find free space (more CPU
 * usage and slower, but less memory is wasted). If it is not defined, only the
 * current pool is checked, and if the object is too large to fit there a new pool
 * is allocated (this can also cause high CPU usage in some cases, but is generally
 * faster than the greedy method).
 */
#undef MEMORY_GREEDY

static inline uint32_t
rspamd_entry_hash(const char *str)
{
	return (unsigned int) rspamd_cryptobox_fast_hash(str, strlen(str), rspamd_hash_seed());
}

static inline int
rspamd_entry_equal(const char *k1, const char *k2)
{
	return strcmp(k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const char *, struct rspamd_mempool_entry_point *,
		   1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistics */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;

/**
 * Function that returns the free space in a pool page
 * @param x pool page struct
 */
static gsize
pool_chain_free(struct _pool_chain *chain)
{
	int64_t occupied = chain->pos - chain->begin + MIN_MEM_ALIGNMENT;

	return (occupied < (int64_t) chain->slice_size ? chain->slice_size - occupied : 0);
}
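/*
 * Illustrative example (not part of the original code), assuming
 * MIN_MEM_ALIGNMENT is 16 bytes: for a chain with slice_size = 4096 where
 * pos - begin == 256, the occupied estimate is 256 + 16 = 272, so
 * pool_chain_free() reports 4096 - 272 = 3824 bytes still available.
 */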
/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new(const char *loc)
{
	struct rspamd_mempool_entry_point **pentry, *entry;
	int r;
	khiter_t k;

	k = kh_put(mempool_entry, mempool_entries, loc, &r);

	if (r >= 0) {
		pentry = &kh_value(mempool_entries, k);
		entry = g_malloc0(sizeof(*entry));
		*pentry = entry;
		memset(entry, 0, sizeof(*entry));
		rspamd_strlcpy(entry->src, loc, sizeof(entry->src));
#ifdef HAVE_GETPAGESIZE
		entry->cur_suggestion = MAX(getpagesize(), FIXED_POOL_SIZE);
#else
		entry->cur_suggestion = MAX(sysconf(_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
	}
	else {
		g_assert_not_reached();
	}

	return entry;
}

RSPAMD_CONSTRUCTOR(rspamd_mempool_entries_ctor)
{
	if (mempool_entries == NULL) {
		mempool_entries = kh_init(mempool_entry);
	}
}

RSPAMD_DESTRUCTOR(rspamd_mempool_entries_dtor)
{
	struct rspamd_mempool_entry_point *elt;

	kh_foreach_value(mempool_entries, elt, {
		g_free(elt);
	});

	kh_destroy(mempool_entry, mempool_entries);
	mempool_entries = NULL;
}

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry(const char *loc)
{
	khiter_t k;
	struct rspamd_mempool_entry_point *elt;

	if (G_UNLIKELY(!mempool_entries)) {
		rspamd_mempool_entries_ctor();
	}

	k = kh_get(mempool_entry, mempool_entries, loc);

	if (k != kh_end(mempool_entries)) {
		elt = kh_value(mempool_entries, k);

		return elt;
	}

	return rspamd_mempool_entry_new(loc);
}

static struct _pool_chain *
rspamd_mempool_chain_new(gsize size, gsize alignment, enum rspamd_mempool_chain_type pool_type)
{
	struct _pool_chain *chain;
	gsize total_size = size + sizeof(struct _pool_chain) + alignment,
		  optimal_size = 0;
	gpointer map;

	g_assert(size > 0);

	if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
		map = mmap(NULL,
				   total_size,
				   PROT_READ | PROT_WRITE,
				   MAP_ANON | MAP_SHARED,
				   -1,
				   0);
		if (map == MAP_FAILED) {
			g_error("%s: failed to allocate %" G_GSIZE_FORMAT " bytes",
					G_STRLOC, total_size);
			abort();
		}
		chain = map;
		chain->begin = ((uint8_t *) chain) + sizeof(struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
		int fd;

		fd = open("/dev/zero", O_RDWR);
		if (fd == -1) {
			return NULL;
		}
		map = mmap(NULL,
				   size + sizeof(struct _pool_chain),
				   PROT_READ | PROT_WRITE,
				   MAP_SHARED,
				   fd,
				   0);
		if (map == MAP_FAILED) {
			msg_err("cannot allocate %z bytes, aborting", size +
														   sizeof(struct _pool_chain));
			abort();
		}
		chain = map;
		chain->begin = ((uint8_t *) chain) + sizeof(struct _pool_chain);
#else
#error No mmap methods are defined
#endif
		g_atomic_int_inc(&mem_pool_stat->shared_chunks_allocated);
		g_atomic_int_add(&mem_pool_stat->bytes_allocated, total_size);
	}
	else {
#ifdef HAVE_MALLOC_SIZE
		optimal_size = sys_alloc_size(total_size);
#endif
		total_size = MAX(total_size, optimal_size);
		int ret = posix_memalign(&map, alignment, total_size);

		if (ret != 0 || map == NULL) {
			g_error("%s: failed to allocate %" G_GSIZE_FORMAT " bytes: %d - %s",
					G_STRLOC, total_size, ret, strerror(errno));
			abort();
		}

		chain = map;
		chain->begin = ((uint8_t *) chain) + sizeof(struct _pool_chain);
		g_atomic_int_add(&mem_pool_stat->bytes_allocated, total_size);
		g_atomic_int_inc(&mem_pool_stat->chunks_allocated);
	}

	chain->pos = align_ptr(chain->begin, alignment);
	chain->slice_size = total_size - sizeof(struct _pool_chain);

	return chain;
}
/**
 * Get the current pool of the specified type, creating the corresponding
 * array if it's absent
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain(rspamd_mempool_t *pool,
						 enum rspamd_mempool_chain_type pool_type)
{
	g_assert(pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

	return pool->priv->pools[pool_type];
}

static void
rspamd_mempool_append_chain(rspamd_mempool_t *pool,
							struct _pool_chain *chain,
							enum rspamd_mempool_chain_type pool_type)
{
	g_assert(pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
	g_assert(chain != NULL);

	LL_PREPEND(pool->priv->pools[pool_type], chain);
}

/**
 * Allocate a new memory pool
 * @param size size of the pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_(gsize size, const char *tag, int flags, const char *loc)
{
	rspamd_mempool_t *new_pool;
	gpointer map;

	/* Allocate the statistics structure if it has not been allocated before */
	if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
		map = mmap(NULL,
				   sizeof(rspamd_mempool_stat_t),
				   PROT_READ | PROT_WRITE,
				   MAP_ANON | MAP_SHARED,
				   -1,
				   0);
		if (map == MAP_FAILED) {
			msg_err("cannot allocate %z bytes, aborting",
					sizeof(rspamd_mempool_stat_t));
			abort();
		}
		mem_pool_stat = (rspamd_mempool_stat_t *) map;
#elif defined(HAVE_MMAP_ZERO)
		int fd;

		fd = open("/dev/zero", O_RDWR);
		g_assert(fd != -1);
		map = mmap(NULL,
				   sizeof(rspamd_mempool_stat_t),
				   PROT_READ | PROT_WRITE,
				   MAP_SHARED,
				   fd,
				   0);
		if (map == MAP_FAILED) {
			msg_err("cannot allocate %z bytes, aborting",
					sizeof(rspamd_mempool_stat_t));
			abort();
		}
		mem_pool_stat = (rspamd_mempool_stat_t *) map;
#else
#error No mmap methods are defined
#endif
		memset(map, 0, sizeof(rspamd_mempool_stat_t));
	}

	if (!env_checked) {
		/* Check the VALGRIND environment variable to force plain malloc for memory pool debugging */
		const char *g_slice;

		g_slice = getenv("VALGRIND");
		if (g_slice != NULL) {
			always_malloc = TRUE;
		}

		env_checked = TRUE;
	}

	struct rspamd_mempool_entry_point *entry = rspamd_mempool_get_entry(loc);
	gsize total_size;

	if (size == 0 && entry) {
		size = entry->cur_suggestion;
	}

	total_size = sizeof(rspamd_mempool_t) +
				 sizeof(struct rspamd_mempool_specific) +
				 MIN_MEM_ALIGNMENT +
				 sizeof(struct _pool_chain) +
				 size;

	if (G_UNLIKELY(flags & RSPAMD_MEMPOOL_DEBUG)) {
		total_size += sizeof(GHashTable *);
	}
	/*
	 * Memory layout:
	 * struct rspamd_mempool_t
	 * <optional debug hash table>
	 * struct rspamd_mempool_specific
	 * struct _pool_chain
	 * alignment (if needed)
	 * memory chunk
	 */

	unsigned char *mem_chunk;
	int ret = posix_memalign((void **) &mem_chunk, MIN_MEM_ALIGNMENT,
							 total_size);
	gsize priv_offset;

	if (ret != 0 || mem_chunk == NULL) {
		g_error("%s: failed to allocate %" G_GSIZE_FORMAT " bytes: %d - %s",
				G_STRLOC, total_size, ret, strerror(errno));
		abort();
	}

	/* Set memory layout */
	new_pool = (rspamd_mempool_t *) mem_chunk;
	if (G_UNLIKELY(flags & RSPAMD_MEMPOOL_DEBUG)) {
		/* Allocate debug table */
		GHashTable *debug_tbl;

		debug_tbl = g_hash_table_new(rspamd_str_hash, rspamd_str_equal);
		memcpy(mem_chunk + sizeof(rspamd_mempool_t), &debug_tbl,
			   sizeof(GHashTable *));
		priv_offset = sizeof(rspamd_mempool_t) + sizeof(GHashTable *);
	}
	else {
		priv_offset = sizeof(rspamd_mempool_t);
	}

	new_pool->priv = (struct rspamd_mempool_specific *) (mem_chunk +
														  priv_offset);
	/* Zero memory for the specific part and for the first chain */
	memset(new_pool->priv, 0, sizeof(struct rspamd_mempool_specific) + sizeof(struct _pool_chain));
	new_pool->priv->entry = entry;
	new_pool->priv->elt_len = size;
	new_pool->priv->flags = flags;

	if (tag) {
		rspamd_strlcpy(new_pool->tag.tagname, tag, sizeof(new_pool->tag.tagname));
	}
	else {
		new_pool->tag.tagname[0] = '\0';
	}

	/* Generate a new uid */
	uint64_t uid = rspamd_random_uint64_fast();
	rspamd_encode_hex_buf((unsigned char *) &uid, sizeof(uid),
						  new_pool->tag.uid, sizeof(new_pool->tag.uid) - 1);
	new_pool->tag.uid[sizeof(new_pool->tag.uid) - 1] = '\0';

	mem_pool_stat->pools_allocated++;

	/* Now we can attach one chunk to speed up simple allocations */
	struct _pool_chain *nchain;

	nchain = (struct _pool_chain *) (mem_chunk +
									 priv_offset +
									 sizeof(struct rspamd_mempool_specific));

	unsigned char *unaligned = mem_chunk +
							   priv_offset +
							   sizeof(struct rspamd_mempool_specific) +
							   sizeof(struct _pool_chain);

	nchain->slice_size = size;
	nchain->begin = unaligned;
	nchain->pos = align_ptr(unaligned, MIN_MEM_ALIGNMENT);
	new_pool->priv->pools[RSPAMD_MEMPOOL_NORMAL] = nchain;
	new_pool->priv->used_memory = size;

	/* Adjust stats */
	g_atomic_int_add(&mem_pool_stat->bytes_allocated,
					 (int) size);
	g_atomic_int_add(&mem_pool_stat->chunks_allocated, 1);

	return new_pool;
}
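/*
 * Usage sketch (illustrative, assuming the rspamd_mempool_new() /
 * rspamd_mempool_suggest_size() / rspamd_mempool_strdup() convenience macros
 * from mem_pool.h):
 *
 *   rspamd_mempool_t *pool = rspamd_mempool_new(rspamd_mempool_suggest_size(), "example", 0);
 *   char *s = rspamd_mempool_strdup(pool, "hello");
 *   rspamd_mempool_delete(pool); // releases s and every other allocation at once
 */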
static void *
memory_pool_alloc_common(rspamd_mempool_t *pool, gsize size, gsize alignment,
						 enum rspamd_mempool_chain_type pool_type,
						 const char *loc)
	RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MIN_MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNUL;

void rspamd_mempool_notify_alloc_(rspamd_mempool_t *pool, gsize size, const char *loc)
{
	if (pool && G_UNLIKELY(pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
		GHashTable *debug_tbl = *(GHashTable **) (((unsigned char *) pool + sizeof(*pool)));
		gpointer ptr;

		ptr = g_hash_table_lookup(debug_tbl, loc);

		if (ptr) {
			ptr = GSIZE_TO_POINTER(GPOINTER_TO_SIZE(ptr) + size);
		}
		else {
			ptr = GSIZE_TO_POINTER(size);
		}

		g_hash_table_insert(debug_tbl, (gpointer) loc, ptr);
	}
}

static void *
memory_pool_alloc_common(rspamd_mempool_t *pool, gsize size, gsize alignment,
						 enum rspamd_mempool_chain_type pool_type, const char *loc)
{
	uint8_t *tmp;
	struct _pool_chain *new, *cur;
	gsize free = 0;

	if (pool) {
		POOL_MTX_LOCK();
		pool->priv->used_memory += size;

		if (G_UNLIKELY(pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
			rspamd_mempool_notify_alloc_(pool, size, loc);
		}

		if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
			void *ptr;

			if (alignment <= G_MEM_ALIGN) {
				ptr = g_malloc(size);
			}
			else {
				ptr = g_malloc(size + alignment);
				ptr = align_ptr(ptr, alignment);
			}
			POOL_MTX_UNLOCK();

			if (pool->priv->trash_stack == NULL) {
				pool->priv->trash_stack = g_ptr_array_sized_new(128);
			}

			g_ptr_array_add(pool->priv->trash_stack, ptr);

			return ptr;
		}

		cur = rspamd_mempool_get_chain(pool, pool_type);

		/* Find free space in pool chain */
		if (cur) {
			free = pool_chain_free(cur);
		}

		if (cur == NULL || free < size + alignment) {
			if (free < size) {
				pool->priv->wasted_memory += free;
			}

			/* Allocate new chain element */
			if (pool->priv->elt_len >= size + alignment) {
				pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += size;
				new = rspamd_mempool_chain_new(pool->priv->elt_len, alignment,
											   pool_type);
			}
			else {
				mem_pool_stat->oversized_chunks++;
				g_atomic_int_add(&mem_pool_stat->fragmented_size,
								 free);
				pool->priv->entry->elts[pool->priv->entry->cur_elts].fragmentation += free;
				new = rspamd_mempool_chain_new(size + pool->priv->elt_len, alignment,
											   pool_type);
			}

			/* Connect to pool subsystem */
			rspamd_mempool_append_chain(pool, new, pool_type);
			/* No need to align again, aligned by rspamd_mempool_chain_new */
			tmp = new->pos;
			new->pos = tmp + size;
			POOL_MTX_UNLOCK();

			return tmp;
		}

		/* No need to allocate page */
		tmp = align_ptr(cur->pos, alignment);
		cur->pos = tmp + size;
		POOL_MTX_UNLOCK();

		return tmp;
	}

	abort();
}

void *
rspamd_mempool_alloc_(rspamd_mempool_t *pool, gsize size, gsize alignment, const char *loc)
{
	return memory_pool_alloc_common(pool, size, alignment, RSPAMD_MEMPOOL_NORMAL, loc);
}

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW (1UL << (sizeof(gsize) * 4))
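/*
 * Worked example (illustrative): with a 64-bit gsize, MUL_NO_OVERFLOW is
 * 1 << 32. The check below performs the relatively costly division
 * G_MAXSIZE / nmemb only when at least one operand is >= 2^32, i.e. when
 * nmemb * size could actually exceed SIZE_MAX; small allocations take the
 * fast path with just a couple of comparisons.
 */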
void *
rspamd_mempool_alloc_array_(rspamd_mempool_t *pool, gsize nmemb, gsize size, gsize alignment, const char *loc)
{
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		nmemb > 0 && G_MAXSIZE / nmemb < size) {
		g_error("alloc_array: overflow %" G_GSIZE_FORMAT " * %" G_GSIZE_FORMAT "",
				nmemb, size);
		g_abort();
	}
	return memory_pool_alloc_common(pool, size * nmemb, alignment, RSPAMD_MEMPOOL_NORMAL, loc);
}

void *
rspamd_mempool_alloc0_(rspamd_mempool_t *pool, gsize size, gsize alignment, const char *loc)
{
	void *pointer = rspamd_mempool_alloc_(pool, size, alignment, loc);

	memset(pointer, 0, size);

	return pointer;
}

void *
rspamd_mempool_alloc0_shared_(rspamd_mempool_t *pool, gsize size, gsize alignment, const char *loc)
{
	void *pointer = rspamd_mempool_alloc_shared_(pool, size, alignment, loc);

	memset(pointer, 0, size);

	return pointer;
}

void *
rspamd_mempool_alloc_shared_(rspamd_mempool_t *pool, gsize size, gsize alignment, const char *loc)
{
	return memory_pool_alloc_common(pool, size, alignment, RSPAMD_MEMPOOL_SHARED, loc);
}

char *
rspamd_mempool_strdup_(rspamd_mempool_t *pool, const char *src, const char *loc)
{
	if (src == NULL) {
		return NULL;
	}

	return rspamd_mempool_strdup_len_(pool, src, strlen(src), loc);
}

char *
rspamd_mempool_strdup_len_(rspamd_mempool_t *pool, const char *src, gsize len, const char *loc)
{
	char *newstr;

	if (src == NULL) {
		return NULL;
	}

	newstr = rspamd_mempool_alloc_(pool, len + 1, MIN_MEM_ALIGNMENT, loc);
	memcpy(newstr, src, len);
	newstr[len] = '\0';

	return newstr;
}

char *
rspamd_mempool_ftokdup_(rspamd_mempool_t *pool, const rspamd_ftok_t *src,
						const char *loc)
{
	char *newstr;

	if (src == NULL) {
		return NULL;
	}

	newstr = rspamd_mempool_alloc_(pool, src->len + 1, MIN_MEM_ALIGNMENT, loc);
	memcpy(newstr, src->begin, src->len);
	newstr[src->len] = '\0';

	return newstr;
}

void rspamd_mempool_add_destructor_full(rspamd_mempool_t *pool,
										rspamd_mempool_destruct_t func,
										void *data,
										const char *function,
										const char *line)
{
	struct _pool_destructors *cur;

	POOL_MTX_LOCK();
	cur = rspamd_mempool_alloc_(pool, sizeof(*cur),
								RSPAMD_ALIGNOF(struct _pool_destructors), line);
	cur->func = func;
	cur->data = data;
	cur->function = function;
	cur->loc = line;
	cur->next = NULL;

	if (pool->priv->dtors_tail) {
		pool->priv->dtors_tail->next = cur;
		pool->priv->dtors_tail = cur;
	}
	else {
		pool->priv->dtors_head = cur;
		pool->priv->dtors_tail = cur;
	}

	POOL_MTX_UNLOCK();
}
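/*
 * Typical use (illustrative sketch, assuming the rspamd_mempool_add_destructor()
 * wrapper macro from mem_pool.h): register cleanup for a heap object so that it
 * is released when the pool is destroyed, e.g.
 *
 *   GHashTable *htb = g_hash_table_new(rspamd_str_hash, rspamd_str_equal);
 *   rspamd_mempool_add_destructor(pool, (rspamd_mempool_destruct_t) g_hash_table_unref, htb);
 *
 * Since new destructors are appended at the tail, they run in registration
 * order from rspamd_mempool_destructors_enforce() and rspamd_mempool_delete().
 */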
void rspamd_mempool_replace_destructor(rspamd_mempool_t *pool,
									   rspamd_mempool_destruct_t func,
									   void *old_data,
									   void *new_data)
{
	struct _pool_destructors *tmp;

	LL_FOREACH(pool->priv->dtors_head, tmp)
	{
		if (tmp->func == func && tmp->data == old_data) {
			tmp->func = func;
			tmp->data = new_data;
			break;
		}
	}
}

static int
cmp_int(gconstpointer a, gconstpointer b)
{
	int i1 = *(const int *) a, i2 = *(const int *) b;

	return i1 - i2;
}

static void
rspamd_mempool_adjust_entry(struct rspamd_mempool_entry_point *e)
{
	int sz[G_N_ELEMENTS(e->elts)], sel_pos, sel_neg;
	unsigned int i, jitter;

	for (i = 0; i < G_N_ELEMENTS(sz); i++) {
		sz[i] = e->elts[i].fragmentation - (int) e->elts[i].leftover;
	}

	qsort(sz, G_N_ELEMENTS(sz), sizeof(int), cmp_int);
	jitter = rspamd_random_uint64_fast() % 10;
	/*
	 * Take stochastic quantiles: sample two jittered positions in the sorted
	 * array, a higher one (sel_pos, dominated by fragmentation) and a lower
	 * one (sel_neg, dominated by leftover), to decide which effect prevails.
	 */
	sel_pos = sz[50 + jitter];
	sel_neg = sz[4 + jitter];

	if (-sel_neg > sel_pos) {
		/* We need to reduce the current suggestion */
		e->cur_suggestion /= (1 + (((double) -sel_neg) / e->cur_suggestion)) * 1.5;
	}
	else {
		/* We still want to grow */
		e->cur_suggestion *= (1 + (((double) sel_pos) / e->cur_suggestion)) * 1.5;
	}

	/* Some sane limits that account for the mempool architecture */
	if (e->cur_suggestion < 1024) {
		e->cur_suggestion = 1024;
	}
	else if (e->cur_suggestion > 1024 * 1024 * 10) {
		e->cur_suggestion = 1024 * 1024 * 10;
	}

	memset(e->elts, 0, sizeof(e->elts));
}
static void
rspamd_mempool_variables_cleanup(rspamd_mempool_t *pool)
{
	if (pool->priv->variables) {
		struct rspamd_mempool_variable *var;

		kh_foreach_value_ptr(pool->priv->variables, var, {
			if (var->dtor) {
				var->dtor(var->data);
			}
		});

		if (pool->priv->entry && pool->priv->entry->cur_vars <
									 kh_size(pool->priv->variables)) {
			/*
			 * Increase the preallocated size when:
			 * 1) Our previous guess was zero
			 * 2) Our new variables count is not more than twice larger than
			 *    the previous count
			 * 3) Our variables count is less than some hard limit
			 */
			static const unsigned int max_preallocated_vars = 512;

			unsigned int cur_size = kh_size(pool->priv->variables);
			unsigned int old_guess = pool->priv->entry->cur_vars;
			unsigned int new_guess;

			if (old_guess == 0) {
				new_guess = MIN(cur_size, max_preallocated_vars);
			}
			else {
				if (old_guess * 2 < cur_size) {
					new_guess = MIN(cur_size, max_preallocated_vars);
				}
				else {
					/* Too large step */
					new_guess = MIN(old_guess * 2, max_preallocated_vars);
				}
			}

			pool->priv->entry->cur_vars = new_guess;
		}

		kh_destroy(rspamd_mempool_vars_hash, pool->priv->variables);
		pool->priv->variables = NULL;
	}
}

void rspamd_mempool_destructors_enforce(rspamd_mempool_t *pool)
{
	struct _pool_destructors *destructor;

	POOL_MTX_LOCK();

	LL_FOREACH(pool->priv->dtors_head, destructor)
	{
		/* Avoid calling destructors for NULL pointers */
		if (destructor->data != NULL) {
			destructor->func(destructor->data);
		}
	}

	pool->priv->dtors_head = pool->priv->dtors_tail = NULL;

	rspamd_mempool_variables_cleanup(pool);

	POOL_MTX_UNLOCK();
}
struct mempool_debug_elt {
	gsize sz;
	const char *loc;
};

static int
rspamd_mempool_debug_elt_cmp(const void *a, const void *b)
{
	const struct mempool_debug_elt *e1 = a, *e2 = b;

	/* Inverse order */
	return (int) ((gssize) e2->sz) - ((gssize) e1->sz);
}

void rspamd_mempool_delete(rspamd_mempool_t *pool)
{
	struct _pool_chain *cur, *tmp;
	struct _pool_destructors *destructor;
	gpointer ptr;
	unsigned int i;
	gsize len;

	POOL_MTX_LOCK();

	cur = pool->priv->pools[RSPAMD_MEMPOOL_NORMAL];

	if (G_UNLIKELY(pool->priv->flags & RSPAMD_MEMPOOL_DEBUG)) {
		GHashTable *debug_tbl = *(GHashTable **) (((unsigned char *) pool) + sizeof(*pool));
		/* Show debug info */
		gsize ndtor = 0;
		LL_COUNT(pool->priv->dtors_head, destructor, ndtor);
		msg_info_pool("destructing of the memory pool %p; elt size = %z; "
					  "used memory = %Hz; wasted memory = %Hd; "
					  "vars = %z; destructors = %z",
					  pool,
					  pool->priv->elt_len,
					  pool->priv->used_memory,
					  pool->priv->wasted_memory,
					  pool->priv->variables ? (gsize) kh_size(pool->priv->variables) : (gsize) 0,
					  ndtor);

		GHashTableIter it;
		gpointer k, v;
		GArray *sorted_debug_size = g_array_sized_new(FALSE, FALSE,
													  sizeof(struct mempool_debug_elt),
													  g_hash_table_size(debug_tbl));

		g_hash_table_iter_init(&it, debug_tbl);

		while (g_hash_table_iter_next(&it, &k, &v)) {
			struct mempool_debug_elt e;
			e.loc = (const char *) k;
			e.sz = GPOINTER_TO_SIZE(v);
			g_array_append_val(sorted_debug_size, e);
		}

		g_array_sort(sorted_debug_size, rspamd_mempool_debug_elt_cmp);

		for (unsigned int _i = 0; _i < sorted_debug_size->len; _i++) {
			struct mempool_debug_elt *e;

			e = &g_array_index(sorted_debug_size, struct mempool_debug_elt, _i);
			msg_info_pool("allocated %Hz from %s", e->sz, e->loc);
		}

		g_array_free(sorted_debug_size, TRUE);
		g_hash_table_unref(debug_tbl);
	}

	if (cur && mempool_entries) {
		pool->priv->entry->elts[pool->priv->entry->cur_elts].leftover =
			pool_chain_free(cur);

		pool->priv->entry->cur_elts = (pool->priv->entry->cur_elts + 1) %
									  G_N_ELEMENTS(pool->priv->entry->elts);

		if (pool->priv->entry->cur_elts == 0) {
			rspamd_mempool_adjust_entry(pool->priv->entry);
		}
	}

	/* Call all pool destructors */
	LL_FOREACH(pool->priv->dtors_head, destructor)
	{
		/* Avoid calling destructors for NULL pointers */
		if (destructor->data != NULL) {
			destructor->func(destructor->data);
		}
	}

	rspamd_mempool_variables_cleanup(pool);

	if (pool->priv->trash_stack) {
		for (i = 0; i < pool->priv->trash_stack->len; i++) {
			ptr = g_ptr_array_index(pool->priv->trash_stack, i);
			g_free(ptr);
		}

		g_ptr_array_free(pool->priv->trash_stack, TRUE);
	}

	for (i = 0; i < G_N_ELEMENTS(pool->priv->pools); i++) {
		if (pool->priv->pools[i]) {
			LL_FOREACH_SAFE(pool->priv->pools[i], cur, tmp)
			{
				g_atomic_int_add(&mem_pool_stat->bytes_allocated,
								 -((int) cur->slice_size));
				g_atomic_int_add(&mem_pool_stat->chunks_allocated, -1);
				len = cur->slice_size + sizeof(struct _pool_chain);

				if (i == RSPAMD_MEMPOOL_SHARED) {
					munmap((void *) cur, len);
				}
				else {
					/* The last pool is special, it is a part of the initial chunk */
					if (cur->next != NULL) {
						free(cur); /* Not g_free as we use system allocator */
					}
				}
			}
		}
	}

	g_atomic_int_inc(&mem_pool_stat->pools_freed);
	POOL_MTX_UNLOCK();
	free(pool); /* allocated by posix_memalign */
}

void rspamd_mempool_stat(rspamd_mempool_stat_t *st)
{
	if (mem_pool_stat != NULL) {
		st->pools_allocated = mem_pool_stat->pools_allocated;
		st->pools_freed = mem_pool_stat->pools_freed;
		st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
		st->bytes_allocated = mem_pool_stat->bytes_allocated;
		st->chunks_allocated = mem_pool_stat->chunks_allocated;
		st->chunks_freed = mem_pool_stat->chunks_freed;
		st->oversized_chunks = mem_pool_stat->oversized_chunks;
	}
}

void rspamd_mempool_stat_reset(void)
{
	if (mem_pool_stat != NULL) {
		memset(mem_pool_stat, 0, sizeof(rspamd_mempool_stat_t));
	}
}

gsize rspamd_mempool_suggest_size_(const char *loc)
{
	return 0;
}
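/*
 * Note: returning 0 here is deliberate; rspamd_mempool_new_() treats a zero
 * size as "use the adaptive per-call-site suggestion" (entry->cur_suggestion),
 * which rspamd_mempool_adjust_entry() tunes over time.
 */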
#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
static inline int
__mutex_spin(rspamd_mempool_mutex_t *mutex)
{
	/* check spin count */
	if (g_atomic_int_dec_and_test(&mutex->spin)) {
		/* This may be a deadlock, so check the owner of this lock */
		if (mutex->owner == getpid()) {
			/* This mutex was locked by the calling process, so it is just a double lock and we can easily unlock it */
			g_atomic_int_set(&mutex->spin, MUTEX_SPIN_COUNT);
			return 0;
		}
		else if (kill(mutex->owner, 0) == -1) {
			/* The owner process was not found, so release the lock */
			g_atomic_int_set(&mutex->spin, MUTEX_SPIN_COUNT);
			return 0;
		}
		/* Spin again */
		g_atomic_int_set(&mutex->spin, MUTEX_SPIN_COUNT);
	}

#ifdef HAVE_SCHED_YIELD
	(void) sched_yield();
#elif defined(HAVE_NANOSLEEP)
	struct timespec ts;
	ts.tv_sec = 0;
	ts.tv_nsec = MUTEX_SLEEP_TIME;
	/* Spin */
	while (nanosleep(&ts, &ts) == -1 && errno == EINTR)
		;
#else
#error No methods to spin are defined
#endif

	return 1;
}

static void
memory_pool_mutex_spin(rspamd_mempool_mutex_t *mutex)
{
	while (!g_atomic_int_compare_and_exchange(&mutex->lock, 0, 1)) {
		if (!__mutex_spin(mutex)) {
			return;
		}
	}
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex(rspamd_mempool_t *pool)
{
	rspamd_mempool_mutex_t *res;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared(pool, sizeof(rspamd_mempool_mutex_t));
		res->lock = 0;
		res->owner = 0;
		res->spin = MUTEX_SPIN_COUNT;

		return res;
	}
	return NULL;
}

void rspamd_mempool_lock_mutex(rspamd_mempool_mutex_t *mutex)
{
	memory_pool_mutex_spin(mutex);
	mutex->owner = getpid();
}

void rspamd_mempool_unlock_mutex(rspamd_mempool_mutex_t *mutex)
{
	mutex->owner = 0;
	(void) g_atomic_int_compare_and_exchange(&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock(rspamd_mempool_t *pool)
{
	rspamd_mempool_rwlock_t *lock;

	lock = rspamd_mempool_alloc_shared(pool, sizeof(rspamd_mempool_rwlock_t));
	lock->__r_lock = rspamd_mempool_get_mutex(pool);
	lock->__w_lock = rspamd_mempool_get_mutex(pool);

	return lock;
}

void rspamd_mempool_rlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	/* Spin on the write lock */
	while (g_atomic_int_get(&lock->__w_lock->lock)) {
		if (!__mutex_spin(lock->__w_lock)) {
			break;
		}
	}

	g_atomic_int_inc(&lock->__r_lock->lock);
	lock->__r_lock->owner = getpid();
}

void rspamd_mempool_wlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	/* Take the write lock first */
	rspamd_mempool_lock_mutex(lock->__w_lock);
	/* Now we have the write lock set up */
	/* Wait for all readers */
	while (g_atomic_int_get(&lock->__r_lock->lock)) {
		__mutex_spin(lock->__r_lock);
	}
}

void rspamd_mempool_runlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	if (g_atomic_int_get(&lock->__r_lock->lock)) {
		(void) g_atomic_int_dec_and_test(&lock->__r_lock->lock);
	}
}

void rspamd_mempool_wunlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	rspamd_mempool_unlock_mutex(lock->__w_lock);
}
#else
/*
 * Pthread-based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex(rspamd_mempool_t *pool)
{
	rspamd_mempool_mutex_t *res;
	pthread_mutexattr_t mattr;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared(pool, sizeof(rspamd_mempool_mutex_t));

		pthread_mutexattr_init(&mattr);
		pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
		pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
		pthread_mutex_init(res, &mattr);
		rspamd_mempool_add_destructor(pool,
									  (rspamd_mempool_destruct_t) pthread_mutex_destroy, res);
		pthread_mutexattr_destroy(&mattr);

		return res;
	}
	return NULL;
}

void rspamd_mempool_lock_mutex(rspamd_mempool_mutex_t *mutex)
{
	pthread_mutex_lock(mutex);
}

void rspamd_mempool_unlock_mutex(rspamd_mempool_mutex_t *mutex)
{
	pthread_mutex_unlock(mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock(rspamd_mempool_t *pool)
{
	rspamd_mempool_rwlock_t *res;
	pthread_rwlockattr_t mattr;

	if (pool != NULL) {
		res =
			rspamd_mempool_alloc_shared(pool, sizeof(rspamd_mempool_rwlock_t));

		pthread_rwlockattr_init(&mattr);
		pthread_rwlockattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
		pthread_rwlock_init(res, &mattr);
		rspamd_mempool_add_destructor(pool,
									  (rspamd_mempool_destruct_t) pthread_rwlock_destroy, res);
		pthread_rwlockattr_destroy(&mattr);

		return res;
	}
	return NULL;
}

void rspamd_mempool_rlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	pthread_rwlock_rdlock(lock);
}

void rspamd_mempool_wlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	pthread_rwlock_wrlock(lock);
}

void rspamd_mempool_runlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	pthread_rwlock_unlock(lock);
}

void rspamd_mempool_wunlock_rwlock(rspamd_mempool_rwlock_t *lock)
{
	pthread_rwlock_unlock(lock);
}
#endif
#define RSPAMD_MEMPOOL_VARS_HASH_SEED 0xb32ad7c55eb2e647ULL

void rspamd_mempool_set_variable(rspamd_mempool_t *pool,
								 const char *name,
								 gpointer value,
								 rspamd_mempool_destruct_t destructor)
{
	if (pool->priv->variables == NULL) {
		pool->priv->variables = kh_init(rspamd_mempool_vars_hash);

		if (pool->priv->entry->cur_vars > 0) {
			/* Preallocate */
			kh_resize(rspamd_mempool_vars_hash,
					  pool->priv->variables,
					  pool->priv->entry->cur_vars);
		}
	}

	int hv = rspamd_cryptobox_fast_hash(name, strlen(name),
										RSPAMD_MEMPOOL_VARS_HASH_SEED);
	khiter_t it;
	int r;

	it = kh_put(rspamd_mempool_vars_hash, pool->priv->variables, hv, &r);

	if (it == kh_end(pool->priv->variables)) {
		g_assert_not_reached();
	}
	else {
		struct rspamd_mempool_variable *pvar;

		if (r == 0) {
			/* Existing entry, maybe need cleanup */
			pvar = &kh_val(pool->priv->variables, it);

			if (pvar->dtor) {
				pvar->dtor(pvar->data);
			}
		}

		pvar = &kh_val(pool->priv->variables, it);
		pvar->data = value;
		pvar->dtor = destructor;
	}
}
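/*
 * Usage sketch (illustrative; the names cfg_ptr/my_cfg are hypothetical):
 * attach an arbitrary named value to the pool and retrieve it later; the
 * destructor, if any, runs on pool cleanup or when the variable is replaced
 * or removed.
 *
 *   rspamd_mempool_set_variable(pool, "cfg", cfg_ptr, NULL);
 *   struct my_cfg *found = rspamd_mempool_get_variable(pool, "cfg");
 */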
gpointer
rspamd_mempool_get_variable(rspamd_mempool_t *pool, const char *name)
{
	if (pool->priv->variables == NULL) {
		return NULL;
	}

	khiter_t it;
	int hv = rspamd_cryptobox_fast_hash(name, strlen(name),
										RSPAMD_MEMPOOL_VARS_HASH_SEED);

	it = kh_get(rspamd_mempool_vars_hash, pool->priv->variables, hv);

	if (it != kh_end(pool->priv->variables)) {
		struct rspamd_mempool_variable *pvar;

		pvar = &kh_val(pool->priv->variables, it);
		return pvar->data;
	}

	return NULL;
}

gpointer
rspamd_mempool_steal_variable(rspamd_mempool_t *pool, const char *name)
{
	if (pool->priv->variables == NULL) {
		return NULL;
	}

	khiter_t it;
	int hv = rspamd_cryptobox_fast_hash(name, strlen(name),
										RSPAMD_MEMPOOL_VARS_HASH_SEED);

	it = kh_get(rspamd_mempool_vars_hash, pool->priv->variables, hv);

	if (it != kh_end(pool->priv->variables)) {
		struct rspamd_mempool_variable *pvar;

		pvar = &kh_val(pool->priv->variables, it);
		kh_del(rspamd_mempool_vars_hash, pool->priv->variables, it);

		return pvar->data;
	}

	return NULL;
}

void rspamd_mempool_remove_variable(rspamd_mempool_t *pool, const char *name)
{
	if (pool->priv->variables != NULL) {
		khiter_t it;
		int hv = rspamd_cryptobox_fast_hash(name, strlen(name),
											RSPAMD_MEMPOOL_VARS_HASH_SEED);

		it = kh_get(rspamd_mempool_vars_hash, pool->priv->variables, hv);

		if (it != kh_end(pool->priv->variables)) {
			struct rspamd_mempool_variable *pvar;

			pvar = &kh_val(pool->priv->variables, it);

			if (pvar->dtor) {
				pvar->dtor(pvar->data);
			}

			kh_del(rspamd_mempool_vars_hash, pool->priv->variables, it);
		}
	}
}

GList *
rspamd_mempool_glist_prepend(rspamd_mempool_t *pool, GList *l, gpointer p)
{
	GList *cell;

	cell = rspamd_mempool_alloc(pool, sizeof(*cell));
	cell->prev = NULL;
	cell->data = p;

	if (l == NULL) {
		cell->next = NULL;
	}
	else {
		cell->next = l;
		l->prev = cell;
	}

	return cell;
}

GList *
rspamd_mempool_glist_append(rspamd_mempool_t *pool, GList *l, gpointer p)
{
	GList *cell, *cur;

	cell = rspamd_mempool_alloc(pool, sizeof(*cell));
	cell->next = NULL;
	cell->data = p;

	if (l) {
		for (cur = l; cur->next != NULL; cur = cur->next) {}
		cur->next = cell;
		cell->prev = cur;
	}
	else {
		l = cell;
		l->prev = NULL;
	}

	return l;
}

gsize rspamd_mempool_get_used_size(rspamd_mempool_t *pool)
{
	return pool->priv->used_memory;
}

gsize rspamd_mempool_get_wasted_size(rspamd_mempool_t *pool)
{
	return pool->priv->wasted_memory;
}