
mem_pool.c 24KB

/*-
 * Copyright 2016 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"
#include "contrib/uthash/utlist.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() do { } while (0)
#define POOL_MTX_UNLOCK() do { } while (0)

/*
 * This define specifies whether we should check all pools for free space when
 * allocating a new object, or only scan the current (most recently attached)
 * pool. If MEMORY_GREEDY is defined, we scan all pools to find free space
 * (more CPU usage and slower, but requires less memory). If it is not defined,
 * we check only the current pool, and if the object is too large to fit there
 * we allocate a new pool (this can also cause high CPU usage in some cases,
 * but is generally faster than the greedy method).
 */
#undef MEMORY_GREEDY

#define ENTRY_LEN 128
#define ENTRY_NELTS 64

struct entry_elt {
    guint32 fragmentation;
    guint32 leftover;
};

struct rspamd_mempool_entry_point {
    gchar src[ENTRY_LEN];
    guint32 cur_suggestion;
    guint32 cur_elts;
    struct entry_elt elts[ENTRY_NELTS];
};

static inline uint32_t
rspamd_entry_hash (const char *str)
{
    return (guint)rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
}

static inline int
rspamd_entry_equal (const char *k1, const char *k2)
{
    return strcmp (k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
        1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistics */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;

/**
 * Returns the free space in a pool page
 * @param chain pool page struct
 */
static gsize
pool_chain_free (struct _pool_chain *chain)
{
    gint64 occupied = chain->pos - chain->begin + MIN_MEM_ALIGNMENT;

    return (occupied < (gint64)chain->slice_size ?
            chain->slice_size - occupied : 0);
}

/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new (const gchar *loc)
{
    struct rspamd_mempool_entry_point **pentry, *entry;
    gint r;
    khiter_t k;

    k = kh_put (mempool_entry, mempool_entries, loc, &r);

    if (r >= 0) {
        pentry = &kh_value (mempool_entries, k);
        /* g_malloc0 already zeroes the entry */
        entry = g_malloc0 (sizeof (*entry));
        *pentry = entry;
        rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
#ifdef HAVE_GETPAGESIZE
        entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
#else
        entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
    }
    else {
        g_assert_not_reached ();
    }

    return entry;
}

static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry (const gchar *loc)
{
    khiter_t k;
    struct rspamd_mempool_entry_point *elt;

    if (mempool_entries == NULL) {
        mempool_entries = kh_init (mempool_entry);
    }
    else {
        k = kh_get (mempool_entry, mempool_entries, loc);

        if (k != kh_end (mempool_entries)) {
            elt = kh_value (mempool_entries, k);

            return elt;
        }
    }

    return rspamd_mempool_entry_new (loc);
}

static struct _pool_chain *
rspamd_mempool_chain_new (gsize size, enum rspamd_mempool_chain_type pool_type)
{
    struct _pool_chain *chain;
    gsize total_size = size + sizeof (struct _pool_chain) + MIN_MEM_ALIGNMENT,
            optimal_size = 0;
    gpointer map;

    g_assert (size > 0);

    if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                total_size,
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);

        if (map == MAP_FAILED) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        if (fd == -1) {
            return NULL;
        }
        map = mmap (NULL,
                size + sizeof (struct _pool_chain),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting", size +
                    sizeof (struct _pool_chain));
            abort ();
        }
        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#else
#error No mmap methods are defined
#endif
        g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
    }
    else {
#ifdef HAVE_MALLOC_SIZE
        optimal_size = sys_alloc_size (total_size);
#endif
        total_size = MAX (total_size, optimal_size);
        map = malloc (total_size);

        if (map == NULL) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
        g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
    }

    chain->pos = align_ptr (chain->begin, MIN_MEM_ALIGNMENT);
    chain->slice_size = total_size - sizeof (struct _pool_chain);
    chain->lock = NULL;

    return chain;
}
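/*
 * Chain memory layout (for reference, derived from the code above):
 *
 *   [ struct _pool_chain | payload ............................... ]
 *     ^chain               ^begin  ^pos (aligned to MIN_MEM_ALIGNMENT)
 *
 * slice_size counts the payload only; each allocation advances pos, and
 * pool_chain_free() reports what remains between pos and the slice end.
 */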
/**
 * Get the current pool of the specified type, creating the corresponding
 * array if it's absent
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

    return pool->pools[pool_type];
}

static void
rspamd_mempool_append_chain (rspamd_mempool_t * pool,
        struct _pool_chain *chain,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
    g_assert (chain != NULL);

    LL_PREPEND (pool->pools[pool_type], chain);
}

/**
 * Allocate a new memory pool
 * @param size size of the pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_ (gsize size, const gchar *tag, const gchar *loc)
{
    rspamd_mempool_t *new_pool;
    gpointer map;
    unsigned char uidbuf[10];
    const gchar hexdigits[] = "0123456789abcdef";
    unsigned i;

    /* Allocate the statistics structure if it was not allocated before */
    if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }
        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        g_assert (fd != -1);
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);
        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }
        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#else
#   error No mmap methods are defined
#endif
        memset (map, 0, sizeof (rspamd_mempool_stat_t));
    }

    if (!env_checked) {
        /* Check the VALGRIND environment variable to allow memory pool debugging */
        const char *g_slice;

        g_slice = getenv ("VALGRIND");
        if (g_slice != NULL) {
            always_malloc = TRUE;
        }
        env_checked = TRUE;
    }

    new_pool = g_malloc0 (sizeof (rspamd_mempool_t));
    new_pool->entry = rspamd_mempool_get_entry (loc);
    new_pool->destructors = g_array_sized_new (FALSE, FALSE,
            sizeof (struct _pool_destructors), 32);
    /* Use the per-call-site suggestion if the caller did not specify a size */
    if (size == 0) {
        new_pool->elt_len = new_pool->entry->cur_suggestion;
    }
    else {
        new_pool->elt_len = size;
    }

    if (tag) {
        rspamd_strlcpy (new_pool->tag.tagname, tag, sizeof (new_pool->tag.tagname));
    }
    else {
        new_pool->tag.tagname[0] = '\0';
    }

    /* Generate a new uid */
    ottery_rand_bytes (uidbuf, sizeof (uidbuf));
    for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
        new_pool->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
        new_pool->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
    }
    new_pool->tag.uid[19] = '\0';

    mem_pool_stat->pools_allocated++;

    return new_pool;
}
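/*
 * Usage sketch (illustrative; callers normally go through a convenience
 * macro in mem_pool.h that supplies the location, but the direct call
 * shown here matches the signature above):
 *
 *     rspamd_mempool_t *pool = rspamd_mempool_new_ (0, "example", G_STRLOC);
 *     ... allocate from the pool ...
 *     rspamd_mempool_delete (pool);
 *
 * Passing size == 0 selects the adaptive per-call-site suggestion that
 * rspamd_mempool_adjust_entry() (below) refines over time.
 */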
static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
        RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MIN_MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;

static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
{
    guint8 *tmp;
    struct _pool_chain *new, *cur;
    gsize free = 0;

    if (pool) {
        POOL_MTX_LOCK ();

        if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
            void *ptr;

            ptr = g_malloc (size);
            POOL_MTX_UNLOCK ();

            if (pool->trash_stack == NULL) {
                pool->trash_stack = g_ptr_array_sized_new (128);
            }

            g_ptr_array_add (pool->trash_stack, ptr);

            return ptr;
        }

        cur = rspamd_mempool_get_chain (pool, pool_type);

        /* Find free space in the pool chain */
        if (cur) {
            free = pool_chain_free (cur);
        }

        if (cur == NULL || free < size) {
            /* Allocate a new chain element */
            if (pool->elt_len >= size + MIN_MEM_ALIGNMENT) {
                pool->entry->elts[pool->entry->cur_elts].fragmentation += size;
                new = rspamd_mempool_chain_new (pool->elt_len,
                        pool_type);
            }
            else {
                mem_pool_stat->oversized_chunks++;
                g_atomic_int_add (&mem_pool_stat->fragmented_size,
                        free);
                pool->entry->elts[pool->entry->cur_elts].fragmentation += free;
                new = rspamd_mempool_chain_new (size + pool->elt_len, pool_type);
            }

            /* Connect to the pool subsystem */
            rspamd_mempool_append_chain (pool, new, pool_type);
            /* No need to align again, aligned by rspamd_mempool_chain_new */
            tmp = new->pos;
            new->pos = tmp + size;
            POOL_MTX_UNLOCK ();

            return tmp;
        }

        /* No need to allocate a new page */
        tmp = align_ptr (cur->pos, MIN_MEM_ALIGNMENT);
        cur->pos = tmp + size;
        POOL_MTX_UNLOCK ();

        return tmp;
    }

    abort ();
}

void *
rspamd_mempool_alloc (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_NORMAL);
}

void *
rspamd_mempool_alloc_tmp (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_TMP);
}

void *
rspamd_mempool_alloc0 (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc0_tmp (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_tmp (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc0_shared (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_shared (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc_shared (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_SHARED);
}
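/*
 * Allocation sketch (illustrative; struct foo is hypothetical): every
 * wrapper above funnels into memory_pool_alloc_common() and differs only
 * in the chain type it selects:
 *
 *     struct foo *f = rspamd_mempool_alloc0 (pool, sizeof (*f)); // zeroed
 *     void *buf = rspamd_mempool_alloc (pool, 256);              // raw
 *
 * Neither pointer is freed individually; both are released together when
 * the whole pool is deleted.
 */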
gchar *
rspamd_mempool_strdup (rspamd_mempool_t * pool, const gchar *src)
{
    gsize len;
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    len = strlen (src);
    newstr = rspamd_mempool_alloc (pool, len + 1);
    memcpy (newstr, src, len);
    newstr[len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_fstrdup (rspamd_mempool_t * pool, const struct f_str_s *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->str, src->len);
    newstr[src->len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_ftokdup (rspamd_mempool_t *pool, const rspamd_ftok_t *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->begin, src->len);
    newstr[src->len] = '\0';

    return newstr;
}

void
rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *data,
        const gchar *function,
        const gchar *line)
{
    struct _pool_destructors cur;

    POOL_MTX_LOCK ();
    cur.func = func;
    cur.data = data;
    cur.function = function;
    cur.loc = line;

    g_array_append_val (pool->destructors, cur);
    POOL_MTX_UNLOCK ();
}
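/*
 * Destructor sketch (illustrative; rspamd_mempool_add_destructor is the
 * wrapper macro used later in this file): resources that do not live in
 * pool memory can still be bound to the pool's lifetime:
 *
 *     FILE *f = fopen ("/tmp/example", "r");
 *     if (f) {
 *         rspamd_mempool_add_destructor (pool,
 *                 (rspamd_mempool_destruct_t)fclose, f);
 *     }
 *
 * Registered destructors run in order when the pool is deleted.
 */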
void
rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *old_data,
        void *new_data)
{
    struct _pool_destructors *tmp;
    guint i;

    for (i = 0; i < pool->destructors->len; i ++) {
        tmp = &g_array_index (pool->destructors, struct _pool_destructors, i);

        if (tmp->func == func && tmp->data == old_data) {
            tmp->func = func;
            tmp->data = new_data;
            break;
        }
    }
}

static gint
cmp_int (gconstpointer a, gconstpointer b)
{
    gint i1 = *(const gint *)a, i2 = *(const gint *)b;

    return i1 - i2;
}

static void
rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
{
    gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
    guint i, jitter;

    for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
        sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
    }

    qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
    jitter = rspamd_random_uint64_fast () % 10;
    /*
     * Take stochastic quantiles
     */
    sel_pos = sz[50 + jitter];
    sel_neg = sz[4 + jitter];

    if (sel_neg > 0) {
        /* We need to increase our suggestion */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }
    else if (-sel_neg > sel_pos) {
        /* We need to reduce the current suggestion */
        e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
    }
    else {
        /* We still want to grow */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }

    /* Some sane limits given the mempool architecture */
    if (e->cur_suggestion < 1024) {
        e->cur_suggestion = 1024;
    }
    else if (e->cur_suggestion > 1024 * 1024 * 10) {
        e->cur_suggestion = 1024 * 1024 * 10;
    }

    memset (e->elts, 0, sizeof (e->elts));
}
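/*
 * Worked example (derived from the code above): with ENTRY_NELTS == 64
 * samples sorted ascending and jitter in [0, 9], sel_pos is drawn from
 * sz[50..59] (an upper quantile, where fragmentation dominates) and
 * sel_neg from sz[4..13] (a lower quantile, where leftover dominates).
 * If even the lower quantile is positive, every run fragmented and the
 * suggestion grows; if the leftover magnitude exceeds the upper
 * quantile's fragmentation, the suggestion shrinks; otherwise it still
 * grows, but from a smaller positive sel_pos.
 */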
void
rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
{
    struct _pool_destructors *destructor;
    guint i;

    POOL_MTX_LOCK ();

    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    pool->destructors->len = 0;

    POOL_MTX_UNLOCK ();
}

void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur, *tmp;
    struct _pool_destructors *destructor;
    gpointer ptr;
    guint i;
    gsize len;

    POOL_MTX_LOCK ();

    cur = NULL;

    if (pool->pools[RSPAMD_MEMPOOL_NORMAL] != NULL) {
        cur = pool->pools[RSPAMD_MEMPOOL_NORMAL];
    }

    if (cur) {
        pool->entry->elts[pool->entry->cur_elts].leftover =
                pool_chain_free (cur);

        pool->entry->cur_elts = (pool->entry->cur_elts + 1) %
                G_N_ELEMENTS (pool->entry->elts);

        if (pool->entry->cur_elts == 0) {
            rspamd_mempool_adjust_entry (pool->entry);
        }
    }

    /* Call all pool destructors */
    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    g_array_free (pool->destructors, TRUE);

    for (i = 0; i < G_N_ELEMENTS (pool->pools); i ++) {
        if (pool->pools[i]) {
            LL_FOREACH_SAFE (pool->pools[i], cur, tmp) {
                g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                        -((gint)cur->slice_size));
                g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
                len = cur->slice_size + sizeof (struct _pool_chain);

                if (i == RSPAMD_MEMPOOL_SHARED) {
                    munmap ((void *)cur, len);
                }
                else {
                    free (cur); /* Not g_free as we use the system allocator */
                }
            }
        }
    }

    if (pool->variables) {
        g_hash_table_destroy (pool->variables);
    }

    if (pool->trash_stack) {
        for (i = 0; i < pool->trash_stack->len; i++) {
            ptr = g_ptr_array_index (pool->trash_stack, i);
            g_free (ptr);
        }

        g_ptr_array_free (pool->trash_stack, TRUE);
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
    g_free (pool);
}

void
rspamd_mempool_cleanup_tmp (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur, *tmp;

    POOL_MTX_LOCK ();

    if (pool->pools[RSPAMD_MEMPOOL_TMP]) {
        LL_FOREACH_SAFE (pool->pools[RSPAMD_MEMPOOL_TMP], cur, tmp) {
            g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                    -((gint)cur->slice_size));
            g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
            free (cur);
        }

        pool->pools[RSPAMD_MEMPOOL_TMP] = NULL;
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
}
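/*
 * Temporary allocation sketch (illustrative): TMP chains can be reclaimed
 * in the middle of a pool's lifetime without touching normal allocations:
 *
 *     char *scratch = rspamd_mempool_alloc_tmp (pool, 4096);
 *     ... use scratch ...
 *     rspamd_mempool_cleanup_tmp (pool); // scratch is now invalid
 */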
void
rspamd_mempool_stat (rspamd_mempool_stat_t * st)
{
    if (mem_pool_stat != NULL) {
        st->pools_allocated = mem_pool_stat->pools_allocated;
        st->pools_freed = mem_pool_stat->pools_freed;
        st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
        st->bytes_allocated = mem_pool_stat->bytes_allocated;
        st->chunks_allocated = mem_pool_stat->chunks_allocated;
        st->chunks_freed = mem_pool_stat->chunks_freed;
        st->oversized_chunks = mem_pool_stat->oversized_chunks;
    }
}

void
rspamd_mempool_stat_reset (void)
{
    if (mem_pool_stat != NULL) {
        memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
    }
}

gsize
rspamd_mempool_suggest_size_ (const char *loc)
{
    return 0;
}

#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
static inline gint
__mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    /* Check the spin count */
    if (g_atomic_int_dec_and_test (&mutex->spin)) {
        /* This may be a deadlock, so check the owner of this lock */
        if (mutex->owner == getpid ()) {
            /* This mutex was locked by the calling process, so it is just a double lock and we can easily unlock it */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        else if (kill (mutex->owner, 0) == -1) {
            /* The owner process was not found, so release the lock */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        /* Spin again */
        g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
    }

#ifdef HAVE_SCHED_YIELD
    (void)sched_yield ();
#elif defined(HAVE_NANOSLEEP)
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = MUTEX_SLEEP_TIME;
    /* Spin */
    while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
#else
#error No methods to spin are defined
#endif

    return 1;
}

static void
memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
        if (!__mutex_spin (mutex)) {
            return;
        }
    }
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
        res->lock = 0;
        res->owner = 0;
        res->spin = MUTEX_SPIN_COUNT;

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    memory_pool_mutex_spin (mutex);
    mutex->owner = getpid ();
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    mutex->owner = 0;
    (void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *lock;

    lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
    lock->__r_lock = rspamd_mempool_get_mutex (pool);
    lock->__w_lock = rspamd_mempool_get_mutex (pool);

    return lock;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on the write lock */
    while (g_atomic_int_get (&lock->__w_lock->lock)) {
        if (!__mutex_spin (lock->__w_lock)) {
            break;
        }
    }

    g_atomic_int_inc (&lock->__r_lock->lock);
    lock->__r_lock->owner = getpid ();
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on the write lock first */
    rspamd_mempool_lock_mutex (lock->__w_lock);
    /* Now we have the write lock set up */
    /* Wait for all readers */
    while (g_atomic_int_get (&lock->__r_lock->lock)) {
        __mutex_spin (lock->__r_lock);
    }
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    if (g_atomic_int_get (&lock->__r_lock->lock)) {
        (void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
    }
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    rspamd_mempool_unlock_mutex (lock->__w_lock);
}
#else
/*
 * Pthread-based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;
    pthread_mutexattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));

        pthread_mutexattr_init (&mattr);
        pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
        pthread_mutexattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_lock (mutex);
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_unlock (mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *res;
    pthread_rwlockattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));

        pthread_rwlockattr_init (&mattr);
        pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_rwlock_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
        pthread_rwlockattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_rdlock (lock);
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_wrlock (lock);
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}
#endif
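/*
 * Locking sketch (illustrative): both implementations above expose the
 * same API, and the lock object lives in shared pool memory, so it can
 * synchronize related processes (the emulated variant tracks owners by
 * pid; the pthread variant uses PTHREAD_PROCESS_SHARED):
 *
 *     rspamd_mempool_mutex_t *mtx = rspamd_mempool_get_mutex (pool);
 *     rspamd_mempool_lock_mutex (mtx);
 *     ... critical section ...
 *     rspamd_mempool_unlock_mutex (mtx);
 */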
void
rspamd_mempool_set_variable (rspamd_mempool_t *pool,
        const gchar *name,
        gpointer value,
        rspamd_mempool_destruct_t destructor)
{
    if (pool->variables == NULL) {
        pool->variables = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
    }

    g_hash_table_insert (pool->variables, rspamd_mempool_strdup (pool,
            name), value);

    if (destructor != NULL) {
        rspamd_mempool_add_destructor (pool, destructor, value);
    }
}

gpointer
rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables == NULL) {
        return NULL;
    }

    return g_hash_table_lookup (pool->variables, name);
}

void
rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables != NULL) {
        g_hash_table_remove (pool->variables, name);
    }
}
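/*
 * Variable sketch (illustrative; my_ctx and my_ctx_free are hypothetical
 * names): variables attach named pointers to a pool, with an optional
 * destructor that runs when the pool is deleted:
 *
 *     rspamd_mempool_set_variable (pool, "ctx", my_ctx,
 *             (rspamd_mempool_destruct_t)my_ctx_free);
 *     struct my_ctx *c = rspamd_mempool_get_variable (pool, "ctx");
 */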
GList *
rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->prev = NULL;
    cell->data = p;

    if (l == NULL) {
        cell->next = NULL;
    }
    else {
        cell->next = l;
        l->prev = cell;
    }

    return cell;
}

GList *
rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell, *cur;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->next = NULL;
    cell->data = p;

    if (l) {
        for (cur = l; cur->next != NULL; cur = cur->next) {}
        cur->next = cell;
        cell->prev = cur;
    }
    else {
        l = cell;
        l->prev = NULL;
    }

    return l;
}
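/*
 * GList sketch (illustrative): list cells are pool-allocated, so the
 * resulting list must not be freed with g_list_free(); it is reclaimed
 * with the pool. Append walks to the tail each call, so it is O(n);
 * prefer prepend when order does not matter:
 *
 *     GList *l = NULL;
 *     l = rspamd_mempool_glist_append (pool, l, item_a);
 *     l = rspamd_mempool_glist_prepend (pool, l, item_b);
 */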