mem_pool.c

/*-
 * Copyright 2016 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() do { } while (0)
#define POOL_MTX_UNLOCK() do { } while (0)
/*
 * This define specifies whether we should check all pools for free space for a new object,
 * or only scan the current (most recently attached) pool.
 * If MEMORY_GREEDY is defined, all pools are scanned to find free space (more CPU usage, slower,
 * but requires less memory). If it is not defined, only the current pool is checked and, if the
 * object is too large to fit there, a new pool is allocated (this may also cause high CPU usage
 * in some cases, but is generally faster than the greedy method).
 */
#undef MEMORY_GREEDY

#define ENTRY_LEN 128
#define ENTRY_NELTS 64

struct entry_elt {
    guint32 fragmentation;
    guint32 leftover;
};

struct rspamd_mempool_entry_point {
    gchar src[ENTRY_LEN];
    guint32 cur_suggestion;
    guint32 cur_elts;
    struct entry_elt elts[ENTRY_NELTS];
};

static inline uint32_t
rspamd_entry_hash (const char *str)
{
    return rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
}

static inline int
rspamd_entry_equal (const char *k1, const char *k2)
{
    return strcmp (k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
    1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistic */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;
/**
 * Returns the free space in a pool page
 * @param chain pool page struct
 */
static gsize
pool_chain_free (struct _pool_chain *chain)
{
    gint64 occupied = chain->pos - chain->begin + MEM_ALIGNMENT;

    return (occupied < (gint64)chain->len ?
            chain->len - occupied : 0);
}
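
/*
 * Editor's sketch (illustrative, not part of the original source): a rough
 * walk-through of the arithmetic above, assuming MEM_ALIGNMENT is 16 and a
 * chain with chain->len == 4096. Right after creation pos == align_ptr (begin, 16),
 * so occupied is at most ~31 bytes and pool_chain_free () reports roughly
 * 4065..4080 free bytes. Once pos has advanced to, say, begin + 4090, occupied
 * becomes 4106 > len and the function returns 0, which makes
 * memory_pool_alloc_common () attach a new chain.
 */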
/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096
static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new (const gchar *loc)
{
    struct rspamd_mempool_entry_point **pentry, *entry;
    gint r;
    khiter_t k;

    k = kh_put (mempool_entry, mempool_entries, loc, &r);

    if (r >= 0) {
        pentry = &kh_value (mempool_entries, k);
        entry = g_malloc0 (sizeof (*entry));
        *pentry = entry;
        memset (entry, 0, sizeof (*entry));
        rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
#ifdef HAVE_GETPAGESIZE
        entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
#else
        entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
    }
    else {
        g_assert_not_reached ();
    }

    return entry;
}
static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry (const gchar *loc)
{
    khiter_t k;
    struct rspamd_mempool_entry_point *elt;

    if (mempool_entries == NULL) {
        mempool_entries = kh_init (mempool_entry);
    }
    else {
        k = kh_get (mempool_entry, mempool_entries, loc);

        if (k != kh_end (mempool_entries)) {
            elt = kh_value (mempool_entries, k);

            return elt;
        }
    }

    return rspamd_mempool_entry_new (loc);
}
static struct _pool_chain *
rspamd_mempool_chain_new (gsize size, enum rspamd_mempool_chain_type pool_type)
{
    struct _pool_chain *chain;
    gsize total_size = size + sizeof (struct _pool_chain) + MEM_ALIGNMENT,
          optimal_size = 0;
    gpointer map;

    g_assert (size > 0);

    if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                total_size,
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);

        if (map == MAP_FAILED) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        if (fd == -1) {
            return NULL;
        }
        map = mmap (NULL,
                size + sizeof (struct _pool_chain),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting", size +
                    sizeof (struct _pool_chain));
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#else
#error No mmap methods are defined
#endif
        g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
    }
    else {
#ifdef HAVE_MALLOC_SIZE
        optimal_size = sys_alloc_size (total_size);
#endif
        total_size = MAX (total_size, optimal_size);
        map = malloc (total_size);

        if (map == NULL) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
        g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
    }

    chain->pos = align_ptr (chain->begin, MEM_ALIGNMENT);
    chain->len = total_size - sizeof (struct _pool_chain);
    chain->lock = NULL;

    return chain;
}
static void
rspamd_mempool_create_pool_type (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    gsize preallocated_len;

    switch (pool_type) {
    case RSPAMD_MEMPOOL_NORMAL:
        preallocated_len = 32;
        break;
    case RSPAMD_MEMPOOL_SHARED:
    case RSPAMD_MEMPOOL_TMP:
    default:
        preallocated_len = 2;
        break;
    }

    pool->pools[pool_type] = g_ptr_array_sized_new (preallocated_len);
}
/**
 * Get the current pool of the specified type, creating the corresponding
 * array if it's absent
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    gsize len;

    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

    if (pool->pools[pool_type] == NULL) {
        rspamd_mempool_create_pool_type (pool, pool_type);
    }

    len = pool->pools[pool_type]->len;

    if (len == 0) {
        return NULL;
    }

    return (g_ptr_array_index (pool->pools[pool_type], len - 1));
}
static void
rspamd_mempool_append_chain (rspamd_mempool_t * pool,
        struct _pool_chain *chain,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
    g_assert (chain != NULL);

    if (pool->pools[pool_type] == NULL) {
        rspamd_mempool_create_pool_type (pool, pool_type);
    }

    g_ptr_array_add (pool->pools[pool_type], chain);
}
/**
 * Allocate a new memory pool
 * @param size size of the pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_ (gsize size, const gchar *tag, const gchar *loc)
{
    rspamd_mempool_t *new;
    gpointer map;
    unsigned char uidbuf[10];
    const gchar hexdigits[] = "0123456789abcdef";
    unsigned i;

    /* Allocate the statistics structure if it has not been allocated before */
    if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }

        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        g_assert (fd != -1);
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }

        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#else
# error No mmap methods are defined
#endif
        memset (map, 0, sizeof (rspamd_mempool_stat_t));
    }

    if (!env_checked) {
        /* Check the VALGRIND environment variable to force plain malloc (useful for memory pool debugging) */
        const char *g_slice;

        g_slice = getenv ("VALGRIND");
        if (g_slice != NULL) {
            always_malloc = TRUE;
        }

        env_checked = TRUE;
    }

    new = g_malloc0 (sizeof (rspamd_mempool_t));
    new->entry = rspamd_mempool_get_entry (loc);
    new->destructors = g_array_sized_new (FALSE, FALSE,
            sizeof (struct _pool_destructors), 32);
    rspamd_mempool_create_pool_type (new, RSPAMD_MEMPOOL_NORMAL);

    /* If no explicit size is given, use the per-location suggestion */
    if (size == 0) {
        new->elt_len = new->entry->cur_suggestion;
    }
    else {
        new->elt_len = size;
    }

    if (tag) {
        rspamd_strlcpy (new->tag.tagname, tag, sizeof (new->tag.tagname));
    }
    else {
        new->tag.tagname[0] = '\0';
    }

    /* Generate new uid */
    ottery_rand_bytes (uidbuf, sizeof (uidbuf));
    for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
        new->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
        new->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
    }
    new->tag.uid[19] = '\0';

    mem_pool_stat->pools_allocated++;

    return new;
}
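
/*
 * Editor's sketch (illustrative, not part of the original source): the typical
 * pool lifecycle as seen from a caller. It assumes the rspamd_mempool_new (),
 * rspamd_mempool_suggest_size () and rspamd_mempool_add_destructor ()
 * convenience macros from mem_pool.h, which are expected to wrap the *_
 * functions defined in this file with the caller's source location.
 *
 *   rspamd_mempool_t *pool;
 *   gchar *str;
 *
 *   pool = rspamd_mempool_new (rspamd_mempool_suggest_size (), "example");
 *   str = rspamd_mempool_strdup (pool, "hello");
 *   rspamd_mempool_add_destructor (pool,
 *       (rspamd_mempool_destruct_t) g_hash_table_destroy,
 *       g_hash_table_new (g_str_hash, g_str_equal));
 *   ...
 *   rspamd_mempool_delete (pool);  (runs destructors, then frees all chains at once)
 */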
static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
        RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;

static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
{
    guint8 *tmp;
    struct _pool_chain *new, *cur;
    gsize free = 0;

    if (pool) {
        POOL_MTX_LOCK ();

        if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
            void *ptr;

            ptr = g_malloc (size);
            POOL_MTX_UNLOCK ();

            if (pool->trash_stack == NULL) {
                pool->trash_stack = g_ptr_array_sized_new (128);
            }

            g_ptr_array_add (pool->trash_stack, ptr);

            return ptr;
        }

        cur = rspamd_mempool_get_chain (pool, pool_type);

        /* Find free space in pool chain */
        if (cur) {
            free = pool_chain_free (cur);
        }

        if (cur == NULL || free < size) {
            /* Allocate new chain element */
            if (pool->elt_len >= size + MEM_ALIGNMENT) {
                pool->entry->elts[pool->entry->cur_elts].fragmentation += size;
                new = rspamd_mempool_chain_new (pool->elt_len,
                        pool_type);
            }
            else {
                mem_pool_stat->oversized_chunks++;
                g_atomic_int_add (&mem_pool_stat->fragmented_size,
                        free);
                pool->entry->elts[pool->entry->cur_elts].fragmentation += free;
                new = rspamd_mempool_chain_new (size + pool->elt_len, pool_type);
            }

            /* Connect to pool subsystem */
            rspamd_mempool_append_chain (pool, new, pool_type);
            /* No need to align again, aligned by rspamd_mempool_chain_new */
            tmp = new->pos;
            new->pos = tmp + size;
            POOL_MTX_UNLOCK ();

            return tmp;
        }

        /* No need to allocate page */
        tmp = align_ptr (cur->pos, MEM_ALIGNMENT);
        cur->pos = tmp + size;
        POOL_MTX_UNLOCK ();

        return tmp;
    }

    abort ();
}
void *
rspamd_mempool_alloc (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_NORMAL);
}

void *
rspamd_mempool_alloc_tmp (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_TMP);
}

void *
rspamd_mempool_alloc0 (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc0_tmp (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_tmp (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc0_shared (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_shared (pool, size);

    memset (pointer, 0, size);

    return pointer;
}

void *
rspamd_mempool_alloc_shared (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_SHARED);
}

gchar *
rspamd_mempool_strdup (rspamd_mempool_t * pool, const gchar *src)
{
    gsize len;
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    len = strlen (src);
    newstr = rspamd_mempool_alloc (pool, len + 1);
    memcpy (newstr, src, len);
    newstr[len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_fstrdup (rspamd_mempool_t * pool, const struct f_str_s *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->str, src->len);
    newstr[src->len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_ftokdup (rspamd_mempool_t *pool, const rspamd_ftok_t *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->begin, src->len);
    newstr[src->len] = '\0';

    return newstr;
}
void
rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *data,
        const gchar *function,
        const gchar *line)
{
    struct _pool_destructors cur;

    POOL_MTX_LOCK ();
    cur.func = func;
    cur.data = data;
    cur.function = function;
    cur.loc = line;

    g_array_append_val (pool->destructors, cur);
    POOL_MTX_UNLOCK ();
}

void
rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *old_data,
        void *new_data)
{
    struct _pool_destructors *tmp;
    guint i;

    for (i = 0; i < pool->destructors->len; i ++) {
        tmp = &g_array_index (pool->destructors, struct _pool_destructors, i);

        if (tmp->func == func && tmp->data == old_data) {
            tmp->func = func;
            tmp->data = new_data;
            break;
        }
    }
}

static gint
cmp_int (gconstpointer a, gconstpointer b)
{
    gint i1 = *(const gint *)a, i2 = *(const gint *)b;

    return i1 - i2;
}
static void
rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
{
    gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
    guint i, jitter;

    for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
        sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
    }

    qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
    jitter = rspamd_random_uint64_fast () % 10;
    /*
     * Take stochastic quantiles
     */
    sel_pos = sz[50 + jitter];
    sel_neg = sz[4 + jitter];

    if (sel_neg > 0) {
        /* We need to increase our suggestion */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }
    else if (-sel_neg > sel_pos) {
        /* We need to reduce the current suggestion */
        e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
    }
    else {
        /* We still want to grow */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }

    /* Apply some sane limits given the mempool architecture */
    if (e->cur_suggestion < 1024) {
        e->cur_suggestion = 1024;
    }
    else if (e->cur_suggestion > 1024 * 1024 * 10) {
        e->cur_suggestion = 1024 * 1024 * 10;
    }

    memset (e->elts, 0, sizeof (e->elts));
}
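
/*
 * Editor's note (illustrative, not part of the original source): with
 * ENTRY_NELTS == 64 samples and jitter in [0, 10), sz[50 + jitter] is roughly
 * the 78th..92nd percentile of (fragmentation - leftover) and sz[4 + jitter]
 * roughly the 6th..20th percentile. So the per-location suggestion grows when
 * even the low quantile is positive (fragmentation dominates everywhere),
 * shrinks when the leftover at the low quantile outweighs the fragmentation at
 * the high one, and otherwise still grows, always clamped to 1KiB..10MiB.
 */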
void
rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
{
    struct _pool_destructors *destructor;
    guint i;

    POOL_MTX_LOCK ();

    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    pool->destructors->len = 0;

    POOL_MTX_UNLOCK ();
}
void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur;
    struct _pool_destructors *destructor;
    gpointer ptr;
    guint i, j;
    gsize len;

    POOL_MTX_LOCK ();

    cur = NULL;

    if (pool->pools[RSPAMD_MEMPOOL_NORMAL] != NULL &&
            pool->pools[RSPAMD_MEMPOOL_NORMAL]->len > 0) {
        cur = g_ptr_array_index (pool->pools[RSPAMD_MEMPOOL_NORMAL],
                pool->pools[RSPAMD_MEMPOOL_NORMAL]->len - 1);
    }

    if (cur) {
        pool->entry->elts[pool->entry->cur_elts].leftover =
                pool_chain_free (cur);

        pool->entry->cur_elts = (pool->entry->cur_elts + 1) %
                G_N_ELEMENTS (pool->entry->elts);

        if (pool->entry->cur_elts == 0) {
            rspamd_mempool_adjust_entry (pool->entry);
        }
    }

    /* Call all pool destructors */
    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    g_array_free (pool->destructors, TRUE);

    for (i = 0; i < G_N_ELEMENTS (pool->pools); i ++) {
        if (pool->pools[i]) {
            for (j = 0; j < pool->pools[i]->len; j++) {
                cur = g_ptr_array_index (pool->pools[i], j);
                g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                        -((gint)cur->len));
                g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);

                len = cur->len + sizeof (struct _pool_chain);

                if (i == RSPAMD_MEMPOOL_SHARED) {
                    munmap ((void *)cur, len);
                }
                else {
                    free (cur); /* Not g_free as we use system allocator */
                }
            }

            g_ptr_array_free (pool->pools[i], TRUE);
        }
    }

    if (pool->variables) {
        g_hash_table_destroy (pool->variables);
    }

    if (pool->trash_stack) {
        for (i = 0; i < pool->trash_stack->len; i++) {
            ptr = g_ptr_array_index (pool->trash_stack, i);
            g_free (ptr);
        }

        g_ptr_array_free (pool->trash_stack, TRUE);
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
    g_free (pool);
}
void
rspamd_mempool_cleanup_tmp (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur;
    guint i;

    POOL_MTX_LOCK ();

    if (pool->pools[RSPAMD_MEMPOOL_TMP]) {
        for (i = 0; i < pool->pools[RSPAMD_MEMPOOL_TMP]->len; i++) {
            cur = g_ptr_array_index (pool->pools[RSPAMD_MEMPOOL_TMP], i);
            g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                    -((gint)cur->len));
            g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);

            free (cur);
        }

        g_ptr_array_free (pool->pools[RSPAMD_MEMPOOL_TMP], TRUE);
        pool->pools[RSPAMD_MEMPOOL_TMP] = NULL;
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
}
void
rspamd_mempool_stat (rspamd_mempool_stat_t * st)
{
    if (mem_pool_stat != NULL) {
        st->pools_allocated = mem_pool_stat->pools_allocated;
        st->pools_freed = mem_pool_stat->pools_freed;
        st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
        st->bytes_allocated = mem_pool_stat->bytes_allocated;
        st->chunks_allocated = mem_pool_stat->chunks_allocated;
        st->chunks_freed = mem_pool_stat->chunks_freed;
        st->oversized_chunks = mem_pool_stat->oversized_chunks;
    }
}

void
rspamd_mempool_stat_reset (void)
{
    if (mem_pool_stat != NULL) {
        memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
    }
}

gsize
rspamd_mempool_suggest_size_ (const char *loc)
{
    return 0;
}
#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
static inline gint
__mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    /* check spin count */
    if (g_atomic_int_dec_and_test (&mutex->spin)) {
        /* This may be deadlock, so check owner of this lock */
        if (mutex->owner == getpid ()) {
            /* This mutex was locked by calling process, so it is just double lock and we can easily unlock it */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        else if (kill (mutex->owner, 0) == -1) {
            /* Owner process was not found, so release lock */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        /* Spin again */
        g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
    }

#ifdef HAVE_SCHED_YIELD
    (void)sched_yield ();
#elif defined(HAVE_NANOSLEEP)
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = MUTEX_SLEEP_TIME;

    /* Spin */
    while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
#else
#error No methods to spin are defined
#endif

    return 1;
}

static void
memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
        if (!__mutex_spin (mutex)) {
            return;
        }
    }
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
        res->lock = 0;
        res->owner = 0;
        res->spin = MUTEX_SPIN_COUNT;

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    memory_pool_mutex_spin (mutex);
    mutex->owner = getpid ();
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    mutex->owner = 0;
    (void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *lock;

    lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
    lock->__r_lock = rspamd_mempool_get_mutex (pool);
    lock->__w_lock = rspamd_mempool_get_mutex (pool);

    return lock;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock */
    while (g_atomic_int_get (&lock->__w_lock->lock)) {
        if (!__mutex_spin (lock->__w_lock)) {
            break;
        }
    }

    g_atomic_int_inc (&lock->__r_lock->lock);
    lock->__r_lock->owner = getpid ();
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock first */
    rspamd_mempool_lock_mutex (lock->__w_lock);
    /* Now we have write lock set up */

    /* Wait all readers */
    while (g_atomic_int_get (&lock->__r_lock->lock)) {
        __mutex_spin (lock->__r_lock);
    }
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    if (g_atomic_int_get (&lock->__r_lock->lock)) {
        (void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
    }
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    rspamd_mempool_unlock_mutex (lock->__w_lock);
}
#else
/*
 * Pthread based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;
    pthread_mutexattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));

        pthread_mutexattr_init (&mattr);
        pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
        pthread_mutexattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_lock (mutex);
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_unlock (mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *res;
    pthread_rwlockattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));

        pthread_rwlockattr_init (&mattr);
        pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_rwlock_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
        pthread_rwlockattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_rdlock (lock);
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_wrlock (lock);
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}
#endif
void
rspamd_mempool_set_variable (rspamd_mempool_t *pool,
        const gchar *name,
        gpointer value,
        rspamd_mempool_destruct_t destructor)
{
    if (pool->variables == NULL) {
        pool->variables = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
    }

    g_hash_table_insert (pool->variables, rspamd_mempool_strdup (pool,
            name), value);

    if (destructor != NULL) {
        rspamd_mempool_add_destructor (pool, destructor, value);
    }
}

gpointer
rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables == NULL) {
        return NULL;
    }

    return g_hash_table_lookup (pool->variables, name);
}

void
rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables != NULL) {
        g_hash_table_remove (pool->variables, name);
    }
}
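
/*
 * Editor's sketch (illustrative, not part of the original source): the
 * variables API attaches named values to a pool. The key is copied into the
 * pool, the value is stored as-is and may optionally be released by a
 * destructor when the pool is deleted; note that removing a variable only
 * drops the hash entry, a registered destructor still runs at
 * rspamd_mempool_delete (). The identifiers below are hypothetical caller-side
 * names.
 *
 *   rspamd_mempool_set_variable (pool, "cfg", cfg_ptr,
 *       (rspamd_mempool_destruct_t) cfg_free);
 *   void *cfg = rspamd_mempool_get_variable (pool, "cfg");
 *   rspamd_mempool_remove_variable (pool, "cfg");
 */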
GList *
rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->prev = NULL;
    cell->data = p;

    if (l == NULL) {
        cell->next = NULL;
    }
    else {
        cell->next = l;
        l->prev = cell;
    }

    return cell;
}

GList *
rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell, *cur;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->next = NULL;
    cell->data = p;

    if (l) {
        for (cur = l; cur->next != NULL; cur = cur->next) {}
        cur->next = cell;
        cell->prev = cur;
    }
    else {
        l = cell;
        l->prev = NULL;
    }

    return l;
}
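
/*
 * Editor's note (illustrative, not part of the original source): these GList
 * helpers allocate list cells from the pool instead of the GLib slice
 * allocator, so a list built with them must not be passed to g_list_free ();
 * the cells simply vanish together with the pool. A minimal sketch, where
 * `item' is any caller-side pointer:
 *
 *   GList *l = NULL;
 *   l = rspamd_mempool_glist_append (pool, l, item);
 *   l = rspamd_mempool_glist_prepend (pool, l, item);
 */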