
mem_pool.c

/*-
 * Copyright 2016 Vsevolod Stakhov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "config.h"
#include "mem_pool.h"
#include "fstring.h"
#include "logger.h"
#include "ottery.h"
#include "unix-std.h"
#include "khash.h"
#include "cryptobox.h"

#ifdef WITH_JEMALLOC
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 3 && JEMALLOC_VERSION_MINOR >= 6) || (JEMALLOC_VERSION_MAJOR > 3)
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) nallocx(sz, 0)
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define sys_alloc_size(sz) malloc_good_size(sz)
#endif

#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif

/* Sleep time for spin lock in nanoseconds */
#define MUTEX_SLEEP_TIME 10000000L
#define MUTEX_SPIN_COUNT 100

#define POOL_MTX_LOCK() do { } while (0)
#define POOL_MTX_UNLOCK() do { } while (0)
/*
 * This define specifies whether we should check all pools for free space for a new object
 * or just begin the scan from the current (most recently attached) pool.
 * If MEMORY_GREEDY is defined, then we scan all pools to find free space (more CPU usage and slower,
 * but requires less memory). If it is not defined, only the current pool is checked, and if the object
 * is too large to fit in it a new pool is allocated (this may also cause high CPU usage in some cases,
 * but is generally faster than the greedy method).
 */
#undef MEMORY_GREEDY
#define ENTRY_LEN 128
#define ENTRY_NELTS 64

struct entry_elt {
    guint32 fragmentation;
    guint32 leftover;
};

struct rspamd_mempool_entry_point {
    gchar src[ENTRY_LEN];
    guint32 cur_suggestion;
    guint32 cur_elts;
    struct entry_elt elts[ENTRY_NELTS];
};

static inline uint32_t
rspamd_entry_hash (const char *str)
{
    return rspamd_cryptobox_fast_hash (str, strlen (str), rspamd_hash_seed ());
}

static inline int
rspamd_entry_equal (const char *k1, const char *k2)
{
    return strcmp (k1, k2) == 0;
}

KHASH_INIT(mempool_entry, const gchar *, struct rspamd_mempool_entry_point *,
    1, rspamd_entry_hash, rspamd_entry_equal)

static khash_t(mempool_entry) *mempool_entries = NULL;

/* Internal statistic */
static rspamd_mempool_stat_t *mem_pool_stat = NULL;
/* Environment variable */
static gboolean env_checked = FALSE;
static gboolean always_malloc = FALSE;
/**
 * Function that returns free space in a pool page
 * @param chain pool page struct
 */
static gsize
pool_chain_free (struct _pool_chain *chain)
{
    gint64 occupied = chain->pos - chain->begin + MEM_ALIGNMENT;

    return (occupied < (gint64)chain->len ?
            chain->len - occupied : 0);
}

/* By default allocate 4Kb chunks of memory */
#define FIXED_POOL_SIZE 4096
static inline struct rspamd_mempool_entry_point *
rspamd_mempool_entry_new (const gchar *loc)
{
    struct rspamd_mempool_entry_point **pentry, *entry;
    gint r;
    khiter_t k;

    k = kh_put (mempool_entry, mempool_entries, loc, &r);

    if (r >= 0) {
        pentry = &kh_value (mempool_entries, k);
        entry = g_malloc0 (sizeof (*entry));
        *pentry = entry;
        memset (entry, 0, sizeof (*entry));
        rspamd_strlcpy (entry->src, loc, sizeof (entry->src));
#ifdef HAVE_GETPAGESIZE
        entry->cur_suggestion = MAX (getpagesize (), FIXED_POOL_SIZE);
#else
        entry->cur_suggestion = MAX (sysconf (_SC_PAGESIZE), FIXED_POOL_SIZE);
#endif
    }
    else {
        g_assert_not_reached ();
    }

    return entry;
}
static inline struct rspamd_mempool_entry_point *
rspamd_mempool_get_entry (const gchar *loc)
{
    khiter_t k;
    struct rspamd_mempool_entry_point *elt;

    if (mempool_entries == NULL) {
        mempool_entries = kh_init (mempool_entry);
    }
    else {
        k = kh_get (mempool_entry, mempool_entries, loc);

        if (k != kh_end (mempool_entries)) {
            elt = kh_value (mempool_entries, k);

            return elt;
        }
    }

    return rspamd_mempool_entry_new (loc);
}
static struct _pool_chain *
rspamd_mempool_chain_new (gsize size, enum rspamd_mempool_chain_type pool_type)
{
    struct _pool_chain *chain;
    gsize total_size = size + sizeof (struct _pool_chain) + MEM_ALIGNMENT,
          optimal_size = 0;
    gpointer map;

    g_assert (size > 0);

    if (pool_type == RSPAMD_MEMPOOL_SHARED) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                total_size,
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);

        if (map == MAP_FAILED) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        if (fd == -1) {
            return NULL;
        }

        map = mmap (NULL,
                size + sizeof (struct _pool_chain),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting", size +
                    sizeof (struct _pool_chain));
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
#else
#error No mmap methods are defined
#endif
        g_atomic_int_inc (&mem_pool_stat->shared_chunks_allocated);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
    }
    else {
#ifdef HAVE_MALLOC_SIZE
        optimal_size = sys_alloc_size (total_size);
#endif
        total_size = MAX (total_size, optimal_size);
        map = malloc (total_size);

        if (map == NULL) {
            g_error ("%s: failed to allocate %"G_GSIZE_FORMAT" bytes",
                    G_STRLOC, total_size);
            abort ();
        }

        chain = map;
        chain->begin = ((guint8 *) chain) + sizeof (struct _pool_chain);
        g_atomic_int_add (&mem_pool_stat->bytes_allocated, total_size);
        g_atomic_int_inc (&mem_pool_stat->chunks_allocated);
    }

    chain->pos = align_ptr (chain->begin, MEM_ALIGNMENT);
    chain->len = total_size - sizeof (struct _pool_chain);
    chain->lock = NULL;

    return chain;
}
static void
rspamd_mempool_create_pool_type (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    gsize preallocated_len;

    switch (pool_type) {
    case RSPAMD_MEMPOOL_NORMAL:
        preallocated_len = 32;
        break;
    case RSPAMD_MEMPOOL_SHARED:
    case RSPAMD_MEMPOOL_TMP:
    default:
        preallocated_len = 2;
        break;
    }

    pool->pools[pool_type] = g_ptr_array_sized_new (preallocated_len);
}

/**
 * Get the current pool of the specified type, creating the corresponding
 * array if it's absent
 * @param pool
 * @param pool_type
 * @return
 */
static struct _pool_chain *
rspamd_mempool_get_chain (rspamd_mempool_t * pool,
        enum rspamd_mempool_chain_type pool_type)
{
    gsize len;

    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);

    if (pool->pools[pool_type] == NULL) {
        rspamd_mempool_create_pool_type (pool, pool_type);
    }

    len = pool->pools[pool_type]->len;

    if (len == 0) {
        return NULL;
    }

    return (g_ptr_array_index (pool->pools[pool_type], len - 1));
}

static void
rspamd_mempool_append_chain (rspamd_mempool_t * pool,
        struct _pool_chain *chain,
        enum rspamd_mempool_chain_type pool_type)
{
    g_assert (pool_type >= 0 && pool_type < RSPAMD_MEMPOOL_MAX);
    g_assert (chain != NULL);

    if (pool->pools[pool_type] == NULL) {
        rspamd_mempool_create_pool_type (pool, pool_type);
    }

    g_ptr_array_add (pool->pools[pool_type], chain);
}
/**
 * Allocate a new memory pool
 * @param size size of the pool's page
 * @return new memory pool object
 */
rspamd_mempool_t *
rspamd_mempool_new_ (gsize size, const gchar *tag, const gchar *loc)
{
    rspamd_mempool_t *new;
    gpointer map;
    unsigned char uidbuf[10];
    const gchar hexdigits[] = "0123456789abcdef";
    unsigned i;

    /* Allocate the statistics structure if it has not been allocated before */
    if (mem_pool_stat == NULL) {
#if defined(HAVE_MMAP_ANON)
        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_ANON | MAP_SHARED,
                -1,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }

        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#elif defined(HAVE_MMAP_ZERO)
        gint fd;

        fd = open ("/dev/zero", O_RDWR);
        g_assert (fd != -1);

        map = mmap (NULL,
                sizeof (rspamd_mempool_stat_t),
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0);

        if (map == MAP_FAILED) {
            msg_err ("cannot allocate %z bytes, aborting",
                    sizeof (rspamd_mempool_stat_t));
            abort ();
        }

        mem_pool_stat = (rspamd_mempool_stat_t *)map;
#else
# error No mmap methods are defined
#endif
        memset (map, 0, sizeof (rspamd_mempool_stat_t));
    }
    if (!env_checked) {
        /* Check the VALGRIND environment variable to force plain malloc (allows memory pool debugging) */
        const char *g_slice;

        g_slice = getenv ("VALGRIND");

        if (g_slice != NULL) {
            always_malloc = TRUE;
        }

        env_checked = TRUE;
    }
    new = g_malloc0 (sizeof (rspamd_mempool_t));
    new->entry = rspamd_mempool_get_entry (loc);
    new->destructors = g_array_sized_new (FALSE, FALSE,
            sizeof (struct _pool_destructors), 32);
    rspamd_mempool_create_pool_type (new, RSPAMD_MEMPOOL_NORMAL);

    /* If no size is specified, use the current suggestion for this call site */
    if (size == 0) {
        new->elt_len = new->entry->cur_suggestion;
    }
    else {
        new->elt_len = size;
    }

    if (tag) {
        rspamd_strlcpy (new->tag.tagname, tag, sizeof (new->tag.tagname));
    }
    else {
        new->tag.tagname[0] = '\0';
    }

    /* Generate new uid */
    ottery_rand_bytes (uidbuf, sizeof (uidbuf));

    for (i = 0; i < G_N_ELEMENTS (uidbuf); i ++) {
        new->tag.uid[i * 2] = hexdigits[(uidbuf[i] >> 4) & 0xf];
        new->tag.uid[i * 2 + 1] = hexdigits[uidbuf[i] & 0xf];
    }

    new->tag.uid[19] = '\0';

    mem_pool_stat->pools_allocated++;

    return new;
}
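
/*
 * Illustrative usage sketch (not part of the original file): creating a pool
 * with a tag by calling rspamd_mempool_new_() directly. Callers normally use
 * a convenience macro from mem_pool.h (assumed here to be rspamd_mempool_new(),
 * passing the call site as `loc`); that macro name is an assumption, only
 * rspamd_mempool_new_() is defined in this file.
 *
 *   rspamd_mempool_t *pool;
 *
 *   pool = rspamd_mempool_new_ (0, "example", G_STRLOC);
 *   ... use the pool ...
 *   rspamd_mempool_delete (pool);
 *
 * Passing size == 0 picks the per-call-site suggestion stored in the entry
 * point (initially the page size), which is later adapted by
 * rspamd_mempool_adjust_entry() based on observed fragmentation and leftover.
 */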
static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
    RSPAMD_ATTR_ALLOC_SIZE(2) RSPAMD_ATTR_ALLOC_ALIGN(MEM_ALIGNMENT) RSPAMD_ATTR_RETURNS_NONNULL;

static void *
memory_pool_alloc_common (rspamd_mempool_t * pool, gsize size,
        enum rspamd_mempool_chain_type pool_type)
{
    guint8 *tmp;
    struct _pool_chain *new, *cur;
    gsize free = 0;

    if (pool) {
        POOL_MTX_LOCK ();

        if (always_malloc && pool_type != RSPAMD_MEMPOOL_SHARED) {
            void *ptr;

            ptr = g_malloc (size);
            POOL_MTX_UNLOCK ();

            if (pool->trash_stack == NULL) {
                pool->trash_stack = g_ptr_array_sized_new (128);
            }

            g_ptr_array_add (pool->trash_stack, ptr);

            return ptr;
        }

        cur = rspamd_mempool_get_chain (pool, pool_type);

        /* Find free space in pool chain */
        if (cur) {
            free = pool_chain_free (cur);
        }

        if (cur == NULL || free < size) {
            /* Allocate new chain element */
            if (pool->elt_len >= size + MEM_ALIGNMENT) {
                pool->entry->elts[pool->entry->cur_elts].fragmentation += size;
                new = rspamd_mempool_chain_new (pool->elt_len,
                        pool_type);
            }
            else {
                mem_pool_stat->oversized_chunks++;
                g_atomic_int_add (&mem_pool_stat->fragmented_size,
                        free);
                pool->entry->elts[pool->entry->cur_elts].fragmentation += free;
                new = rspamd_mempool_chain_new (size + pool->elt_len, pool_type);
            }

            /* Connect to pool subsystem */
            rspamd_mempool_append_chain (pool, new, pool_type);
            /* No need to align again, aligned by rspamd_mempool_chain_new */
            tmp = new->pos;
            new->pos = tmp + size;
            POOL_MTX_UNLOCK ();

            return tmp;
        }

        /* No need to allocate page */
        tmp = align_ptr (cur->pos, MEM_ALIGNMENT);
        cur->pos = tmp + size;
        POOL_MTX_UNLOCK ();

        return tmp;
    }

    abort ();
}
void *
rspamd_mempool_alloc (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_NORMAL);
}

void *
rspamd_mempool_alloc_tmp (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_TMP);
}

void *
rspamd_mempool_alloc0 (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc (pool, size);

    if (pointer) {
        memset (pointer, 0, size);
    }

    return pointer;
}

void *
rspamd_mempool_alloc0_tmp (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_tmp (pool, size);

    if (pointer) {
        memset (pointer, 0, size);
    }

    return pointer;
}

void *
rspamd_mempool_alloc0_shared (rspamd_mempool_t * pool, gsize size)
{
    void *pointer = rspamd_mempool_alloc_shared (pool, size);

    if (pointer) {
        memset (pointer, 0, size);
    }

    return pointer;
}

void *
rspamd_mempool_alloc_shared (rspamd_mempool_t * pool, gsize size)
{
    return memory_pool_alloc_common (pool, size, RSPAMD_MEMPOOL_SHARED);
}
gchar *
rspamd_mempool_strdup (rspamd_mempool_t * pool, const gchar *src)
{
    gsize len;
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    len = strlen (src);
    newstr = rspamd_mempool_alloc (pool, len + 1);
    memcpy (newstr, src, len);
    newstr[len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_fstrdup (rspamd_mempool_t * pool, const struct f_str_s *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->str, src->len);
    newstr[src->len] = '\0';

    return newstr;
}

gchar *
rspamd_mempool_ftokdup (rspamd_mempool_t *pool, const rspamd_ftok_t *src)
{
    gchar *newstr;

    if (src == NULL) {
        return NULL;
    }

    newstr = rspamd_mempool_alloc (pool, src->len + 1);
    memcpy (newstr, src->begin, src->len);
    newstr[src->len] = '\0';

    return newstr;
}
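
/*
 * Illustrative usage sketch (not part of the original file): pool allocations
 * are never freed individually; everything is released at once by
 * rspamd_mempool_delete(). The struct and field names below are made up for
 * the example.
 *
 *   struct example_item {
 *       gchar *name;
 *       guint flags;
 *   };
 *
 *   struct example_item *it;
 *
 *   it = rspamd_mempool_alloc0 (pool, sizeof (*it));
 *   it->name = rspamd_mempool_strdup (pool, "some name");
 *   // no explicit free: the memory lives until rspamd_mempool_delete (pool)
 */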
void
rspamd_mempool_add_destructor_full (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *data,
        const gchar *function,
        const gchar *line)
{
    struct _pool_destructors cur;

    POOL_MTX_LOCK ();
    cur.func = func;
    cur.data = data;
    cur.function = function;
    cur.loc = line;

    g_array_append_val (pool->destructors, cur);
    POOL_MTX_UNLOCK ();
}

void
rspamd_mempool_replace_destructor (rspamd_mempool_t * pool,
        rspamd_mempool_destruct_t func,
        void *old_data,
        void *new_data)
{
    struct _pool_destructors *tmp;
    guint i;

    for (i = 0; i < pool->destructors->len; i ++) {
        tmp = &g_array_index (pool->destructors, struct _pool_destructors, i);

        if (tmp->func == func && tmp->data == old_data) {
            tmp->func = func;
            tmp->data = new_data;
            break;
        }
    }
}
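
/*
 * Illustrative usage sketch (not part of the original file): registering a
 * destructor so that an external resource is released together with the pool.
 * rspamd_mempool_add_destructor() is assumed to be a macro over
 * rspamd_mempool_add_destructor_full() that records the call site, as used
 * elsewhere in this file; `new_ht` stands for a hypothetical replacement pointer.
 *
 *   GHashTable *ht = g_hash_table_new (g_str_hash, g_str_equal);
 *
 *   rspamd_mempool_add_destructor (pool,
 *       (rspamd_mempool_destruct_t)g_hash_table_unref, ht);
 *
 *   // If the resource is later reallocated, keep the registration in sync:
 *   rspamd_mempool_replace_destructor (pool,
 *       (rspamd_mempool_destruct_t)g_hash_table_unref, ht, new_ht);
 */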
static gint
cmp_int (gconstpointer a, gconstpointer b)
{
    gint i1 = *(const gint *)a, i2 = *(const gint *)b;

    return i1 - i2;
}
static void
rspamd_mempool_adjust_entry (struct rspamd_mempool_entry_point *e)
{
    gint sz[G_N_ELEMENTS (e->elts)], sel_pos, sel_neg;
    guint i, jitter;

    for (i = 0; i < G_N_ELEMENTS (sz); i ++) {
        sz[i] = e->elts[i].fragmentation - (gint)e->elts[i].leftover;
    }

    qsort (sz, G_N_ELEMENTS (sz), sizeof (gint), cmp_int);
    jitter = rspamd_random_uint64_fast () % 10;
    /*
     * Take stochastic quantiles
     */
    sel_pos = sz[50 + jitter];
    sel_neg = sz[4 + jitter];

    if (sel_neg > 0) {
        /* We need to increase our suggestion */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }
    else if (-sel_neg > sel_pos) {
        /* We need to reduce the current suggestion */
        e->cur_suggestion /= (1 + (((double)-sel_neg) / e->cur_suggestion)) * 1.5;
    }
    else {
        /* We still want to grow */
        e->cur_suggestion *= (1 + (((double)sel_pos) / e->cur_suggestion)) * 1.5;
    }

    /* Apply sane limits, given the mempool architecture */
    if (e->cur_suggestion < 1024) {
        e->cur_suggestion = 1024;
    }
    else if (e->cur_suggestion > 1024 * 1024 * 10) {
        e->cur_suggestion = 1024 * 1024 * 10;
    }

    memset (e->elts, 0, sizeof (e->elts));
}
void
rspamd_mempool_destructors_enforce (rspamd_mempool_t *pool)
{
    struct _pool_destructors *destructor;
    guint i;

    POOL_MTX_LOCK ();

    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    pool->destructors->len = 0;

    POOL_MTX_UNLOCK ();
}
void
rspamd_mempool_delete (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur;
    struct _pool_destructors *destructor;
    gpointer ptr;
    guint i, j;
    gsize len;

    POOL_MTX_LOCK ();

    cur = NULL;

    if (pool->pools[RSPAMD_MEMPOOL_NORMAL] != NULL &&
            pool->pools[RSPAMD_MEMPOOL_NORMAL]->len > 0) {
        cur = g_ptr_array_index (pool->pools[RSPAMD_MEMPOOL_NORMAL],
                pool->pools[RSPAMD_MEMPOOL_NORMAL]->len - 1);
    }

    if (cur) {
        pool->entry->elts[pool->entry->cur_elts].leftover =
                pool_chain_free (cur);

        pool->entry->cur_elts = (pool->entry->cur_elts + 1) %
                G_N_ELEMENTS (pool->entry->elts);

        if (pool->entry->cur_elts == 0) {
            rspamd_mempool_adjust_entry (pool->entry);
        }
    }

    /* Call all pool destructors */
    for (i = 0; i < pool->destructors->len; i ++) {
        destructor = &g_array_index (pool->destructors, struct _pool_destructors, i);
        /* Avoid calling destructors for NULL pointers */
        if (destructor->data != NULL) {
            destructor->func (destructor->data);
        }
    }

    g_array_free (pool->destructors, TRUE);

    for (i = 0; i < G_N_ELEMENTS (pool->pools); i ++) {
        if (pool->pools[i]) {
            for (j = 0; j < pool->pools[i]->len; j++) {
                cur = g_ptr_array_index (pool->pools[i], j);
                g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                        -((gint)cur->len));
                g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);
                len = cur->len + sizeof (struct _pool_chain);

                if (i == RSPAMD_MEMPOOL_SHARED) {
                    munmap ((void *)cur, len);
                }
                else {
                    free (cur); /* Not g_free as we use system allocator */
                }
            }

            g_ptr_array_free (pool->pools[i], TRUE);
        }
    }

    if (pool->variables) {
        g_hash_table_destroy (pool->variables);
    }

    if (pool->trash_stack) {
        for (i = 0; i < pool->trash_stack->len; i++) {
            ptr = g_ptr_array_index (pool->trash_stack, i);
            g_free (ptr);
        }

        g_ptr_array_free (pool->trash_stack, TRUE);
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
    g_free (pool);
}
void
rspamd_mempool_cleanup_tmp (rspamd_mempool_t * pool)
{
    struct _pool_chain *cur;
    guint i;

    POOL_MTX_LOCK ();

    if (pool->pools[RSPAMD_MEMPOOL_TMP]) {
        for (i = 0; i < pool->pools[RSPAMD_MEMPOOL_TMP]->len; i++) {
            cur = g_ptr_array_index (pool->pools[RSPAMD_MEMPOOL_TMP], i);
            g_atomic_int_add (&mem_pool_stat->bytes_allocated,
                    -((gint)cur->len));
            g_atomic_int_add (&mem_pool_stat->chunks_allocated, -1);

            free (cur);
        }

        g_ptr_array_free (pool->pools[RSPAMD_MEMPOOL_TMP], TRUE);
        pool->pools[RSPAMD_MEMPOOL_TMP] = NULL;
    }

    g_atomic_int_inc (&mem_pool_stat->pools_freed);
    POOL_MTX_UNLOCK ();
}
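
/*
 * Illustrative usage sketch (not part of the original file): temporary
 * allocations live in their own chain and can be dropped early without
 * destroying the whole pool.
 *
 *   gchar *scratch = rspamd_mempool_alloc_tmp (pool, 4096);
 *   ... fill and use scratch ...
 *   rspamd_mempool_cleanup_tmp (pool);  // releases all TMP chunks at once
 */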
void
rspamd_mempool_stat (rspamd_mempool_stat_t * st)
{
    if (mem_pool_stat != NULL) {
        st->pools_allocated = mem_pool_stat->pools_allocated;
        st->pools_freed = mem_pool_stat->pools_freed;
        st->shared_chunks_allocated = mem_pool_stat->shared_chunks_allocated;
        st->bytes_allocated = mem_pool_stat->bytes_allocated;
        st->chunks_allocated = mem_pool_stat->chunks_allocated;
        st->chunks_freed = mem_pool_stat->chunks_freed;
        st->oversized_chunks = mem_pool_stat->oversized_chunks;
    }
}

void
rspamd_mempool_stat_reset (void)
{
    if (mem_pool_stat != NULL) {
        memset (mem_pool_stat, 0, sizeof (rspamd_mempool_stat_t));
    }
}

gsize
rspamd_mempool_suggest_size_ (const char *loc)
{
    return 0;
}
#if !defined(HAVE_PTHREAD_PROCESS_SHARED) || defined(DISABLE_PTHREAD_MUTEX)
/*
 * Own emulation
 */
static inline gint
__mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    /* Check spin count */
    if (g_atomic_int_dec_and_test (&mutex->spin)) {
        /* This may be a deadlock, so check the owner of this lock */
        if (mutex->owner == getpid ()) {
            /* This mutex was locked by the calling process, so it is just a double lock that we can easily recover from */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }
        else if (kill (mutex->owner, 0) == -1) {
            /* The owner process was not found, so release the lock */
            g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
            return 0;
        }

        /* Spin again */
        g_atomic_int_set (&mutex->spin, MUTEX_SPIN_COUNT);
    }

#ifdef HAVE_SCHED_YIELD
    (void)sched_yield ();
#elif defined(HAVE_NANOSLEEP)
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = MUTEX_SLEEP_TIME;

    /* Spin */
    while (nanosleep (&ts, &ts) == -1 && errno == EINTR) ;
#else
#error No methods to spin are defined
#endif

    return 1;
}
static void
memory_pool_mutex_spin (rspamd_mempool_mutex_t * mutex)
{
    while (!g_atomic_int_compare_and_exchange (&mutex->lock, 0, 1)) {
        if (!__mutex_spin (mutex)) {
            return;
        }
    }
}

rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));
        res->lock = 0;
        res->owner = 0;
        res->spin = MUTEX_SPIN_COUNT;

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    memory_pool_mutex_spin (mutex);
    mutex->owner = getpid ();
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    mutex->owner = 0;
    (void)g_atomic_int_compare_and_exchange (&mutex->lock, 1, 0);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *lock;

    lock = rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));
    lock->__r_lock = rspamd_mempool_get_mutex (pool);
    lock->__w_lock = rspamd_mempool_get_mutex (pool);

    return lock;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock */
    while (g_atomic_int_get (&lock->__w_lock->lock)) {
        if (!__mutex_spin (lock->__w_lock)) {
            break;
        }
    }

    g_atomic_int_inc (&lock->__r_lock->lock);
    lock->__r_lock->owner = getpid ();
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    /* Spin on write lock first */
    rspamd_mempool_lock_mutex (lock->__w_lock);
    /* Now we have the write lock set up */
    /* Wait for all readers */
    while (g_atomic_int_get (&lock->__r_lock->lock)) {
        __mutex_spin (lock->__r_lock);
    }
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    if (g_atomic_int_get (&lock->__r_lock->lock)) {
        (void)g_atomic_int_dec_and_test (&lock->__r_lock->lock);
    }
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    rspamd_mempool_unlock_mutex (lock->__w_lock);
}
#else
/*
 * Pthread-based shared mutexes
 */
rspamd_mempool_mutex_t *
rspamd_mempool_get_mutex (rspamd_mempool_t * pool)
{
    rspamd_mempool_mutex_t *res;
    pthread_mutexattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_mutex_t));

        pthread_mutexattr_init (&mattr);
        pthread_mutexattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust (&mattr, PTHREAD_MUTEX_ROBUST);
        pthread_mutex_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_mutex_destroy, res);
        pthread_mutexattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_lock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_lock (mutex);
}

void
rspamd_mempool_unlock_mutex (rspamd_mempool_mutex_t * mutex)
{
    pthread_mutex_unlock (mutex);
}

rspamd_mempool_rwlock_t *
rspamd_mempool_get_rwlock (rspamd_mempool_t * pool)
{
    rspamd_mempool_rwlock_t *res;
    pthread_rwlockattr_t mattr;

    if (pool != NULL) {
        res =
            rspamd_mempool_alloc_shared (pool, sizeof (rspamd_mempool_rwlock_t));

        pthread_rwlockattr_init (&mattr);
        pthread_rwlockattr_setpshared (&mattr, PTHREAD_PROCESS_SHARED);
        pthread_rwlock_init (res, &mattr);
        rspamd_mempool_add_destructor (pool,
                (rspamd_mempool_destruct_t)pthread_rwlock_destroy, res);
        pthread_rwlockattr_destroy (&mattr);

        return res;
    }

    return NULL;
}

void
rspamd_mempool_rlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_rdlock (lock);
}

void
rspamd_mempool_wlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_wrlock (lock);
}

void
rspamd_mempool_runlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}

void
rspamd_mempool_wunlock_rwlock (rspamd_mempool_rwlock_t * lock)
{
    pthread_rwlock_unlock (lock);
}
#endif
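
/*
 * Illustrative usage sketch (not part of the original file): mutexes and
 * rwlocks are carved out of the shared pool so they can be used across
 * processes that map the same shared chunks, either via the spin-based
 * emulation above or via process-shared pthread primitives, depending on
 * the build.
 *
 *   rspamd_mempool_mutex_t *mtx = rspamd_mempool_get_mutex (pool);
 *
 *   rspamd_mempool_lock_mutex (mtx);
 *   ... critical section ...
 *   rspamd_mempool_unlock_mutex (mtx);
 */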
void
rspamd_mempool_set_variable (rspamd_mempool_t *pool,
        const gchar *name,
        gpointer value,
        rspamd_mempool_destruct_t destructor)
{
    if (pool->variables == NULL) {
        pool->variables = g_hash_table_new (rspamd_str_hash, rspamd_str_equal);
    }

    g_hash_table_insert (pool->variables, rspamd_mempool_strdup (pool,
            name), value);

    if (destructor != NULL) {
        rspamd_mempool_add_destructor (pool, destructor, value);
    }
}

gpointer
rspamd_mempool_get_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables == NULL) {
        return NULL;
    }

    return g_hash_table_lookup (pool->variables, name);
}

void
rspamd_mempool_remove_variable (rspamd_mempool_t *pool, const gchar *name)
{
    if (pool->variables != NULL) {
        g_hash_table_remove (pool->variables, name);
    }
}
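
/*
 * Illustrative usage sketch (not part of the original file): pool variables
 * act as a per-pool, string-keyed registry; the optional destructor is run
 * when the pool is deleted. The "ip" key and its value below are made up
 * for the example.
 *
 *   rspamd_mempool_set_variable (pool, "ip",
 *       rspamd_mempool_strdup (pool, "127.0.0.1"), NULL);
 *
 *   const gchar *ip = rspamd_mempool_get_variable (pool, "ip");
 *
 *   if (ip == NULL) {
 *       ... not set ...
 *   }
 */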
GList *
rspamd_mempool_glist_prepend (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->prev = NULL;
    cell->data = p;

    if (l == NULL) {
        cell->next = NULL;
    }
    else {
        cell->next = l;
        l->prev = cell;
    }

    return cell;
}

GList *
rspamd_mempool_glist_append (rspamd_mempool_t *pool, GList *l, gpointer p)
{
    GList *cell, *cur;

    cell = rspamd_mempool_alloc (pool, sizeof (*cell));
    cell->next = NULL;
    cell->data = p;

    if (l) {
        for (cur = l; cur->next != NULL; cur = cur->next) {}
        cur->next = cell;
        cell->prev = cur;
    }
    else {
        l = cell;
        l->prev = NULL;
    }

    return l;
}
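
/*
 * Illustrative usage sketch (not part of the original file): list cells are
 * allocated from the pool, so the resulting GList must not be freed with
 * g_list_free(); it simply goes away with the pool. item0..item2 are
 * placeholder pointers for the example.
 *
 *   GList *l = NULL;
 *
 *   l = rspamd_mempool_glist_append (pool, l, item1);
 *   l = rspamd_mempool_glist_append (pool, l, item2);
 *   l = rspamd_mempool_glist_prepend (pool, l, item0);
 *
 * Note that rspamd_mempool_glist_append() walks the list to find the tail,
 * so it is O(n) per call, while prepend is O(1).
 */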