You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360
#include "config.h"

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <glib.h>

#ifdef _THREAD_SAFE
#include <pthread.h>
#endif
#ifdef HAVE_SCHED_YIELD
#include <sched.h>
#endif
#ifdef HAVE_NANOSLEEP
#include <time.h>
#endif

#include "mem_pool.h"
/* Sleep interval for the shared-pool spin lock, in nanoseconds (10 ms) */
#define MUTEX_SLEEP_TIME 10000000L

#ifdef _THREAD_SAFE
/* Mutex guarding the global statistic counters below */
pthread_mutex_t stat_mtx = PTHREAD_MUTEX_INITIALIZER;
#define STAT_LOCK() do { pthread_mutex_lock (&stat_mtx); } while (0)
#define STAT_UNLOCK() do { pthread_mutex_unlock (&stat_mtx); } while (0)
#else
/* Single-threaded build: statistic locking compiles away */
#define STAT_LOCK() do {} while (0)
#define STAT_UNLOCK() do {} while (0)
#endif

/*
 * MEMORY_GREEDY selects how memory_pool_alloc searches for free space.
 * When defined, every chunk in the chain is scanned for a fit (more CPU,
 * slower, but wastes less memory). When undefined, only the current
 * (most recently attached) chunk is checked, and a new chunk is
 * allocated if the object does not fit — generally faster, though it
 * can also burn CPU in pathological cases.
 */
#undef MEMORY_GREEDY

/* Internal statistics; writers take STAT_LOCK in threaded builds */
static size_t bytes_allocated = 0;
static size_t chunks_allocated = 0;
static size_t chunks_freed = 0;
static size_t shared_chunks_allocated = 0;
  39. static struct _pool_chain *
  40. pool_chain_new (size_t size)
  41. {
  42. struct _pool_chain *chain;
  43. chain = g_malloc (sizeof (struct _pool_chain));
  44. chain->begin = g_malloc (size);
  45. chain->len = size;
  46. chain->pos = chain->begin;
  47. chain->next = NULL;
  48. STAT_LOCK ();
  49. chunks_allocated ++;
  50. STAT_UNLOCK ();
  51. return chain;
  52. }
  53. static struct _pool_chain_shared *
  54. pool_chain_new_shared (size_t size)
  55. {
  56. struct _pool_chain_shared *chain;
  57. #if defined(HAVE_MMAP_ANON)
  58. chain = mmap (NULL, size + sizeof (struct _pool_chain_shared), PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1, 0);
  59. chain->begin = ((u_char *)chain) + sizeof (struct _pool_chain_shared);
  60. if (chain == MAP_FAILED) {
  61. return NULL;
  62. }
  63. #elif defined(HAVE_MMAP_ZERO)
  64. int fd;
  65. fd = open ("/dev/zero", O_RDWR);
  66. if (fd == -1) {
  67. return NULL;
  68. }
  69. chain = mmap (NULL, shm->size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
  70. chain->begin = ((u_char *)chain) + sizeof (struct _pool_chain_shared);
  71. if (chain == MAP_FAILED) {
  72. return NULL;
  73. }
  74. #else
  75. # error No mmap methods are defined
  76. #endif
  77. chain->len = size;
  78. chain->pos = chain->begin;
  79. chain->lock = 0;
  80. chain->next = NULL;
  81. STAT_LOCK ();
  82. shared_chunks_allocated ++;
  83. STAT_UNLOCK ();
  84. return chain;
  85. }
  86. memory_pool_t*
  87. memory_pool_new (size_t size)
  88. {
  89. memory_pool_t *new;
  90. new = g_malloc (sizeof (memory_pool_t));
  91. new->cur_pool = pool_chain_new (size);
  92. new->shared_pool = NULL;
  93. new->first_pool = new->cur_pool;
  94. new->destructors = NULL;
  95. return new;
  96. }
  97. void *
  98. memory_pool_alloc (memory_pool_t *pool, size_t size)
  99. {
  100. u_char *tmp;
  101. struct _pool_chain *new, *cur;
  102. if (pool) {
  103. #ifdef MEMORY_GREEDY
  104. cur = pool->first_pool;
  105. #else
  106. cur = pool->cur_pool;
  107. #endif
  108. /* Find free space in pool chain */
  109. while (memory_pool_free (cur) < size && cur->next) {
  110. cur = cur->next;
  111. }
  112. if (cur->next == NULL && memory_pool_free (cur) < size) {
  113. /* Allocate new pool */
  114. if (cur->len >= size) {
  115. new = pool_chain_new (cur->len);
  116. }
  117. else {
  118. new = pool_chain_new (size + cur->len);
  119. }
  120. /* Attach new pool to chain */
  121. cur->next = new;
  122. pool->cur_pool = new;
  123. new->pos += size;
  124. STAT_LOCK ();
  125. bytes_allocated += size;
  126. STAT_UNLOCK ();
  127. return new->begin;
  128. }
  129. tmp = cur->pos;
  130. cur->pos += size;
  131. STAT_LOCK ();
  132. bytes_allocated += size;
  133. STAT_UNLOCK ();
  134. return tmp;
  135. }
  136. return NULL;
  137. }
  138. void *
  139. memory_pool_alloc0 (memory_pool_t *pool, size_t size)
  140. {
  141. void *pointer = memory_pool_alloc (pool, size);
  142. if (pointer) {
  143. bzero (pointer, size);
  144. }
  145. return pointer;
  146. }
  147. char *
  148. memory_pool_strdup (memory_pool_t *pool, const char *src)
  149. {
  150. size_t len;
  151. char *newstr;
  152. if (src == NULL) {
  153. return NULL;
  154. }
  155. len = strlen (src);
  156. newstr = memory_pool_alloc (pool, len + 1);
  157. memcpy (newstr, src, len + 1);
  158. return newstr;
  159. }
  160. void *
  161. memory_pool_alloc_shared (memory_pool_t *pool, size_t size)
  162. {
  163. u_char *tmp;
  164. struct _pool_chain_shared *new, *cur;
  165. if (pool) {
  166. cur = pool->shared_pool;
  167. if (!cur) {
  168. cur = pool_chain_new_shared (pool->first_pool->len);
  169. pool->shared_pool = cur;
  170. }
  171. /* Find free space in pool chain */
  172. while (memory_pool_free (cur) < size && cur->next) {
  173. cur = cur->next;
  174. }
  175. if (cur->next == NULL && memory_pool_free (cur) < size) {
  176. /* Allocate new pool */
  177. if (cur->len >= size) {
  178. new = pool_chain_new_shared (cur->len);
  179. }
  180. else {
  181. new = pool_chain_new_shared (size + cur->len);
  182. }
  183. /* Attach new pool to chain */
  184. cur->next = new;
  185. new->pos += size;
  186. STAT_LOCK ();
  187. bytes_allocated += size;
  188. STAT_UNLOCK ();
  189. return new->begin;
  190. }
  191. tmp = cur->pos;
  192. cur->pos += size;
  193. STAT_LOCK ();
  194. bytes_allocated += size;
  195. STAT_UNLOCK ();
  196. return tmp;
  197. }
  198. return NULL;
  199. }
  200. /* Find pool for a pointer, returns NULL if pointer is not in pool */
  201. static struct _pool_chain_shared *
  202. memory_pool_find_pool (memory_pool_t *pool, void *pointer)
  203. {
  204. struct _pool_chain_shared *cur = pool->shared_pool;
  205. while (cur) {
  206. if ((u_char *)pointer >= cur->begin && (u_char *)pointer <= (cur->begin + cur->len)) {
  207. return cur;
  208. }
  209. cur = cur->next;
  210. }
  211. return NULL;
  212. }
  213. static void
  214. memory_pool_spin (struct _pool_chain_shared *chain)
  215. {
  216. while (!g_atomic_int_compare_and_exchange (&chain->lock, 0, 1)) {
  217. /* lock was aqquired */
  218. #ifdef HAVE_NANOSLEEP
  219. struct timespec ts;
  220. ts.tv_sec = 0;
  221. ts.tv_nsec = MUTEX_SLEEP_TIME;
  222. /* Spin */
  223. while (nanosleep (&ts, &ts) == -1 && errno == EINTR);
  224. #endif
  225. #ifdef HAVE_SCHED_YIELD
  226. (void)sched_yield ();
  227. #endif
  228. #if !defined(HAVE_NANOSLEEP) && !defined(HAVE_SCHED_YIELD)
  229. # error No methods to spin are defined
  230. #endif
  231. }
  232. }
  233. /* Simple implementation of spinlock */
  234. void
  235. memory_pool_lock_shared (memory_pool_t *pool, void *pointer)
  236. {
  237. struct _pool_chain_shared *chain;
  238. chain = memory_pool_find_pool (pool, pointer);
  239. if (chain == NULL) {
  240. return;
  241. }
  242. memory_pool_spin (chain);
  243. }
  244. void memory_pool_unlock_shared (memory_pool_t *pool, void *pointer)
  245. {
  246. struct _pool_chain_shared *chain;
  247. chain = memory_pool_find_pool (pool, pointer);
  248. if (chain == NULL) {
  249. return;
  250. }
  251. (void)g_atomic_int_dec_and_test (&chain->lock);
  252. }
  253. void
  254. memory_pool_add_destructor (memory_pool_t *pool, pool_destruct_func func, void *data)
  255. {
  256. struct _pool_destructors *cur;
  257. cur = memory_pool_alloc (pool, sizeof (struct _pool_destructors));
  258. if (cur) {
  259. cur->func = func;
  260. cur->data = data;
  261. cur->prev = pool->destructors;
  262. pool->destructors = cur;
  263. }
  264. }
  265. void
  266. memory_pool_delete (memory_pool_t *pool)
  267. {
  268. struct _pool_chain *cur = pool->first_pool, *tmp;
  269. struct _pool_chain_shared *cur_shared = pool->shared_pool, *tmp_shared;
  270. struct _pool_destructors *destructor = pool->destructors;
  271. /* Call all pool destructors */
  272. while (destructor) {
  273. destructor->func (destructor->data);
  274. destructor = destructor->prev;
  275. }
  276. while (cur) {
  277. tmp = cur;
  278. cur = cur->next;
  279. g_free (tmp->begin);
  280. g_free (tmp);
  281. STAT_LOCK ();
  282. chunks_freed ++;
  283. STAT_UNLOCK ();
  284. }
  285. /* Unmap shared memory */
  286. while (cur_shared) {
  287. tmp_shared = cur_shared;
  288. cur_shared = cur_shared->next;
  289. munmap (tmp_shared, tmp_shared->len + sizeof (struct _pool_chain_shared));
  290. STAT_LOCK ();
  291. chunks_freed ++;
  292. STAT_UNLOCK ();
  293. }
  294. g_free (pool);
  295. }
  296. void
  297. memory_pool_stat (memory_pool_stat_t *st)
  298. {
  299. st->bytes_allocated = bytes_allocated;
  300. st->chunks_allocated = chunks_allocated;
  301. st->shared_chunks_allocated = shared_chunks_allocated;
  302. st->chunks_freed = chunks_freed;
  303. }
  304. #define FIXED_POOL_SIZE 4095
  305. size_t
  306. memory_pool_get_size ()
  307. {
  308. #ifdef HAVE_GETPAGESIZE
  309. return getpagesize () - 1;
  310. #else
  311. return FIXED_POOL_SIZE;
  312. #endif
  313. }
  314. /*
  315. * vi:ts=4
  316. */