workerqueue_test.go

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"strconv"
	"sync"
	"testing"
	"time"

	"code.gitea.io/gitea/modules/setting"

	"github.com/stretchr/testify/assert"
)
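
// runWorkerPoolQueue starts the queue's run loop in a goroutine and returns a
// stop function that shuts the queue down, waiting up to one second.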
func runWorkerPoolQueue[T any](q *WorkerPoolQueue[T]) func() {
	go q.Run()
	return func() {
		q.ShutdownWait(1 * time.Second)
	}
}
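
// TestWorkerPoolQueueUnhandled verifies that items reported as unhandled by
// the handler are requeued and handled again: even items are rejected on their
// first pass and must end up handled twice, odd items exactly once.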
func TestWorkerPoolQueueUnhandled(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(0)
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	mu := sync.Mutex{}

	test := func(t *testing.T, queueSetting setting.QueueSettings) {
		queueSetting.Length = 100
		queueSetting.Type = "channel"
		queueSetting.Datadir = t.TempDir() + "/test-queue"

		m := map[int]int{}

		// odds are handled once, evens are handled twice
		handler := func(items ...int) (unhandled []int) {
			testRecorder.Record("handle:%v", items)
			for _, item := range items {
				mu.Lock()
				if item%2 == 0 && m[item] == 0 {
					unhandled = append(unhandled, item)
				}
				m[item]++
				mu.Unlock()
			}
			return unhandled
		}

		q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", queueSetting, handler, false)
		stop := runWorkerPoolQueue(q)
		for i := 0; i < queueSetting.Length; i++ {
			testRecorder.Record("push:%v", i)
			assert.NoError(t, q.Push(i))
		}
		assert.NoError(t, q.FlushWithContext(context.Background(), 0))
		stop()

		ok := true
		for i := 0; i < queueSetting.Length; i++ {
			if i%2 == 0 {
				ok = ok && assert.EqualValues(t, 2, m[i], "test %s: item %d", t.Name(), i)
			} else {
				ok = ok && assert.EqualValues(t, 1, m[i], "test %s: item %d", t.Name(), i)
			}
		}
		if !ok {
			t.Logf("m: %v", m)
			t.Logf("records: %v", testRecorder.Records())
		}
		testRecorder.Reset()
	}

	runCount := 2 // these tests can be run even hundreds of times to check their stability
	t.Run("1/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			test(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1})
		}
	})
	t.Run("3/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			test(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1})
		}
	})
	t.Run("4/5", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			test(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5})
		}
	})
}
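
// TestWorkerPoolQueuePersistence exercises the disk-backed "level" queue
// across a shutdown and restart, under several batch/worker configurations.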
func TestWorkerPoolQueuePersistence(t *testing.T) {
	runCount := 2 // these tests can be run even hundreds of times to check their stability
	t.Run("1/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 1, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("3/1", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 3, MaxWorkers: 1, Length: 100})
		}
	})
	t.Run("4/5", func(t *testing.T) {
		for i := 0; i < runCount; i++ {
			testWorkerPoolQueuePersistence(t, setting.QueueSettings{BatchLength: 4, MaxWorkers: 5, Length: 100})
		}
	})
}
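
// testWorkerPoolQueuePersistence pushes "testCount" tasks, shuts the queue
// down partway through processing, then restarts it and verifies that every
// task is executed exactly once across the two runs.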
func testWorkerPoolQueuePersistence(t *testing.T, queueSetting setting.QueueSettings) {
	testCount := queueSetting.Length
	queueSetting.Type = "level"
	queueSetting.Datadir = t.TempDir() + "/test-queue"

	mu := sync.Mutex{}

	var tasksQ1, tasksQ2 []string
	q1 := func() {
		startWhenAllReady := make(chan struct{}) // only start consuming data after all "testCount" tasks have been pushed into the queue
		stopAt20Shutdown := make(chan struct{})  // stop and shut down at the 20th item

		testHandler := func(data ...string) []string {
			<-startWhenAllReady
			time.Sleep(10 * time.Millisecond)
			for _, s := range data {
				mu.Lock()
				tasksQ1 = append(tasksQ1, s)
				mu.Unlock()
				if s == "task-20" {
					close(stopAt20Shutdown)
				}
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		for i := 0; i < testCount; i++ {
			_ = q.Push("task-" + strconv.Itoa(i))
		}
		close(startWhenAllReady)
		<-stopAt20Shutdown // it's possible that more than 20 tasks have been executed by this point
		stop()
	}

	q1() // run some tasks and shut down at an intermediate point

	time.Sleep(100 * time.Millisecond) // the handler in q1 has a slight delay, so wait for it to finish

	q2 := func() {
		testHandler := func(data ...string) []string {
			for _, s := range data {
				mu.Lock()
				tasksQ2 = append(tasksQ2, s)
				mu.Unlock()
			}
			return nil
		}

		q, _ := newWorkerPoolQueueForTest("pr_patch_checker_test", queueSetting, testHandler, true)
		stop := runWorkerPoolQueue(q)
		assert.NoError(t, q.FlushWithContext(context.Background(), 0))
		stop()
	}

	q2() // restart the queue to continue executing the tasks remaining in it

	assert.NotZero(t, len(tasksQ1))
	assert.NotZero(t, len(tasksQ2))
	assert.EqualValues(t, testCount, len(tasksQ1)+len(tasksQ2))
}
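
// TestWorkerPoolQueueActiveWorkers verifies that workers spin up to handle
// pushed items and, after workerIdleDuration of inactivity, scale back down
// to a single idle worker.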
func TestWorkerPoolQueueActiveWorkers(t *testing.T) {
	oldWorkerIdleDuration := workerIdleDuration
	workerIdleDuration = 300 * time.Millisecond
	defer func() {
		workerIdleDuration = oldWorkerIdleDuration
	}()

	handler := func(items ...int) (unhandled []int) {
		time.Sleep(100 * time.Millisecond)
		return nil
	}

	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 1, Length: 100}, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < 5; i++ {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.EqualValues(t, 1, q.GetWorkerNumber())
	assert.EqualValues(t, 1, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.EqualValues(t, 1, q.GetWorkerNumber())
	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.EqualValues(t, 1, q.GetWorkerNumber()) // there is at least one worker after the queue begins working
	stop()

	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", setting.QueueSettings{Type: "channel", BatchLength: 1, MaxWorkers: 3, Length: 100}, handler, false)
	stop = runWorkerPoolQueue(q)
	for i := 0; i < 15; i++ {
		assert.NoError(t, q.Push(i))
	}

	time.Sleep(50 * time.Millisecond)
	assert.EqualValues(t, 3, q.GetWorkerNumber())
	assert.EqualValues(t, 3, q.GetWorkerActiveNumber())
	time.Sleep(500 * time.Millisecond)
	assert.EqualValues(t, 3, q.GetWorkerNumber())
	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())
	time.Sleep(workerIdleDuration)
	assert.EqualValues(t, 1, q.GetWorkerNumber()) // there is at least one worker after the queue begins working
	stop()
}
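
// TestWorkerPoolQueueShutdown verifies that shutdown deactivates all workers
// and that unhandled items stay persisted in the "level" queue, so a new
// queue instance over the same data directory sees all of them again.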
func TestWorkerPoolQueueShutdown(t *testing.T) {
	oldUnhandledItemRequeueDuration := unhandledItemRequeueDuration.Load()
	unhandledItemRequeueDuration.Store(int64(100 * time.Millisecond))
	defer unhandledItemRequeueDuration.Store(oldUnhandledItemRequeueDuration)

	// simulate a slow handler that handles no items (all items will be pushed back to the queue)
	handlerCalled := make(chan struct{})
	handler := func(items ...int) (unhandled []int) {
		if items[0] == 0 {
			close(handlerCalled)
		}
		time.Sleep(400 * time.Millisecond)
		return items
	}

	qs := setting.QueueSettings{Type: "level", Datadir: t.TempDir() + "/queue", BatchLength: 3, MaxWorkers: 4, Length: 20}
	q, _ := newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	stop := runWorkerPoolQueue(q)
	for i := 0; i < qs.Length; i++ {
		assert.NoError(t, q.Push(i))
	}
	<-handlerCalled
	time.Sleep(200 * time.Millisecond) // wait for a while to make sure all workers are active
	assert.EqualValues(t, 4, q.GetWorkerActiveNumber())
	stop() // stop triggers shutdown

	assert.EqualValues(t, 0, q.GetWorkerActiveNumber())

	// no item was ever handled, so we should still get all of them again
	q, _ = newWorkerPoolQueueForTest("test-workpoolqueue", qs, handler, false)
	assert.EqualValues(t, 20, q.GetQueueItemNumber())
}