
workergroup.go

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/log"
)

var (
	infiniteTimerC         = make(chan time.Time) // a timer channel that never fires, used to keep the batch debounce timer disarmed
	batchDebounceDuration  = 100 * time.Millisecond
	workerIdleDuration     = 1 * time.Second
	shutdownDefaultTimeout = 2 * time.Second

	unhandledItemRequeueDuration atomic.Int64 // to avoid data race during test
)

func init() {
	unhandledItemRequeueDuration.Store(int64(5 * time.Second))
}
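
// Data flow in this file, in brief: doRun pops raw items from the base queue
// via popItemByChan, unmarshals them and collects them in
// workerGroup.batchBuffer; doDispatchBatchToWorker hands a full (or debounced)
// batch to the workers through q.batchChan, starting a new worker when needed;
// each worker calls doWorkerHandle, which runs the handler and requeues any
// unhandled items.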

// workerGroup is a group of workers to work with a WorkerPoolQueue
type workerGroup[T any] struct {
	q  *WorkerPoolQueue[T]
	wg sync.WaitGroup

	ctxWorker       context.Context
	ctxWorkerCancel context.CancelFunc

	batchBuffer []T         // items popped from the base queue but not yet dispatched to a worker
	popItemChan chan []byte // raw items read from the base queue by popItemByChan
	popItemErr  chan error  // errors reported by popItemByChan
}

func (wg *workerGroup[T]) doPrepareWorkerContext() {
	wg.ctxWorker, wg.ctxWorkerCancel = context.WithCancel(wg.q.ctxRun)
}

// doDispatchBatchToWorker dispatches a batch of items to the workers' channel.
// If the channel is full, it tries to start a new worker if possible.
func (q *WorkerPoolQueue[T]) doDispatchBatchToWorker(wg *workerGroup[T], flushChan chan flushType) {
	batch := wg.batchBuffer
	wg.batchBuffer = nil

	if len(batch) == 0 {
		return
	}

	full := false
	select {
	case q.batchChan <- batch:
	default:
		full = true
	}

	// start a new worker if the channel is full or there is no worker at all:
	// a new worker is allowed when the configured max has not been reached,
	// or when there is no worker yet and the max is unlimited (<= 0)
	q.workerNumMu.Lock()
	noWorker := q.workerNum == 0
	if full || noWorker {
		if q.workerNum < q.workerMaxNum || noWorker && q.workerMaxNum <= 0 {
			q.workerNum++
			q.doStartNewWorker(wg)
		}
	}
	q.workerNumMu.Unlock()

	if full {
		select {
		case q.batchChan <- batch:
		case flush := <-flushChan:
			q.doWorkerHandle(batch)
			q.doFlush(wg, flush)
		case <-q.ctxRun.Done():
			wg.batchBuffer = batch // return the batch to the buffer, the "doRun" function will handle it
		}
	}
}

// doWorkerHandle calls the safeHandler to handle a batch of items, and it increases/decreases the active worker number.
// If the context has been canceled, it should not be called, because the requeue via "Push" still needs the context; in that case, call q.safeHandler directly.
func (q *WorkerPoolQueue[T]) doWorkerHandle(batch []T) {
	q.workerNumMu.Lock()
	q.workerActiveNum++
	q.workerNumMu.Unlock()

	defer func() {
		q.workerNumMu.Lock()
		q.workerActiveNum--
		q.workerNumMu.Unlock()
	}()

	unhandled := q.safeHandler(batch...)
	// if none of the items were handled, back off for a few seconds;
	// in this case the handler (e.g. the document indexer) may have encountered some errors/failures
	if len(unhandled) == len(batch) && unhandledItemRequeueDuration.Load() != 0 {
		log.Error("Queue %q failed to handle batch of %d items, backoff for a few seconds", q.GetName(), len(batch))
		select {
		case <-q.ctxRun.Done():
		case <-time.After(time.Duration(unhandledItemRequeueDuration.Load())):
		}
	}
	for _, item := range unhandled {
		if err := q.Push(item); err != nil {
			if !q.basePushForShutdown(item) {
				log.Error("Failed to requeue item for queue %q when calling handler: %v", q.GetName(), err)
			}
		}
	}
}
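
// The handler contract, illustrated with a sketch (not code from this
// package): the handler receives a batch and returns the items it could not
// process, and doWorkerHandle pushes the returned items back into the queue
// (or into the base queue via basePushForShutdown while shutting down).
//
//	handler := func(items ...string) (unhandled []string) {
//		for _, item := range items {
//			if err := process(item); err != nil { // "process" is a hypothetical helper
//				unhandled = append(unhandled, item)
//			}
//		}
//		return unhandled
//	}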

// basePushForShutdown tries to requeue items into the base queue when the WorkerPoolQueue is shutting down.
// If the queue is shutting down, it tries to push the items and returns true.
// Otherwise it does nothing and returns false.
func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
	shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
	if shutdownTimeout == 0 {
		return false
	}
	ctxShutdown, ctxShutdownCancel := context.WithTimeout(context.Background(), shutdownTimeout)
	defer ctxShutdownCancel()
	for _, item := range items {
		// if pushing to the base queue also fails, there is nothing more the queue can do and the item is lost
		if err := q.baseQueue.PushItem(ctxShutdown, q.marshal(item)); err != nil {
			log.Error("Failed to requeue item for queue %q when shutting down: %v", q.GetName(), err)
		}
	}
	return true
}
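
// Note: q.shutdownTimeout is stored atomically and is set outside this file
// (shutdownDefaultTimeout above is presumably its default). A zero value
// disables the requeue-on-shutdown path: in that case doRun hands leftover
// items to the handler one last time instead of pushing them back to the base
// queue.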

// doStartNewWorker starts a new worker for the queue, the worker reads from the workers' channel and handles the items.
func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
	wp.wg.Add(1)

	go func() {
		defer wp.wg.Done()

		log.Debug("Queue %q starts new worker", q.GetName())
		defer log.Debug("Queue %q stops idle worker", q.GetName())

		t := time.NewTicker(workerIdleDuration)
		defer t.Stop() // release the ticker when the worker exits

		keepWorking := true
		stopWorking := func() {
			q.workerNumMu.Lock()
			keepWorking = false
			q.workerNum--
			q.workerNumMu.Unlock()
		}
		for keepWorking {
			select {
			case <-wp.ctxWorker.Done():
				stopWorking()
			case batch, ok := <-q.batchChan:
				if !ok {
					stopWorking()
				} else {
					q.doWorkerHandle(batch)
					t.Reset(workerIdleDuration)
				}
			case <-t.C:
				// the worker has been idle for a while: exit unless it is the last remaining worker
				q.workerNumMu.Lock()
				keepWorking = q.workerNum <= 1
				if !keepWorking {
					q.workerNum--
				}
				q.workerNumMu.Unlock()
			}
		}
	}()
}

// doFlush flushes the queue: it tries to read all items from the queue and handles them.
// It is for testing purposes only. It's not designed to work for a cluster.
func (q *WorkerPoolQueue[T]) doFlush(wg *workerGroup[T], flush flushType) {
	log.Debug("Queue %q starts flushing", q.GetName())
	defer log.Debug("Queue %q finishes flushing", q.GetName())

	// stop all workers, and prepare a new worker context to start new workers
	wg.ctxWorkerCancel()
	wg.wg.Wait()

	defer func() {
		close(flush)
		wg.doPrepareWorkerContext()
	}()

	// drain the batch channel first
loop:
	for {
		select {
		case batch := <-q.batchChan:
			q.doWorkerHandle(batch)
		default:
			break loop
		}
	}

	// drain the popItem channel
	emptyCounter := 0
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			emptyCounter = 0
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				continue
			} else {
				q.doWorkerHandle([]T{v})
			}
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doFlush): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		case <-time.After(20 * time.Millisecond):
			// There is no reliable way to make sure all queue items are consumed by the Flush: there might always be some items stored in buffers or temporary variables.
			// If Gitea runs in a cluster, we cannot even guarantee that all items are consumed by a deterministic instance.
			// Luckily, the "Flush" trick is only used in tests, so far so good.
			if cnt, _ := q.baseQueue.Len(q.ctxRun); cnt == 0 && len(wg.popItemChan) == 0 {
				emptyCounter++
			}
			if emptyCounter >= 2 {
				return
			}
		}
	}
}

func (q *WorkerPoolQueue[T]) isCtxRunCanceled() bool {
	select {
	case <-q.ctxRun.Done():
		return true
	default:
		return false
	}
}

var skipFlushChan = make(chan flushType) // an empty flush chan, used to skip reading other flush requests

// doRun is the main loop of the queue. All related "doXxx" functions are executed in its context.
func (q *WorkerPoolQueue[T]) doRun() {
	pprof.SetGoroutineLabels(q.ctxRun)

	log.Debug("Queue %q starts running", q.GetName())
	defer log.Debug("Queue %q stops running", q.GetName())

	wg := &workerGroup[T]{q: q}
	wg.doPrepareWorkerContext()
	wg.popItemChan, wg.popItemErr = popItemByChan(q.ctxRun, q.baseQueue.PopItem)

	defer func() {
		q.ctxRunCancel()

		// drain all data on the fly
		// since the queue is shutting down, the items can't be dispatched to workers because the context is canceled
		// it can't call doWorkerHandle either, because there would be no chance to push unhandled items back to the queue
		var unhandled []T
		close(q.batchChan)
		for batch := range q.batchChan {
			unhandled = append(unhandled, batch...)
		}
		unhandled = append(unhandled, wg.batchBuffer...)
		for data := range wg.popItemChan {
			if v, ok := q.unmarshal(data); ok {
				unhandled = append(unhandled, v)
			}
		}

		shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
		if shutdownTimeout != 0 {
			// if there is a shutdown timeout, try to push the items back to the base queue
			q.basePushForShutdown(unhandled...)
			workerDone := make(chan struct{})
			// this is the only way to wait for the workers, because the handlers do not take a context that could be waited on
			go func() { wg.wg.Wait(); close(workerDone) }()
			select {
			case <-workerDone:
			case <-time.After(shutdownTimeout):
				log.Error("Queue %q is shutting down, but workers are still running after timeout", q.GetName())
			}
		} else {
			// if there is no shutdown timeout, just call the handler one last time to try to handle the items; if the handler fails again, the items are lost
			q.safeHandler(unhandled...)
		}

		close(q.shutdownDone)
	}()

	var batchDispatchC <-chan time.Time = infiniteTimerC
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				testRecorder.Record("pop:corrupted:%s", data) // in rare cases the levelqueue (leveldb) might be corrupted
				continue
			} else {
				wg.batchBuffer = append(wg.batchBuffer, v)
			}
			if len(wg.batchBuffer) >= q.batchLength {
				q.doDispatchBatchToWorker(wg, q.flushChan)
			} else if batchDispatchC == infiniteTimerC {
				batchDispatchC = time.After(batchDebounceDuration)
			} // else: batchDispatchC is already a debounce timer, it will be triggered soon
		case <-batchDispatchC:
			batchDispatchC = infiniteTimerC
			q.doDispatchBatchToWorker(wg, q.flushChan)
		case flush := <-q.flushChan:
			// before flushing, try to dispatch the buffered batch to a worker first, in case there is no worker running;
			// after the dispatch there is at least one worker running, so "doFlush" can wait for the workers to finish.
			// Since we are already handling a flush request, the dispatching function must not read from the flush chan.
			q.doDispatchBatchToWorker(wg, skipFlushChan)
			q.doFlush(wg, flush)
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doRun): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		}
	}
}
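
// Dispatch timing, in short: items accumulate in wg.batchBuffer; a batch is
// dispatched as soon as it reaches q.batchLength, otherwise roughly
// batchDebounceDuration (100ms) after the debounce timer is armed, which
// happens when an item is buffered while batchDispatchC is still the
// never-firing infiniteTimerC.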