
workergroup.go

// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/log"
)

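// Package-level tunables. infiniteTimerC is a channel that nothing ever sends
// on, so receiving from it blocks forever; doRun uses it as the "disabled"
// state of its batch debounce timer.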
var (
	infiniteTimerC         = make(chan time.Time)
	batchDebounceDuration  = 100 * time.Millisecond
	workerIdleDuration     = 1 * time.Second
	shutdownDefaultTimeout = 2 * time.Second

	unhandledItemRequeueDuration atomic.Int64 // to avoid data race during test
)

func init() {
	unhandledItemRequeueDuration.Store(int64(5 * time.Second))
}

// workerGroup is a group of workers to work with a WorkerPoolQueue
type workerGroup[T any] struct {
	q  *WorkerPoolQueue[T]
	wg sync.WaitGroup

	ctxWorker       context.Context
	ctxWorkerCancel context.CancelFunc

	batchBuffer []T
	popItemChan chan []byte
	popItemErr  chan error
}

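// doPrepareWorkerContext derives a fresh cancelable context for the workers
// from the queue's run context; doFlush cancels it to stop all running workers.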
func (wg *workerGroup[T]) doPrepareWorkerContext() {
	wg.ctxWorker, wg.ctxWorkerCancel = context.WithCancel(wg.q.ctxRun)
}

// doDispatchBatchToWorker dispatches a batch of items to the worker channel.
// If the channel is full, it tries to start a new worker if possible.
func (q *WorkerPoolQueue[T]) doDispatchBatchToWorker(wg *workerGroup[T], flushChan chan flushType) {
	batch := wg.batchBuffer
	wg.batchBuffer = nil
	if len(batch) == 0 {
		return
	}
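
	// try a non-blocking send first; if no worker can take the batch right now,
	// remember that the channel is full so a new worker may be started below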
	full := false
	select {
	case q.batchChan <- batch:
	default:
		full = true
	}

	// TODO: the logic could be improved in the future, to avoid a data-race between "doStartNewWorker" and "workerNum".
	// The root problem is that if we skip "doStartNewWorker" here, the "workerNum" might be decreased by other workers later,
	// so ideally it should check whether there are enough workers by some approach, and start new workers if necessary.
	q.workerNumMu.Lock()
	noWorker := q.workerNum == 0
	if full || noWorker {
		if q.workerNum < q.workerMaxNum || (noWorker && q.workerMaxNum <= 0) {
			q.workerNum++
			q.doStartNewWorker(wg)
		}
	}
	q.workerNumMu.Unlock()
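
	// the channel was full above: block until a worker takes the batch, a flush
	// request arrives, or the queue is shut down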
	if full {
		select {
		case q.batchChan <- batch:
		case flush := <-flushChan:
			q.doWorkerHandle(batch)
			q.doFlush(wg, flush)
		case <-q.ctxRun.Done():
			wg.batchBuffer = batch // return the batch to the buffer, the "doRun" function will handle it
		}
	}
}

// doWorkerHandle calls the safeHandler to handle a batch of items, and it increases/decreases the active worker number.
// If the context has been canceled, it should not be called, because "Push" still needs the context; in that case, call q.safeHandler directly.
func (q *WorkerPoolQueue[T]) doWorkerHandle(batch []T) {
	q.workerNumMu.Lock()
	q.workerActiveNum++
	q.workerNumMu.Unlock()

	defer func() {
		q.workerNumMu.Lock()
		q.workerActiveNum--
		q.workerNumMu.Unlock()
	}()

	unhandled := q.safeHandler(batch...)
	// if none of the items were handled, back off for a few seconds;
	// in this case the handler (e.g. the document indexer) may have encountered some errors/failures
	if len(unhandled) == len(batch) && unhandledItemRequeueDuration.Load() != 0 {
		log.Error("Queue %q failed to handle batch of %d items, backoff for a few seconds", q.GetName(), len(batch))
		select {
		case <-q.ctxRun.Done():
		case <-time.After(time.Duration(unhandledItemRequeueDuration.Load())):
		}
	}
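
	// requeue the unhandled items; if the normal Push fails (for example, the run
	// context was canceled while shutting down), try pushing straight into the
	// base queue as a fallback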
	for _, item := range unhandled {
		if err := q.Push(item); err != nil {
			if !q.basePushForShutdown(item) {
				log.Error("Failed to requeue item for queue %q when calling handler: %v", q.GetName(), err)
			}
		}
	}
}

// basePushForShutdown tries to requeue items into the base queue when the WorkerPoolQueue is shutting down.
// If the queue is shutting down, it returns true and tries to push the items;
// otherwise it does nothing and returns false.
func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
	shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
	if shutdownTimeout == 0 {
		return false
	}
	ctxShutdown, ctxShutdownCancel := context.WithTimeout(context.Background(), shutdownTimeout)
	defer ctxShutdownCancel()
	for _, item := range items {
		// if pushing into the base queue also fails, there is nothing more the queue can do except lose the item
		if err := q.baseQueue.PushItem(ctxShutdown, q.marshal(item)); err != nil {
			log.Error("Failed to requeue item for queue %q when shutting down: %v", q.GetName(), err)
		}
	}
	return true
}

// doStartNewWorker starts a new worker for the queue; the worker reads from the worker channel and handles the items.
func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
	wp.wg.Add(1)

	go func() {
		defer wp.wg.Done()

		log.Debug("Queue %q starts new worker", q.GetName())
		defer log.Debug("Queue %q stops idle worker", q.GetName())

		atomic.AddInt32(&q.workerStartedCounter, 1) // only increase the counter, used for debugging

		t := time.NewTicker(workerIdleDuration)
		defer t.Stop()
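
		// the ticker measures idleness: it is reset after every handled batch, and
		// when it fires without new work, the worker exits (unless it is the last one)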
		keepWorking := true
		stopWorking := func() {
			q.workerNumMu.Lock()
			keepWorking = false
			q.workerNum--
			q.workerNumMu.Unlock()
		}
		for keepWorking {
			select {
			case <-wp.ctxWorker.Done():
				stopWorking()
			case batch, ok := <-q.batchChan:
				if !ok {
					stopWorking()
					continue
				}
				q.doWorkerHandle(batch)
				// reset the idle ticker, and drain the tick after reset in case a tick is already triggered
				t.Reset(workerIdleDuration)
				select {
				case <-t.C:
				default:
				}
			case <-t.C:
				q.workerNumMu.Lock()
				keepWorking = q.workerNum <= 1 // keep the last worker running
				if !keepWorking {
					q.workerNum--
				}
				q.workerNumMu.Unlock()
			}
		}
	}()
}

// doFlush flushes the queue: it tries to read all items from the queue and handles them.
// It is for testing purposes only. It's not designed to work for a cluster.
func (q *WorkerPoolQueue[T]) doFlush(wg *workerGroup[T], flush flushType) {
	log.Debug("Queue %q starts flushing", q.GetName())
	defer log.Debug("Queue %q finishes flushing", q.GetName())

	// stop all workers, and prepare a new worker context to start new workers
	wg.ctxWorkerCancel()
	wg.wg.Wait()

	defer func() {
		close(flush)
		wg.doPrepareWorkerContext()
	}()

	// drain the batch channel first
loop:
	for {
		select {
		case batch := <-q.batchChan:
			q.doWorkerHandle(batch)
		default:
			break loop
		}
	}

	// drain the popItem channel
	emptyCounter := 0
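	// emptyCounter counts consecutive polls in which both the base queue and the
	// pop channel were observed empty; flushing only stops after two such polls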
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			emptyCounter = 0
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				continue
			} else {
				q.doWorkerHandle([]T{v})
			}
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doFlush): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		case <-time.After(20 * time.Millisecond):
			// There is no reliable way to make sure all queue items are consumed by the Flush; there always might be some items stored in buffers or temp variables.
			// If we run Gitea in a cluster, we cannot even guarantee that all items are consumed by a deterministic instance.
			// Luckily, the "Flush" trick is only used in tests, so far so good.
			if cnt, _ := q.baseQueue.Len(q.ctxRun); cnt == 0 && len(wg.popItemChan) == 0 {
				emptyCounter++
			}
			if emptyCounter >= 2 {
				return
			}
		}
	}
}

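// isCtxRunCanceled reports whether the queue's run context has been canceled,
// without blocking.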
func (q *WorkerPoolQueue[T]) isCtxRunCanceled() bool {
	select {
	case <-q.ctxRun.Done():
		return true
	default:
		return false
	}
}

var skipFlushChan = make(chan flushType) // an empty flush chan, used to skip reading other flush requests

// doRun is the main loop of the queue. All related "doXxx" functions are executed in its context.
func (q *WorkerPoolQueue[T]) doRun() {
	pprof.SetGoroutineLabels(q.ctxRun)

	log.Debug("Queue %q starts running", q.GetName())
	defer log.Debug("Queue %q stops running", q.GetName())

	wg := &workerGroup[T]{q: q}
	wg.doPrepareWorkerContext()
	wg.popItemChan, wg.popItemErr = popItemByChan(q.ctxRun, q.baseQueue.PopItem)

	defer func() {
		q.ctxRunCancel()

		// drain all data on the fly
		// since the queue is shutting down, the items can't be dispatched to workers because the context is canceled;
		// it also can't call doWorkerHandle, because there would be no chance to push unhandled items back to the queue
		var unhandled []T
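
		// closing batchChan lets the range below drain any batches that the
		// exiting workers did not pick up, and signals the workers to stop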
		close(q.batchChan)
		for batch := range q.batchChan {
			unhandled = append(unhandled, batch...)
		}
		unhandled = append(unhandled, wg.batchBuffer...)
		for data := range wg.popItemChan {
			if v, ok := q.unmarshal(data); ok {
				unhandled = append(unhandled, v)
			}
		}

		shutdownTimeout := time.Duration(q.shutdownTimeout.Load())
		if shutdownTimeout != 0 {
			// if there is a shutdown timeout, try to push the items back to the base queue
			q.basePushForShutdown(unhandled...)
			workerDone := make(chan struct{})
			// waiting in a goroutine is the only way to wait for the workers, because the handlers do not take a context
			go func() { wg.wg.Wait(); close(workerDone) }()
			select {
			case <-workerDone:
			case <-time.After(shutdownTimeout):
				log.Error("Queue %q is shutting down, but workers are still running after timeout", q.GetName())
			}
		} else {
			// if there is no shutdown timeout, just call the handler to try to handle the items; if the handler fails again, the items are lost
			q.safeHandler(unhandled...)
		}

		close(q.shutdownDone)
	}()
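
	// batchDispatchC is the debounce timer for partially filled batches; while it
	// points at infiniteTimerC (which never fires) the timer is effectively disabled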
	var batchDispatchC <-chan time.Time = infiniteTimerC
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				testRecorder.Record("pop:corrupted:%s", data) // in rare cases the levelqueue (leveldb) might be corrupted
				continue
			} else {
				wg.batchBuffer = append(wg.batchBuffer, v)
			}
			if len(wg.batchBuffer) >= q.batchLength {
				q.doDispatchBatchToWorker(wg, q.flushChan)
			} else if batchDispatchC == infiniteTimerC {
				batchDispatchC = time.After(batchDebounceDuration)
			} // else: batchDispatchC is already a debounce timer, it will be triggered soon
		case <-batchDispatchC:
			batchDispatchC = infiniteTimerC
			q.doDispatchBatchToWorker(wg, q.flushChan)
		case flush := <-q.flushChan:
			// before flushing, it needs to try to dispatch the batch to a worker first, in case there is no worker running;
			// after the dispatching, there is at least one worker running, so "doFlush" can wait for the workers to finish.
			// Since we are already in a "flush" operation, the dispatching function shouldn't read the flush chan.
			q.doDispatchBatchToWorker(wg, skipFlushChan)
			q.doFlush(wg, flush)
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doRun): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		}
	}
}