author    wxiaoguang <wxiaoguang@gmail.com>    2023-05-08 19:49:59 +0800
committer GitHub <noreply@github.com>          2023-05-08 19:49:59 +0800
commit    6f9c278559789066aa831c1df25b0d866103d02d (patch)
tree      e3a1880e0d4cf88916f9d1b65d82fbd4c41ea47f /modules/queue/workerqueue.go
parent    cb700aedd1e670fb47b8cf0cd67fb117a1ad88a2 (diff)
Rewrite queue (#24505)
# ⚠️ Breaking

Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see this fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: few people could tell how it works.
* Maintainability: too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and poor maintainability, there are sometimes strange bugs that are difficult to debug, and some code has no tests (indeed, some code is difficult to test because too many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors.

It came from some very old code written to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major kinds of concepts:
    * The "base queue": channel, levelqueue, redis. They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack": in a queue with many workers, "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal "Push".
* The new code doesn't do "pause/resume": that trick was designed to handle handler failures, e.g. when the document indexer (elasticsearch) is down. But if a queue is paused for a long time, either the producers block or the new items are dropped. "Pause/resume" is not a common queue behavior and it doesn't help much; instead, if there are unhandled items, the "push" just blocks for a few seconds and then the items are re-queued and retried.
* The new code doesn't do "worker boosting": Gitea's queue handlers are light functions whose only cost is the goroutine, so it doesn't make sense to "boost" them. The new code only uses a "max worker number" to limit the concurrent workers.
* The new "Push" never blocks forever: instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in the code comments, e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
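To make the design above concrete, here is a minimal caller-side sketch based on the signatures in this diff. The `Task` type, the `process` helper, the queue name, and the settings values are hypothetical, and `graceful.GetManager().RunWithShutdownFns` is assumed to supply the `atShutdown`/`atTerminate` callbacks that `Run` expects, as it does elsewhere in Gitea:

```go
package example

import (
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
	"code.gitea.io/gitea/modules/setting"
)

// Task is a hypothetical item type; any JSON-marshalable struct works,
// because the queue marshals items with json.Marshal.
type Task struct {
	RepoID int64
}

// process stands in for the real per-item work.
func process(t Task) error { return nil }

func startTaskQueue() (*queue.WorkerPoolQueue[Task], error) {
	// The handler receives a batch of items; anything it could not handle is
	// returned and the worker pool re-queues it with a normal "Push" (no "PushBack").
	handler := func(items ...Task) (unhandled []Task) {
		for _, t := range items {
			if err := process(t); err != nil {
				unhandled = append(unhandled, t)
			}
		}
		return unhandled
	}

	q, err := queue.NewWorkerPoolQueueBySetting("my_task", setting.QueueSettings{Type: "channel"}, handler, false)
	if err != nil {
		return nil, err
	}

	// Run the worker pool under the graceful manager so the queue is shut
	// down (with a short wait) when Gitea stops.
	go graceful.GetManager().RunWithShutdownFns(q.Run)

	// Push may block for a few seconds but never forever: it returns an
	// error instead when the queue stays full.
	if err := q.Push(Task{RepoID: 1}); err != nil {
		log.Error("failed to enqueue task: %v", err)
	}
	return q, nil
}
```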
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
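Returning to the breaking config change at the top of this message, a hypothetical `app.ini` migration might look like the following. The option names are inferred from the error messages and from the `setting.QueueSettings` fields used in this diff (`Type`, `BatchLength`, `MaxWorkers`), so verify them against the released documentation:

```ini
; Removed options (reported by the startup errors):
;   [indexer].ISSUE_INDEXER_QUEUE_TYPE, [indexer].UPDATE_BUFFER_LEN
;   [queue].WRAP_IF_NECESSARY, MAX_ATTEMPTS, TIMEOUT, WORKERS,
;   BLOCK_TIMEOUT, BOOST_TIMEOUT, BOOST_WORKERS

; Replacement: one section per queue, e.g. for the issue indexer
[queue.issue_indexer]
TYPE = level         ; "dummy"/"immediate", "channel", "redis", or "level" (see getNewQueueFn)
BATCH_LENGTH = 20    ; hypothetical value, maps to QueueSettings.BatchLength
MAX_WORKERS = 10     ; hypothetical value, maps to QueueSettings.MaxWorkers
```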
Diffstat (limited to 'modules/queue/workerqueue.go')
-rw-r--r--    modules/queue/workerqueue.go    241
1 file changed, 241 insertions, 0 deletions
diff --git a/modules/queue/workerqueue.go b/modules/queue/workerqueue.go
new file mode 100644
index 0000000000..493bea17aa
--- /dev/null
+++ b/modules/queue/workerqueue.go
@@ -0,0 +1,241 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package queue
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+// WorkerPoolQueue is a queue that uses a pool of workers to process items
+// It can use different underlying (base) queue types
+type WorkerPoolQueue[T any] struct {
+ ctxRun context.Context
+ ctxRunCancel context.CancelFunc
+ ctxShutdown atomic.Pointer[context.Context]
+ shutdownDone chan struct{}
+
+ origHandler HandlerFuncT[T]
+ safeHandler HandlerFuncT[T]
+
+ baseQueueType string
+ baseConfig *BaseConfig
+ baseQueue baseQueue
+
+ batchChan chan []T
+ flushChan chan flushType
+
+ batchLength int
+ workerNum int
+ workerMaxNum int
+ workerActiveNum int
+ workerNumMu sync.Mutex
+}
+
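+// flushType is the per-flush signal channel: FlushWithContext creates one,
+// sends it on flushChan, and then waits for it to be closed when the flush
+// is done (presumably by the worker loop in doRun, which is not in this file)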
+type flushType chan struct{}
+
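+// compile-time check that WorkerPoolQueue implements ManagedWorkerPoolQueue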
+var _ ManagedWorkerPoolQueue = (*WorkerPoolQueue[any])(nil)
+
+func (q *WorkerPoolQueue[T]) GetName() string {
+ return q.baseConfig.ManagedName
+}
+
+func (q *WorkerPoolQueue[T]) GetType() string {
+ return q.baseQueueType
+}
+
+func (q *WorkerPoolQueue[T]) GetItemTypeName() string {
+ var t T
+ return fmt.Sprintf("%T", t)
+}
+
+func (q *WorkerPoolQueue[T]) GetWorkerNumber() int {
+ q.workerNumMu.Lock()
+ defer q.workerNumMu.Unlock()
+ return q.workerNum
+}
+
+func (q *WorkerPoolQueue[T]) GetWorkerActiveNumber() int {
+ q.workerNumMu.Lock()
+ defer q.workerNumMu.Unlock()
+ return q.workerActiveNum
+}
+
+func (q *WorkerPoolQueue[T]) GetWorkerMaxNumber() int {
+ q.workerNumMu.Lock()
+ defer q.workerNumMu.Unlock()
+ return q.workerMaxNum
+}
+
+func (q *WorkerPoolQueue[T]) SetWorkerMaxNumber(num int) {
+ q.workerNumMu.Lock()
+ defer q.workerNumMu.Unlock()
+ q.workerMaxNum = num
+}
+
+func (q *WorkerPoolQueue[T]) GetQueueItemNumber() int {
+ cnt, err := q.baseQueue.Len(q.ctxRun)
+ if err != nil {
+ log.Error("Failed to get number of items in queue %q: %v", q.GetName(), err)
+ }
+ return cnt
+}
+
+func (q *WorkerPoolQueue[T]) FlushWithContext(ctx context.Context, timeout time.Duration) (err error) {
+ if q.isBaseQueueDummy() {
+ return
+ }
+
+ log.Debug("Try to flush queue %q with timeout %v", q.GetName(), timeout)
+ defer log.Debug("Finish flushing queue %q, err: %v", q.GetName(), err)
+
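+ // by default, wait without a timeout: infiniteTimerC (defined elsewhere in
+ // this package) is, judging by its name, a timer channel that never fires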
+ var after <-chan time.Time
+ after = infiniteTimerC
+ if timeout > 0 {
+ after = time.After(timeout)
+ }
+ c := make(flushType)
+
+ // send flush request
+ // if it blocks, it means that there is a flush in progress or the queue hasn't been started yet
+ select {
+ case q.flushChan <- c:
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-q.ctxRun.Done():
+ return q.ctxRun.Err()
+ case <-after:
+ return context.DeadlineExceeded
+ }
+
+ // wait for flush to finish
+ select {
+ case <-c:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-q.ctxRun.Done():
+ return q.ctxRun.Err()
+ case <-after:
+ return context.DeadlineExceeded
+ }
+}
+
+func (q *WorkerPoolQueue[T]) marshal(data T) []byte {
+ bs, err := json.Marshal(data)
+ if err != nil {
+ log.Error("Failed to marshal item for queue %q: %v", q.GetName(), err)
+ return nil
+ }
+ return bs
+}
+
+func (q *WorkerPoolQueue[T]) unmarshal(data []byte) (t T, ok bool) {
+ if err := json.Unmarshal(data, &t); err != nil {
+ log.Error("Failed to unmarshal item from queue %q: %v", q.GetName(), err)
+ return t, false
+ }
+ return t, true
+}
+
+func (q *WorkerPoolQueue[T]) isBaseQueueDummy() bool {
+ _, isDummy := q.baseQueue.(*baseDummy)
+ return isDummy
+}
+
+// Push adds an item to the queue, it may block for a while and then returns an error if the queue is full
+func (q *WorkerPoolQueue[T]) Push(data T) error {
+ if q.isBaseQueueDummy() && q.safeHandler != nil {
+ // FIXME: the "immediate" queue is only for testing, but it really causes problems because its behavior is different from a real queue.
+ // Even if tests pass, it doesn't mean that there is no bug in code.
+ if data, ok := q.unmarshal(q.marshal(data)); ok {
+ q.safeHandler(data)
+ }
+ }
+ return q.baseQueue.PushItem(q.ctxRun, q.marshal(data))
+}
+
+// Has only works for unique queues. Keep in mind that this check may not be reliable (due to the lack of proper transaction support)
+// There is a small chance that duplicate items appear in the queue
+func (q *WorkerPoolQueue[T]) Has(data T) (bool, error) {
+ return q.baseQueue.HasItem(q.ctxRun, q.marshal(data))
+}
+
+func (q *WorkerPoolQueue[T]) Run(atShutdown, atTerminate func(func())) {
+ atShutdown(func() {
+ // in case some queue handlers are slow or have hanging bugs, at most wait for a short time
+ q.ShutdownWait(1 * time.Second)
+ })
+ q.doRun()
+}
+
+// ShutdownWait shuts down the queue, waits for all workers (handlers) to finish their jobs, and pushes the unhandled items back to the base queue
+// Since some buggy handlers could hang forever, a reasonable timeout is needed
+func (q *WorkerPoolQueue[T]) ShutdownWait(timeout time.Duration) {
+ shutdownCtx, shutdownCtxCancel := context.WithTimeout(context.Background(), timeout)
+ defer shutdownCtxCancel()
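+ // only the first caller swaps in a shutdown context; cancelling ctxRun then
+ // stops the run loop, which is expected to close shutdownDone when finished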
+ if q.ctxShutdown.CompareAndSwap(nil, &shutdownCtx) {
+ q.ctxRunCancel()
+ }
+ <-q.shutdownDone
+}
+
+func getNewQueueFn(t string) (string, func(cfg *BaseConfig, unique bool) (baseQueue, error)) {
+ switch t {
+ case "dummy", "immediate":
+ return t, newBaseDummy
+ case "channel":
+ return t, newBaseChannelGeneric
+ case "redis":
+ return t, newBaseRedisGeneric
+ default: // level(leveldb,levelqueue,persistable-channel)
+ return "level", newBaseLevelQueueGeneric
+ }
+}
+
+func NewWorkerPoolQueueBySetting[T any](name string, queueSetting setting.QueueSettings, handler HandlerFuncT[T], unique bool) (*WorkerPoolQueue[T], error) {
+ if handler == nil {
+ log.Debug("Use dummy queue for %q because handler is nil and caller doesn't want to process the queue items", name)
+ queueSetting.Type = "dummy"
+ }
+
+ var w WorkerPoolQueue[T]
+ var err error
+ queueType, newQueueFn := getNewQueueFn(queueSetting.Type)
+ w.baseQueueType = queueType
+ w.baseConfig = toBaseConfig(name, queueSetting)
+ w.baseQueue, err = newQueueFn(w.baseConfig, unique)
+ if err != nil {
+ return nil, err
+ }
+ log.Trace("Created queue %q of type %q", name, queueType)
+
+ w.ctxRun, w.ctxRunCancel = context.WithCancel(graceful.GetManager().ShutdownContext())
+ w.batchChan = make(chan []T)
+ w.flushChan = make(chan flushType)
+ w.shutdownDone = make(chan struct{})
+ w.workerMaxNum = queueSetting.MaxWorkers
+ w.batchLength = queueSetting.BatchLength
+
+ w.origHandler = handler
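+ // wrap the caller's handler with panic recovery so a buggy handler cannot
+ // kill a worker goroutine; note that on panic the named "unhandled" return
+ // value stays nil, so the panicking batch is not re-queued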
+ w.safeHandler = func(t ...T) (unhandled []T) {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.Error("Recovered from panic in queue %q handler: %v\n%s", name, err, log.Stack(2))
+ }
+ }()
+ return w.origHandler(t...)
+ }
+
+ return &w, nil
+}