path: root/modules/queue/queue_channel.go
author    wxiaoguang <wxiaoguang@gmail.com>    2023-05-08 19:49:59 +0800
committer GitHub <noreply@github.com>          2023-05-08 19:49:59 +0800
commit    6f9c278559789066aa831c1df25b0d866103d02d (patch)
tree      e3a1880e0d4cf88916f9d1b65d82fbd4c41ea47f /modules/queue/queue_channel.go
parent    cb700aedd1e670fb47b8cf0cd67fb117a1ad88a2 (diff)
Rewrite queue (#24505)
# ⚠️ Breaking

Many deprecated queue config options are removed (they should have been removed in 1.18/1.19 already). If you see this fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: few people could tell how it works.
* Maintainability: too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and poor maintainability, strange bugs appear from time to time and are difficult to debug, and some code has no tests (indeed some code is hard to test, because so many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors.

It came from some very old code written to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major concepts:
    * The "base queue": channel, levelqueue, redis.
        * They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function, and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
    * Think about a queue with many workers: "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
    * "Pause/resume" was designed to handle a handler's failure, e.g. the document indexer (elasticsearch) being down.
    * If a queue is paused for a long time, either the producers block or the new items are dropped.
    * The new code doesn't do such a "pause/resume" trick; it's not a common queue behavior and it doesn't help much.
    * If there are unhandled items, the "push" function just blocks for a few seconds, then re-queues them and retries.
* The new code doesn't do "worker boosting".
    * Gitea's queue handlers are light functions whose only cost is the goroutine, so it doesn't make sense to "boost" them.
    * The new code only uses a "max worker number" to limit the concurrent workers.
* The new "Push" never blocks forever.
    * Instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in the code comments: e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
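To make the two-concept design concrete, here is a minimal, self-contained sketch of how a "base queue" and a worker-pool wrapper fit together. All names here (`baseQueue`, `chanBaseQueue`, `workerPoolQueue`, `PushItem`, `PopItem`) are illustrative assumptions for this sketch, not the exact API introduced by this PR:

```
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// baseQueue: one small abstraction that channel, levelqueue and redis
// backends could all implement, so they can share the same testing code.
// (Hypothetical interface for illustration only.)
type baseQueue interface {
	PushItem(ctx context.Context, data []byte) error
	PopItem(ctx context.Context) ([]byte, error)
}

// chanBaseQueue: an in-memory "channel" base queue with a bounded length.
type chanBaseQueue struct {
	ch chan []byte
}

func (q *chanBaseQueue) PushItem(ctx context.Context, data []byte) error {
	select {
	case q.ch <- data:
		return nil
	case <-ctx.Done():
		// "Push" never blocks forever: instead of accumulating blocked
		// goroutines, report an error to the caller.
		return errors.New("queue is full, item not pushed")
	}
}

func (q *chanBaseQueue) PopItem(ctx context.Context) ([]byte, error) {
	select {
	case data := <-q.ch:
		return data, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// workerPoolQueue drives a base queue with up to maxWorkers goroutines that
// call the handler. Unhandled items are re-queued with a normal push (no
// "PushBack", since ordering can't be guaranteed with many workers anyway).
type workerPoolQueue struct {
	base       baseQueue
	maxWorkers int
	handler    func(items ...[]byte) (unhandled [][]byte)
}

func (q *workerPoolQueue) run(ctx context.Context) {
	var wg sync.WaitGroup
	for i := 0; i < q.maxWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				item, err := q.base.PopItem(ctx)
				if err != nil {
					return // context done: stop this worker
				}
				for _, bad := range q.handler(item) {
					// re-queue unhandled items with a normal, bounded push
					pushCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
					_ = q.base.PushItem(pushCtx, bad)
					cancel()
				}
			}
		}()
	}
	wg.Wait()
}

func main() {
	q := &workerPoolQueue{
		base:       &chanBaseQueue{ch: make(chan []byte, 100)},
		maxWorkers: 2, // "max worker number" is the only concurrency knob
		handler: func(items ...[]byte) [][]byte {
			for _, it := range items {
				fmt.Println("handled:", string(it))
			}
			return nil // nothing unhandled
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_ = q.base.PushItem(ctx, []byte("hello"))
	q.run(ctx) // returns once ctx expires and all workers exit
}
```

The real package naturally has more (flush, shutdown, metrics), but the shape above is the core idea: one small base interface, one worker-pool wrapper on top of it.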
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
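Returning to the ⚠️ Breaking section above: the migration is to delete the removed options and configure each queue in its own `[queue.*]` section. A hedged before/after sketch of app.ini (the exact new option names and values are assumptions here and should be verified against the updated config cheat sheet):

```
; Before: removed options, Gitea now refuses to start with these
;[indexer]
;ISSUE_INDEXER_QUEUE_TYPE = levelqueue
;UPDATE_BUFFER_LEN = 20

; After: a per-queue section using the new unified options
[queue.issue_indexer]
TYPE = level
LENGTH = 100
```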
Diffstat (limited to 'modules/queue/queue_channel.go')
-rw-r--r--    modules/queue/queue_channel.go    160
1 file changed, 0 insertions(+), 160 deletions(-)
diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go
deleted file mode 100644
index baac097393..0000000000
--- a/modules/queue/queue_channel.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2019 The Gitea Authors. All rights reserved.
-// SPDX-License-Identifier: MIT
-
-package queue
-
-import (
- "context"
- "fmt"
- "runtime/pprof"
- "sync/atomic"
- "time"
-
- "code.gitea.io/gitea/modules/log"
-)
-
-// ChannelQueueType is the type for channel queue
-const ChannelQueueType Type = "channel"
-
-// ChannelQueueConfiguration is the configuration for a ChannelQueue
-type ChannelQueueConfiguration struct {
- WorkerPoolConfiguration
- Workers int
-}
-
-// ChannelQueue implements Queue
-//
-// A channel queue is not persistable and does not shutdown or terminate cleanly
-// It is basically a very thin wrapper around a WorkerPool
-type ChannelQueue struct {
- *WorkerPool
- shutdownCtx context.Context
- shutdownCtxCancel context.CancelFunc
- terminateCtx context.Context
- terminateCtxCancel context.CancelFunc
- exemplar interface{}
- workers int
- name string
-}
-
-// NewChannelQueue creates a memory channel queue
-func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
- configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg)
- if err != nil {
- return nil, err
- }
- config := configInterface.(ChannelQueueConfiguration)
- if config.BatchLength == 0 {
- config.BatchLength = 1
- }
-
- terminateCtx, terminateCtxCancel := context.WithCancel(context.Background())
- shutdownCtx, shutdownCtxCancel := context.WithCancel(terminateCtx)
-
- queue := &ChannelQueue{
- shutdownCtx: shutdownCtx,
- shutdownCtxCancel: shutdownCtxCancel,
- terminateCtx: terminateCtx,
- terminateCtxCancel: terminateCtxCancel,
- exemplar: exemplar,
- workers: config.Workers,
- name: config.Name,
- }
- queue.WorkerPool = NewWorkerPool(func(data ...Data) []Data {
- unhandled := handle(data...)
- if len(unhandled) > 0 {
- // We can only pushback to the channel if we're paused.
- if queue.IsPaused() {
- atomic.AddInt64(&queue.numInQueue, int64(len(unhandled)))
- go func() {
- for _, datum := range data {
- queue.dataChan <- datum
- }
- }()
- return nil
- }
- }
- return unhandled
- }, config.WorkerPoolConfiguration)
-
- queue.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar)
- return queue, nil
-}
-
-// Run starts to run the queue
-func (q *ChannelQueue) Run(atShutdown, atTerminate func(func())) {
- pprof.SetGoroutineLabels(q.baseCtx)
- atShutdown(q.Shutdown)
- atTerminate(q.Terminate)
- log.Debug("ChannelQueue: %s Starting", q.name)
- _ = q.AddWorkers(q.workers, 0)
-}
-
-// Push will push data into the queue
-func (q *ChannelQueue) Push(data Data) error {
- if !assignableTo(data, q.exemplar) {
- return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v in queue: %s", data, q.exemplar, q.name)
- }
- q.WorkerPool.Push(data)
- return nil
-}
-
-// Flush flushes the channel with a timeout - the Flush worker will be registered as a flush worker with the manager
-func (q *ChannelQueue) Flush(timeout time.Duration) error {
- if q.IsPaused() {
- return nil
- }
- ctx, cancel := q.commonRegisterWorkers(1, timeout, true)
- defer cancel()
- return q.FlushWithContext(ctx)
-}
-
-// Shutdown processing from this queue
-func (q *ChannelQueue) Shutdown() {
- q.lock.Lock()
- defer q.lock.Unlock()
- select {
- case <-q.shutdownCtx.Done():
- log.Trace("ChannelQueue: %s Already Shutting down", q.name)
- return
- default:
- }
- log.Trace("ChannelQueue: %s Shutting down", q.name)
- go func() {
- log.Trace("ChannelQueue: %s Flushing", q.name)
- // We can't use Cleanup here because that will close the channel
- if err := q.FlushWithContext(q.terminateCtx); err != nil {
- count := atomic.LoadInt64(&q.numInQueue)
- if count > 0 {
- log.Warn("ChannelQueue: %s Terminated before completed flushing", q.name)
- }
- return
- }
- log.Debug("ChannelQueue: %s Flushed", q.name)
- }()
- q.shutdownCtxCancel()
- log.Debug("ChannelQueue: %s Shutdown", q.name)
-}
-
-// Terminate this queue and close the queue
-func (q *ChannelQueue) Terminate() {
- log.Trace("ChannelQueue: %s Terminating", q.name)
- q.Shutdown()
- select {
- case <-q.terminateCtx.Done():
- return
- default:
- }
- q.terminateCtxCancel()
- q.baseCtxFinished()
- log.Debug("ChannelQueue: %s Terminated", q.name)
-}
-
-// Name returns the name of this queue
-func (q *ChannelQueue) Name() string {
- return q.name
-}
-
-func init() {
- queuesMap[ChannelQueueType] = NewChannelQueue
-}