summaryrefslogtreecommitdiffstats
path: root/services
diff options
context:
space:
mode:
authorwxiaoguang <wxiaoguang@gmail.com>2023-05-08 19:49:59 +0800
committerGitHub <noreply@github.com>2023-05-08 19:49:59 +0800
commit6f9c278559789066aa831c1df25b0d866103d02d (patch)
treee3a1880e0d4cf88916f9d1b65d82fbd4c41ea47f /services
parentcb700aedd1e670fb47b8cf0cd67fb117a1ad88a2 (diff)
downloadgitea-6f9c278559789066aa831c1df25b0d866103d02d.tar.gz
gitea-6f9c278559789066aa831c1df25b0d866103d02d.zip
Rewrite queue (#24505)
# ⚠️ Breaking Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example: ``` 2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]` 2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]` 2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options ``` Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini. # The problem The old queue package has some legacy problems: * complexity: I doubt that many people could tell how it works. * maintainability: Too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other. * stability: due to the complexity & maintainability issues, sometimes there are strange bugs that are difficult to debug, and some code doesn't have tests (indeed some code is difficult to test because a lot of things are mixed together). * general applicability: although it is called a "queue", its behavior is not that of a well-known queue. * scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors. It came from some very old code written to "avoid breaking" things; however, its technical debt is too heavy now. It's a good time to introduce a better "queue" package. # The new queue package It keeps using the old config and concepts as much as possible. * It only contains two major kinds of concepts: * The "base queue": channel, levelqueue, redis * They have the same abstraction, the same interface, and they are tested by the same testing code. 
* The "WorkerPoolQueue": it uses the "base queue" to provide the "worker pool" function, and calls the "handler" to process the data in the base queue. * The new code doesn't do "PushBack" * Think about a queue with many workers: "PushBack" can't guarantee the order for re-queued unhandled items, so in the new code it just does a "normal push" * The new code doesn't do "pause/resume" * The "pause/resume" was designed to handle a handler's failure: e.g. the document indexer (elasticsearch) is down * If a queue is paused for a long time, either the producers block or the new items are dropped. * The new code doesn't do such a "pause/resume" trick; it's not a common queue behavior and it doesn't help much. * If there are unhandled items, the "push" function just blocks for a few seconds and then re-queues them and retries. * The new code doesn't do "worker booster" * Gitea's queue handlers are light functions; the cost is only the go-routine, so it doesn't make sense to "boost" them. * The new code only uses the "max worker number" to limit the concurrent workers. * The new "Push" never blocks forever * Instead of creating more and more blocking goroutines, returning an error is more friendly to the server and to the end user. There are more details in code comments: e.g. the "Flush" problem, the strange "code.index" hanging problem, the "immediate" queue problem. Almost ready for review. TODO: * [x] add some necessary comments during review * [x] add some more tests if necessary * [x] update documents and config options * [x] test max worker / active worker * [x] re-run the CI tasks to see whether any test is flaky * [x] improve the `handleOldLengthConfiguration` to provide more friendly messages * [x] fine tune default config values (eg: length?) ## Code coverage: ![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
Diffstat (limited to 'services')
-rw-r--r--services/actions/init.go2
-rw-r--r--services/actions/job_emitter.go11
-rw-r--r--services/automerge/automerge.go18
-rw-r--r--services/convert/utils_test.go2
-rw-r--r--services/mailer/mailer.go9
-rw-r--r--services/migrations/github.go3
-rw-r--r--services/mirror/mirror.go7
-rw-r--r--services/pull/check.go35
-rw-r--r--services/pull/check_test.go29
-rw-r--r--services/repository/archiver/archiver.go15
-rw-r--r--services/repository/push.go9
-rw-r--r--services/task/task.go9
-rw-r--r--services/webhook/deliver.go2
-rw-r--r--services/webhook/webhook.go10
14 files changed, 67 insertions, 94 deletions
diff --git a/services/actions/init.go b/services/actions/init.go
index 3fd03eeb6f..8a9a30084a 100644
--- a/services/actions/init.go
+++ b/services/actions/init.go
@@ -15,7 +15,7 @@ func Init() {
return
}
- jobEmitterQueue = queue.CreateUniqueQueue("actions_ready_job", jobEmitterQueueHandle, new(jobUpdate))
+ jobEmitterQueue = queue.CreateUniqueQueue("actions_ready_job", jobEmitterQueueHandler)
go graceful.GetManager().RunWithShutdownFns(jobEmitterQueue.Run)
notification.RegisterNotifier(NewNotifier())
diff --git a/services/actions/job_emitter.go b/services/actions/job_emitter.go
index c6b6fc551e..f7ec615364 100644
--- a/services/actions/job_emitter.go
+++ b/services/actions/job_emitter.go
@@ -16,7 +16,7 @@ import (
"xorm.io/builder"
)
-var jobEmitterQueue queue.UniqueQueue
+var jobEmitterQueue *queue.WorkerPoolQueue[*jobUpdate]
type jobUpdate struct {
RunID int64
@@ -32,13 +32,12 @@ func EmitJobsIfReady(runID int64) error {
return err
}
-func jobEmitterQueueHandle(data ...queue.Data) []queue.Data {
+func jobEmitterQueueHandler(items ...*jobUpdate) []*jobUpdate {
ctx := graceful.GetManager().ShutdownContext()
- var ret []queue.Data
- for _, d := range data {
- update := d.(*jobUpdate)
+ var ret []*jobUpdate
+ for _, update := range items {
if err := checkJobsOfRun(ctx, update.RunID); err != nil {
- ret = append(ret, d)
+ ret = append(ret, update)
}
}
return ret
diff --git a/services/automerge/automerge.go b/services/automerge/automerge.go
index 9946047640..f001a6ccc5 100644
--- a/services/automerge/automerge.go
+++ b/services/automerge/automerge.go
@@ -25,11 +25,11 @@ import (
)
// prAutoMergeQueue represents a queue to handle update pull request tests
-var prAutoMergeQueue queue.UniqueQueue
+var prAutoMergeQueue *queue.WorkerPoolQueue[string]
// Init runs the task queue to that handles auto merges
func Init() error {
- prAutoMergeQueue = queue.CreateUniqueQueue("pr_auto_merge", handle, "")
+ prAutoMergeQueue = queue.CreateUniqueQueue("pr_auto_merge", handler)
if prAutoMergeQueue == nil {
return fmt.Errorf("Unable to create pr_auto_merge Queue")
}
@@ -38,12 +38,12 @@ func Init() error {
}
// handle passed PR IDs and test the PRs
-func handle(data ...queue.Data) []queue.Data {
- for _, d := range data {
+func handler(items ...string) []string {
+ for _, s := range items {
var id int64
var sha string
- if _, err := fmt.Sscanf(d.(string), "%d_%s", &id, &sha); err != nil {
- log.Error("could not parse data from pr_auto_merge queue (%v): %v", d, err)
+ if _, err := fmt.Sscanf(s, "%d_%s", &id, &sha); err != nil {
+ log.Error("could not parse data from pr_auto_merge queue (%v): %v", s, err)
continue
}
handlePull(id, sha)
@@ -52,10 +52,8 @@ func handle(data ...queue.Data) []queue.Data {
}
func addToQueue(pr *issues_model.PullRequest, sha string) {
- if err := prAutoMergeQueue.PushFunc(fmt.Sprintf("%d_%s", pr.ID, sha), func() error {
- log.Trace("Adding pullID: %d to the pull requests patch checking queue with sha %s", pr.ID, sha)
- return nil
- }); err != nil {
+ log.Trace("Adding pullID: %d to the pull requests patch checking queue with sha %s", pr.ID, sha)
+ if err := prAutoMergeQueue.Push(fmt.Sprintf("%d_%s", pr.ID, sha)); err != nil {
log.Error("Error adding pullID: %d to the pull requests patch checking queue %v", pr.ID, err)
}
}
diff --git a/services/convert/utils_test.go b/services/convert/utils_test.go
index d1ec5980ce..1ac03a3097 100644
--- a/services/convert/utils_test.go
+++ b/services/convert/utils_test.go
@@ -7,8 +7,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
-
- _ "github.com/mattn/go-sqlite3"
)
func TestToCorrectPageSize(t *testing.T) {
diff --git a/services/mailer/mailer.go b/services/mailer/mailer.go
index 3d878b7c8c..5aeda9ed79 100644
--- a/services/mailer/mailer.go
+++ b/services/mailer/mailer.go
@@ -378,7 +378,7 @@ func (s *dummySender) Send(from string, to []string, msg io.WriterTo) error {
return nil
}
-var mailQueue queue.Queue
+var mailQueue *queue.WorkerPoolQueue[*Message]
// Sender sender for sending mail synchronously
var Sender gomail.Sender
@@ -401,9 +401,8 @@ func NewContext(ctx context.Context) {
Sender = &smtpSender{}
}
- mailQueue = queue.CreateQueue("mail", func(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- msg := datum.(*Message)
+ mailQueue = queue.CreateSimpleQueue("mail", func(items ...*Message) []*Message {
+ for _, msg := range items {
gomailMsg := msg.ToMessage()
log.Trace("New e-mail sending request %s: %s", gomailMsg.GetHeader("To"), msg.Info)
if err := gomail.Send(Sender, gomailMsg); err != nil {
@@ -413,7 +412,7 @@ func NewContext(ctx context.Context) {
}
}
return nil
- }, &Message{})
+ })
go graceful.GetManager().RunWithShutdownFns(mailQueue.Run)
diff --git a/services/migrations/github.go b/services/migrations/github.go
index 26e8813536..3e63fddb6a 100644
--- a/services/migrations/github.go
+++ b/services/migrations/github.go
@@ -19,7 +19,6 @@ import (
base "code.gitea.io/gitea/modules/migration"
"code.gitea.io/gitea/modules/proxy"
"code.gitea.io/gitea/modules/structs"
- "code.gitea.io/gitea/modules/util"
"github.com/google/go-github/v51/github"
"golang.org/x/oauth2"
@@ -164,7 +163,7 @@ func (g *GithubDownloaderV3) waitAndPickClient() {
timer := time.NewTimer(time.Until(g.rates[g.curClientIdx].Reset.Time))
select {
case <-g.ctx.Done():
- util.StopTimer(timer)
+ timer.Stop()
return
case <-timer.C:
}
diff --git a/services/mirror/mirror.go b/services/mirror/mirror.go
index 9e569a70e3..35ba09521b 100644
--- a/services/mirror/mirror.go
+++ b/services/mirror/mirror.go
@@ -120,9 +120,8 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
return nil
}
-func queueHandle(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- req := datum.(*mirror_module.SyncRequest)
+func queueHandler(items ...*mirror_module.SyncRequest) []*mirror_module.SyncRequest {
+ for _, req := range items {
doMirrorSync(graceful.GetManager().ShutdownContext(), req)
}
return nil
@@ -130,5 +129,5 @@ func queueHandle(data ...queue.Data) []queue.Data {
// InitSyncMirrors initializes a go routine to sync the mirrors
func InitSyncMirrors() {
- mirror_module.StartSyncMirrors(queueHandle)
+ mirror_module.StartSyncMirrors(queueHandler)
}
diff --git a/services/pull/check.go b/services/pull/check.go
index 02d9015414..8bc2bdff1d 100644
--- a/services/pull/check.go
+++ b/services/pull/check.go
@@ -30,7 +30,7 @@ import (
)
// prPatchCheckerQueue represents a queue to handle update pull request tests
-var prPatchCheckerQueue queue.UniqueQueue
+var prPatchCheckerQueue *queue.WorkerPoolQueue[string]
var (
ErrIsClosed = errors.New("pull is closed")
@@ -44,16 +44,14 @@ var (
// AddToTaskQueue adds itself to pull request test task queue.
func AddToTaskQueue(pr *issues_model.PullRequest) {
- err := prPatchCheckerQueue.PushFunc(strconv.FormatInt(pr.ID, 10), func() error {
- pr.Status = issues_model.PullRequestStatusChecking
- err := pr.UpdateColsIfNotMerged(db.DefaultContext, "status")
- if err != nil {
- log.Error("AddToTaskQueue(%-v).UpdateCols.(add to queue): %v", pr, err)
- } else {
- log.Trace("Adding %-v to the test pull requests queue", pr)
- }
- return err
- })
+ pr.Status = issues_model.PullRequestStatusChecking
+ err := pr.UpdateColsIfNotMerged(db.DefaultContext, "status")
+ if err != nil {
+ log.Error("AddToTaskQueue(%-v).UpdateCols.(add to queue): %v", pr, err)
+ return
+ }
+ log.Trace("Adding %-v to the test pull requests queue", pr)
+ err = prPatchCheckerQueue.Push(strconv.FormatInt(pr.ID, 10))
if err != nil && err != queue.ErrAlreadyInQueue {
log.Error("Error adding %-v to the test pull requests queue: %v", pr, err)
}
@@ -315,10 +313,8 @@ func InitializePullRequests(ctx context.Context) {
case <-ctx.Done():
return
default:
- if err := prPatchCheckerQueue.PushFunc(strconv.FormatInt(prID, 10), func() error {
- log.Trace("Adding PR[%d] to the pull requests patch checking queue", prID)
- return nil
- }); err != nil {
+ log.Trace("Adding PR[%d] to the pull requests patch checking queue", prID)
+ if err := prPatchCheckerQueue.Push(strconv.FormatInt(prID, 10)); err != nil {
log.Error("Error adding PR[%d] to the pull requests patch checking queue %v", prID, err)
}
}
@@ -326,10 +322,9 @@ func InitializePullRequests(ctx context.Context) {
}
// handle passed PR IDs and test the PRs
-func handle(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- id, _ := strconv.ParseInt(datum.(string), 10, 64)
-
+func handler(items ...string) []string {
+ for _, s := range items {
+ id, _ := strconv.ParseInt(s, 10, 64)
testPR(id)
}
return nil
@@ -389,7 +384,7 @@ func CheckPRsForBaseBranch(baseRepo *repo_model.Repository, baseBranchName strin
// Init runs the task queue to test all the checking status pull requests
func Init() error {
- prPatchCheckerQueue = queue.CreateUniqueQueue("pr_patch_checker", handle, "")
+ prPatchCheckerQueue = queue.CreateUniqueQueue("pr_patch_checker", handler)
if prPatchCheckerQueue == nil {
return fmt.Errorf("Unable to create pr_patch_checker Queue")
diff --git a/services/pull/check_test.go b/services/pull/check_test.go
index 590065250f..52209b4d35 100644
--- a/services/pull/check_test.go
+++ b/services/pull/check_test.go
@@ -12,6 +12,7 @@ import (
issues_model "code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/queue"
+ "code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
)
@@ -20,27 +21,18 @@ func TestPullRequest_AddToTaskQueue(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
idChan := make(chan int64, 10)
-
- q, err := queue.NewChannelUniqueQueue(func(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- id, _ := strconv.ParseInt(datum.(string), 10, 64)
+ testHandler := func(items ...string) []string {
+ for _, s := range items {
+ id, _ := strconv.ParseInt(s, 10, 64)
idChan <- id
}
return nil
- }, queue.ChannelUniqueQueueConfiguration{
- WorkerPoolConfiguration: queue.WorkerPoolConfiguration{
- QueueLength: 10,
- BatchLength: 1,
- Name: "temporary-queue",
- },
- Workers: 1,
- }, "")
- assert.NoError(t, err)
-
- queueShutdown := []func(){}
- queueTerminate := []func(){}
+ }
- prPatchCheckerQueue = q.(queue.UniqueQueue)
+ cfg, err := setting.GetQueueSettings(setting.CfgProvider, "pr_patch_checker")
+ assert.NoError(t, err)
+ prPatchCheckerQueue, err = queue.NewWorkerPoolQueueBySetting("pr_patch_checker", cfg, testHandler, true)
+ assert.NoError(t, err)
pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2})
AddToTaskQueue(pr)
@@ -54,7 +46,8 @@ func TestPullRequest_AddToTaskQueue(t *testing.T) {
assert.True(t, has)
assert.NoError(t, err)
- prPatchCheckerQueue.Run(func(shutdown func()) {
+ var queueShutdown, queueTerminate []func()
+ go prPatchCheckerQueue.Run(func(shutdown func()) {
queueShutdown = append(queueShutdown, shutdown)
}, func(terminate func()) {
queueTerminate = append(queueTerminate, terminate)
diff --git a/services/repository/archiver/archiver.go b/services/repository/archiver/archiver.go
index 1da4425cfc..1c514a4112 100644
--- a/services/repository/archiver/archiver.go
+++ b/services/repository/archiver/archiver.go
@@ -295,26 +295,21 @@ func ArchiveRepository(request *ArchiveRequest) (*repo_model.RepoArchiver, error
return doArchive(request)
}
-var archiverQueue queue.UniqueQueue
+var archiverQueue *queue.WorkerPoolQueue[*ArchiveRequest]
// Init initlize archive
func Init() error {
- handler := func(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- archiveReq, ok := datum.(*ArchiveRequest)
- if !ok {
- log.Error("Unable to process provided datum: %v - not possible to cast to IndexerData", datum)
- continue
- }
+ handler := func(items ...*ArchiveRequest) []*ArchiveRequest {
+ for _, archiveReq := range items {
log.Trace("ArchiverData Process: %#v", archiveReq)
if _, err := doArchive(archiveReq); err != nil {
- log.Error("Archive %v failed: %v", datum, err)
+ log.Error("Archive %v failed: %v", archiveReq, err)
}
}
return nil
}
- archiverQueue = queue.CreateUniqueQueue("repo-archive", handler, new(ArchiveRequest))
+ archiverQueue = queue.CreateUniqueQueue("repo-archive", handler)
if archiverQueue == nil {
return errors.New("unable to create codes indexer queue")
}
diff --git a/services/repository/push.go b/services/repository/push.go
index 7f174c71b3..c7ea8f336e 100644
--- a/services/repository/push.go
+++ b/services/repository/push.go
@@ -29,12 +29,11 @@ import (
)
// pushQueue represents a queue to handle update pull request tests
-var pushQueue queue.Queue
+var pushQueue *queue.WorkerPoolQueue[[]*repo_module.PushUpdateOptions]
// handle passed PR IDs and test the PRs
-func handle(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- opts := datum.([]*repo_module.PushUpdateOptions)
+func handler(items ...[]*repo_module.PushUpdateOptions) [][]*repo_module.PushUpdateOptions {
+ for _, opts := range items {
if err := pushUpdates(opts); err != nil {
log.Error("pushUpdate failed: %v", err)
}
@@ -43,7 +42,7 @@ func handle(data ...queue.Data) []queue.Data {
}
func initPushQueue() error {
- pushQueue = queue.CreateQueue("push_update", handle, []*repo_module.PushUpdateOptions{})
+ pushQueue = queue.CreateSimpleQueue("push_update", handler)
if pushQueue == nil {
return errors.New("unable to create push_update Queue")
}
diff --git a/services/task/task.go b/services/task/task.go
index 41bc07f2f6..4f1ba3a60b 100644
--- a/services/task/task.go
+++ b/services/task/task.go
@@ -23,7 +23,7 @@ import (
)
// taskQueue is a global queue of tasks
-var taskQueue queue.Queue
+var taskQueue *queue.WorkerPoolQueue[*admin_model.Task]
// Run a task
func Run(t *admin_model.Task) error {
@@ -37,7 +37,7 @@ func Run(t *admin_model.Task) error {
// Init will start the service to get all unfinished tasks and run them
func Init() error {
- taskQueue = queue.CreateQueue("task", handle, &admin_model.Task{})
+ taskQueue = queue.CreateSimpleQueue("task", handler)
if taskQueue == nil {
return fmt.Errorf("Unable to create Task Queue")
@@ -48,9 +48,8 @@ func Init() error {
return nil
}
-func handle(data ...queue.Data) []queue.Data {
- for _, datum := range data {
- task := datum.(*admin_model.Task)
+func handler(items ...*admin_model.Task) []*admin_model.Task {
+ for _, task := range items {
if err := Run(task); err != nil {
log.Error("Run task failed: %v", err)
}
diff --git a/services/webhook/deliver.go b/services/webhook/deliver.go
index 31246c1555..e817783e55 100644
--- a/services/webhook/deliver.go
+++ b/services/webhook/deliver.go
@@ -283,7 +283,7 @@ func Init() error {
},
}
- hookQueue = queue.CreateUniqueQueue("webhook_sender", handle, int64(0))
+ hookQueue = queue.CreateUniqueQueue("webhook_sender", handler)
if hookQueue == nil {
return fmt.Errorf("Unable to create webhook_sender Queue")
}
diff --git a/services/webhook/webhook.go b/services/webhook/webhook.go
index b862d5bff1..3cd9deafd8 100644
--- a/services/webhook/webhook.go
+++ b/services/webhook/webhook.go
@@ -77,7 +77,7 @@ func IsValidHookTaskType(name string) bool {
}
// hookQueue is a global queue of web hooks
-var hookQueue queue.UniqueQueue
+var hookQueue *queue.WorkerPoolQueue[int64]
// getPayloadBranch returns branch for hook event, if applicable.
func getPayloadBranch(p api.Payloader) string {
@@ -105,13 +105,13 @@ type EventSource struct {
}
// handle delivers hook tasks
-func handle(data ...queue.Data) []queue.Data {
+func handler(items ...int64) []int64 {
ctx := graceful.GetManager().HammerContext()
- for _, taskID := range data {
- task, err := webhook_model.GetHookTaskByID(ctx, taskID.(int64))
+ for _, taskID := range items {
+ task, err := webhook_model.GetHookTaskByID(ctx, taskID)
if err != nil {
- log.Error("GetHookTaskByID[%d] failed: %v", taskID.(int64), err)
+ log.Error("GetHookTaskByID[%d] failed: %v", taskID, err)
continue
}