commit 62eb1b0f2530a5ae1ce9b729378c0c8066174215
author:    zeripath <art27@cantab.net>  2020-01-07 11:23:09 +0000
committer: Antoine GIRARD <sapk@users.noreply.github.com>  2020-01-07 12:23:09 +0100
tree:      e567b2a9d91e69c0f2bccfeaf1a7341b4dda2706
parent:    f71e1c8e796b099f4634bcd358e48189a97dcbad
Graceful Queues: Issue Indexing and Tasks (#9363)
* Queue: Add generic graceful queues with settings
* Queue & Setting: Add worker pool implementation
* Queue: Add worker settings
* Queue: Make resizing worker pools
* Queue: Add name variable to queues
* Queue: Add monitoring
* Queue: Improve logging
* Issues: Gracefulise the issues indexer
  Remove the old now unused specific queues
* Task: Move to generic queue and gracefulise
* Issues: Standardise the issues indexer queue settings
* Fix test
* Queue: Allow Redis to connect to unix
* Prevent deadlock during early shutdown of issue indexer
* Add MaxWorker settings to queues
* Merge branch 'master' into graceful-queues
* Update modules/indexer/issues/indexer.go
  Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Update modules/indexer/issues/indexer.go
  Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Update modules/queue/queue_channel.go
  Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Update modules/queue/queue_disk.go
* Update modules/queue/queue_disk_channel.go
  Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Rename queue.Description to queue.ManagedQueue as per @guillep2k
* Cancel pool workers when removed
* Remove dependency on queue from setting
* Update modules/queue/queue_redis.go
  Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* As per @guillep2k add mutex locks on shutdown/terminate
* move unlocking out of setInternal
* Add warning if number of workers < 0
* Small changes as per @guillep2k
* No redis host specified not found
* Clean up documentation for queues
* Update docs/content/doc/advanced/config-cheat-sheet.en-us.md
* Update modules/indexer/issues/indexer_test.go
* Ensure that persistable channel queue is added to manager
* Rename QUEUE_NAME REDIS_QUEUE_NAME
* Revert "Rename QUEUE_NAME REDIS_QUEUE_NAME"
  This reverts commit 1f83b4fc9b9dabda186257b38c265fe7012f90df.

Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: techknowlogick <matti@mdranta.net>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Diffstat (limited to 'modules/queue')
-rw-r--r--  modules/queue/manager.go                  270
-rw-r--r--  modules/queue/queue.go                    133
-rw-r--r--  modules/queue/queue_channel.go            106
-rw-r--r--  modules/queue/queue_channel_test.go        91
-rw-r--r--  modules/queue/queue_disk.go               213
-rw-r--r--  modules/queue/queue_disk_channel.go       193
-rw-r--r--  modules/queue/queue_disk_channel_test.go  117
-rw-r--r--  modules/queue/queue_disk_test.go          126
-rw-r--r--  modules/queue/queue_redis.go              234
-rw-r--r--  modules/queue/queue_test.go                43
-rw-r--r--  modules/queue/queue_wrapped.go            206
-rw-r--r--  modules/queue/setting.go                   75
-rw-r--r--  modules/queue/workerpool.go               325
13 files changed, 2132 insertions, 0 deletions
diff --git a/modules/queue/manager.go b/modules/queue/manager.go
new file mode 100644
index 0000000000..88b2644848
--- /dev/null
+++ b/modules/queue/manager.go
@@ -0,0 +1,270 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+var manager *Manager
+
+// Manager is a queue manager
+type Manager struct {
+ mutex sync.Mutex
+
+ counter int64
+ Queues map[int64]*ManagedQueue
+}
+
+// ManagedQueue represents a queue that is being managed by the Manager.
+type ManagedQueue struct {
+ mutex sync.Mutex
+ QID int64
+ Queue Queue
+ Type Type
+ Name string
+ Configuration interface{}
+ ExemplarType string
+ Pool ManagedPool
+ counter int64
+ PoolWorkers map[int64]*PoolWorkers
+}
+
+// ManagedPool is a simple interface to get certain details from a worker pool
+type ManagedPool interface {
+ AddWorkers(number int, timeout time.Duration) context.CancelFunc
+ NumberOfWorkers() int
+ MaxNumberOfWorkers() int
+ SetMaxNumberOfWorkers(int)
+ BoostTimeout() time.Duration
+ BlockTimeout() time.Duration
+ BoostWorkers() int
+ SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
+}
+
+// ManagedQueueList implements the sort.Interface
+type ManagedQueueList []*ManagedQueue
+
+// PoolWorkers represents a group of workers in a pool, together with their common cancel function.
+type PoolWorkers struct {
+ PID int64
+ Workers int
+ Start time.Time
+ Timeout time.Time
+ HasTimeout bool
+ Cancel context.CancelFunc
+}
+
+// PoolWorkersList implements the sort.Interface
+type PoolWorkersList []*PoolWorkers
+
+func init() {
+ _ = GetManager()
+}
+
+// GetManager returns a Manager and initializes one as singleton if there's none yet
+func GetManager() *Manager {
+ if manager == nil {
+ manager = &Manager{
+ Queues: make(map[int64]*ManagedQueue),
+ }
+ }
+ return manager
+}
+
+// Add adds a queue to this manager
+func (m *Manager) Add(queue Queue,
+ t Type,
+ configuration,
+ exemplar interface{},
+ pool ManagedPool) int64 {
+
+ cfg, _ := json.Marshal(configuration)
+ exemplarType := ""
+ if exemplar != nil {
+ // guard: reflect.TypeOf(nil).String() would panic for a nil exemplar
+ exemplarType = reflect.TypeOf(exemplar).String()
+ }
+ mq := &ManagedQueue{
+ Queue: queue,
+ Type: t,
+ Configuration: string(cfg),
+ ExemplarType: exemplarType,
+ PoolWorkers: make(map[int64]*PoolWorkers),
+ Pool: pool,
+ }
+ m.mutex.Lock()
+ m.counter++
+ mq.QID = m.counter
+ mq.Name = fmt.Sprintf("queue-%d", mq.QID)
+ if named, ok := queue.(Named); ok {
+ mq.Name = named.Name()
+ }
+ m.Queues[mq.QID] = mq
+ m.mutex.Unlock()
+ log.Trace("Queue Manager registered: %s (QID: %d)", mq.Name, mq.QID)
+ return mq.QID
+}
+
+// Remove a queue from the Manager
+func (m *Manager) Remove(qid int64) {
+ m.mutex.Lock()
+ delete(m.Queues, qid)
+ m.mutex.Unlock()
+ log.Trace("Queue Manager removed: QID: %d", qid)
+}
+
+// GetManagedQueue returns the managed queue with the provided qid
+func (m *Manager) GetManagedQueue(qid int64) *ManagedQueue {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ return m.Queues[qid]
+}
+
+// ManagedQueues returns the managed queues
+func (m *Manager) ManagedQueues() []*ManagedQueue {
+ m.mutex.Lock()
+ mqs := make([]*ManagedQueue, 0, len(m.Queues))
+ for _, mq := range m.Queues {
+ mqs = append(mqs, mq)
+ }
+ m.mutex.Unlock()
+ sort.Sort(ManagedQueueList(mqs))
+ return mqs
+}
+
+// Workers returns the pool workers registered with this queue
+func (q *ManagedQueue) Workers() []*PoolWorkers {
+ q.mutex.Lock()
+ workers := make([]*PoolWorkers, 0, len(q.PoolWorkers))
+ for _, worker := range q.PoolWorkers {
+ workers = append(workers, worker)
+ }
+ q.mutex.Unlock()
+ sort.Sort(PoolWorkersList(workers))
+ return workers
+}
+
+// RegisterWorkers registers workers to this queue
+func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc) int64 {
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ q.counter++
+ q.PoolWorkers[q.counter] = &PoolWorkers{
+ PID: q.counter,
+ Workers: number,
+ Start: start,
+ Timeout: timeout,
+ HasTimeout: hasTimeout,
+ Cancel: cancel,
+ }
+ return q.counter
+}
+
+// CancelWorkers cancels pooled workers with pid
+func (q *ManagedQueue) CancelWorkers(pid int64) {
+ q.mutex.Lock()
+ pw, ok := q.PoolWorkers[pid]
+ q.mutex.Unlock()
+ if !ok {
+ return
+ }
+ pw.Cancel()
+}
+
+// RemoveWorkers deletes pooled workers with pid
+func (q *ManagedQueue) RemoveWorkers(pid int64) {
+ q.mutex.Lock()
+ pw, ok := q.PoolWorkers[pid]
+ delete(q.PoolWorkers, pid)
+ q.mutex.Unlock()
+ if ok && pw.Cancel != nil {
+ pw.Cancel()
+ }
+}
+
+// AddWorkers adds workers to the queue if it has an associated pool
+func (q *ManagedQueue) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+ if q.Pool != nil {
+ // the cancel is registered alongside the pool workers (see RegisterWorkers above)
+ return q.Pool.AddWorkers(number, timeout)
+ }
+ return nil
+}
+
+// NumberOfWorkers returns the number of workers in the queue
+func (q *ManagedQueue) NumberOfWorkers() int {
+ if q.Pool != nil {
+ return q.Pool.NumberOfWorkers()
+ }
+ return -1
+}
+
+// MaxNumberOfWorkers returns the maximum number of workers for the pool
+func (q *ManagedQueue) MaxNumberOfWorkers() int {
+ if q.Pool != nil {
+ return q.Pool.MaxNumberOfWorkers()
+ }
+ return 0
+}
+
+// BoostWorkers returns the number of workers for a boost
+func (q *ManagedQueue) BoostWorkers() int {
+ if q.Pool != nil {
+ return q.Pool.BoostWorkers()
+ }
+ return -1
+}
+
+// BoostTimeout returns the timeout of the next boost
+func (q *ManagedQueue) BoostTimeout() time.Duration {
+ if q.Pool != nil {
+ return q.Pool.BoostTimeout()
+ }
+ return 0
+}
+
+// BlockTimeout returns the timeout until the next boost
+func (q *ManagedQueue) BlockTimeout() time.Duration {
+ if q.Pool != nil {
+ return q.Pool.BlockTimeout()
+ }
+ return 0
+}
+
+// SetSettings sets the settable boost values
+func (q *ManagedQueue) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
+ if q.Pool != nil {
+ q.Pool.SetSettings(maxNumberOfWorkers, boostWorkers, timeout)
+ }
+}
+
+func (l ManagedQueueList) Len() int {
+ return len(l)
+}
+
+func (l ManagedQueueList) Less(i, j int) bool {
+ return l[i].Name < l[j].Name
+}
+
+func (l ManagedQueueList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l PoolWorkersList) Len() int {
+ return len(l)
+}
+
+func (l PoolWorkersList) Less(i, j int) bool {
+ return l[i].Start.Before(l[j].Start)
+}
+
+func (l PoolWorkersList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
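The manager above is what backs the queue monitoring added by this commit. As a hedged sketch (the package, helper name, log format and boost values are illustrative, not part of this diff), monitoring code might enumerate the managed queues and temporarily boost any queue that exposes a pool:

    package monitor

    import (
    	"time"

    	"code.gitea.io/gitea/modules/log"
    	"code.gitea.io/gitea/modules/queue"
    )

    // boostAll lists every registered queue and adds two temporary workers
    // (auto-cancelled after five minutes) to each queue that has a pool.
    func boostAll() {
    	for _, mq := range queue.GetManager().ManagedQueues() {
    		log.Info("QID %d (%s): %d workers (max %d)",
    			mq.QID, mq.Name, mq.NumberOfWorkers(), mq.MaxNumberOfWorkers())
    		if mq.Pool != nil {
    			// AddWorkers registers its cancel function with the manager,
    			// so the returned CancelFunc may simply be discarded here.
    			_ = mq.AddWorkers(2, 5*time.Minute)
    		}
    	}
    }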
diff --git a/modules/queue/queue.go b/modules/queue/queue.go
new file mode 100644
index 0000000000..d458a7d506
--- /dev/null
+++ b/modules/queue/queue.go
@@ -0,0 +1,133 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// ErrInvalidConfiguration is returned when the configuration for a queue is invalid
+type ErrInvalidConfiguration struct {
+ cfg interface{}
+ err error
+}
+
+func (err ErrInvalidConfiguration) Error() string {
+ if err.err != nil {
+ return fmt.Sprintf("Invalid Configuration Argument: %v: Error: %v", err.cfg, err.err)
+ }
+ return fmt.Sprintf("Invalid Configuration Argument: %v", err.cfg)
+}
+
+// IsErrInvalidConfiguration checks if an error is an ErrInvalidConfiguration
+func IsErrInvalidConfiguration(err error) bool {
+ _, ok := err.(ErrInvalidConfiguration)
+ return ok
+}
+
+// Type is a type of Queue
+type Type string
+
+// Data defines a type of queueable data
+type Data interface{}
+
+// HandlerFunc is a function that takes a variable number of Data arguments and processes them
+type HandlerFunc func(...Data)
+
+// NewQueueFunc is a function that creates a queue
+type NewQueueFunc func(handler HandlerFunc, config interface{}, exemplar interface{}) (Queue, error)
+
+// Shutdownable represents a queue that can be shutdown
+type Shutdownable interface {
+ Shutdown()
+ Terminate()
+}
+
+// Named represents a queue with a name
+type Named interface {
+ Name() string
+}
+
+// Queue defines the interface of a queue
+type Queue interface {
+ Run(atShutdown, atTerminate func(context.Context, func()))
+ Push(Data) error
+}
+
+// DummyQueueType is the type for the dummy queue
+const DummyQueueType Type = "dummy"
+
+// NewDummyQueue creates a new DummyQueue
+func NewDummyQueue(handler HandlerFunc, opts, exemplar interface{}) (Queue, error) {
+ return &DummyQueue{}, nil
+}
+
+// DummyQueue represents an empty queue
+type DummyQueue struct{}
+
+// Run starts to run the queue
+func (b *DummyQueue) Run(_, _ func(context.Context, func())) {}
+
+// Push pushes data to the queue
+func (b *DummyQueue) Push(Data) error {
+ return nil
+}
+
+func toConfig(exemplar, cfg interface{}) (interface{}, error) {
+ if reflect.TypeOf(cfg).AssignableTo(reflect.TypeOf(exemplar)) {
+ return cfg, nil
+ }
+
+ configBytes, ok := cfg.([]byte)
+ if !ok {
+ configStr, ok := cfg.(string)
+ if !ok {
+ return nil, ErrInvalidConfiguration{cfg: cfg}
+ }
+ configBytes = []byte(configStr)
+ }
+ newVal := reflect.New(reflect.TypeOf(exemplar))
+ if err := json.Unmarshal(configBytes, newVal.Interface()); err != nil {
+ return nil, ErrInvalidConfiguration{cfg: cfg, err: err}
+ }
+ return newVal.Elem().Interface(), nil
+}
+
+var queuesMap = map[Type]NewQueueFunc{DummyQueueType: NewDummyQueue}
+
+// RegisteredTypes provides the list of registered queue types
+func RegisteredTypes() []Type {
+ types := make([]Type, len(queuesMap))
+ i := 0
+ for key := range queuesMap {
+ types[i] = key
+ i++
+ }
+ return types
+}
+
+// RegisteredTypesAsString provides the list of registered queue types as strings
+func RegisteredTypesAsString() []string {
+ types := make([]string, len(queuesMap))
+ i := 0
+ for key := range queuesMap {
+ types[i] = string(key)
+ i++
+ }
+ return types
+}
+
+// NewQueue takes a queue Type, a HandlerFunc, some options and possibly an exemplar, and returns a Queue or an error
+func NewQueue(queueType Type, handlerFunc HandlerFunc, opts, exemplar interface{}) (Queue, error) {
+ newFn, ok := queuesMap[queueType]
+ if !ok {
+ return nil, fmt.Errorf("Unsupported queue type: %v", queueType)
+ }
+ return newFn(handlerFunc, opts, exemplar)
+}
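A hedged sketch of the generic constructor in use (the package, handler and pushed value are invented for illustration): NewQueue looks up the registered NewQueueFunc for the Type, and toConfig above lets the configuration arrive either as a struct assignable to the exemplar configuration or as its JSON bytes/string form.

    package example

    import "code.gitea.io/gitea/modules/queue"

    func newDummy() (queue.Queue, error) {
    	handler := func(data ...queue.Data) {
    		for _, datum := range data {
    			_ = datum // process each item
    		}
    	}
    	// DummyQueue ignores its configuration, so nil opts/exemplar are fine;
    	// concrete types expect their own configuration struct or its JSON form.
    	q, err := queue.NewQueue(queue.DummyQueueType, handler, nil, nil)
    	if err != nil {
    		return nil, err // unregistered Type or invalid configuration
    	}
    	_ = q.Push("swallowed by the dummy queue")
    	return q, nil
    }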
diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go
new file mode 100644
index 0000000000..c8f8a53804
--- /dev/null
+++ b/modules/queue/queue_channel.go
@@ -0,0 +1,106 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// ChannelQueueType is the type for channel queue
+const ChannelQueueType Type = "channel"
+
+// ChannelQueueConfiguration is the configuration for a ChannelQueue
+type ChannelQueueConfiguration struct {
+ QueueLength int
+ BatchLength int
+ Workers int
+ MaxWorkers int
+ BlockTimeout time.Duration
+ BoostTimeout time.Duration
+ BoostWorkers int
+ Name string
+}
+
+// ChannelQueue implements Queue using an in-memory channel drained by a worker pool
+type ChannelQueue struct {
+ pool *WorkerPool
+ exemplar interface{}
+ workers int
+ name string
+}
+
+// NewChannelQueue creates a memory channel queue
+func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+ configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg)
+ if err != nil {
+ return nil, err
+ }
+ config := configInterface.(ChannelQueueConfiguration)
+ if config.BatchLength == 0 {
+ config.BatchLength = 1
+ }
+ dataChan := make(chan Data, config.QueueLength)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ queue := &ChannelQueue{
+ pool: &WorkerPool{
+ baseCtx: ctx,
+ cancel: cancel,
+ batchLength: config.BatchLength,
+ handle: handle,
+ dataChan: dataChan,
+ blockTimeout: config.BlockTimeout,
+ boostTimeout: config.BoostTimeout,
+ boostWorkers: config.BoostWorkers,
+ maxNumberOfWorkers: config.MaxWorkers,
+ },
+ exemplar: exemplar,
+ workers: config.Workers,
+ name: config.Name,
+ }
+ queue.pool.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar, queue.pool)
+ return queue, nil
+}
+
+// Run starts to run the queue
+func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+ atShutdown(context.Background(), func() {
+ log.Warn("ChannelQueue: %s is not shutdownable!", c.name)
+ })
+ atTerminate(context.Background(), func() {
+ log.Warn("ChannelQueue: %s is not terminatable!", c.name)
+ })
+ go func() {
+ _ = c.pool.AddWorkers(c.workers, 0)
+ }()
+}
+
+// Push will push data into the queue
+func (c *ChannelQueue) Push(data Data) error {
+ if c.exemplar != nil {
+ // Assert data is of same type as c.exemplar
+ t := reflect.TypeOf(data)
+ exemplarType := reflect.TypeOf(c.exemplar)
+ if !t.AssignableTo(exemplarType) || data == nil {
+ return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name)
+ }
+ }
+ c.pool.Push(data)
+ return nil
+}
+
+// Name returns the name of this queue
+func (c *ChannelQueue) Name() string {
+ return c.name
+}
+
+func init() {
+ queuesMap[ChannelQueueType] = NewChannelQueue
+}
diff --git a/modules/queue/queue_channel_test.go b/modules/queue/queue_channel_test.go
new file mode 100644
index 0000000000..fafc1e3303
--- /dev/null
+++ b/modules/queue/queue_channel_test.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestChannelQueue(t *testing.T) {
+ handleChan := make(chan *testData)
+ handle := func(data ...Data) {
+ for _, datum := range data {
+ testDatum := datum.(*testData)
+ handleChan <- testDatum
+ }
+ }
+
+ nilFn := func(_ context.Context, _ func()) {}
+
+ queue, err := NewChannelQueue(handle,
+ ChannelQueueConfiguration{
+ QueueLength: 20,
+ Workers: 1,
+ MaxWorkers: 10,
+ BlockTimeout: 1 * time.Second,
+ BoostTimeout: 5 * time.Minute,
+ BoostWorkers: 5,
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(nilFn, nilFn)
+
+ test1 := testData{"A", 1}
+ go queue.Push(&test1)
+ result1 := <-handleChan
+ assert.Equal(t, test1.TestString, result1.TestString)
+ assert.Equal(t, test1.TestInt, result1.TestInt)
+
+ err = queue.Push(test1)
+ assert.Error(t, err)
+}
+
+func TestChannelQueue_Batch(t *testing.T) {
+ handleChan := make(chan *testData)
+ handle := func(data ...Data) {
+ assert.True(t, len(data) == 2)
+ for _, datum := range data {
+ testDatum := datum.(*testData)
+ handleChan <- testDatum
+ }
+ }
+
+ nilFn := func(_ context.Context, _ func()) {}
+
+ queue, err := NewChannelQueue(handle,
+ ChannelQueueConfiguration{
+ QueueLength: 20,
+ BatchLength: 2,
+ Workers: 1,
+ MaxWorkers: 10,
+ BlockTimeout: 1 * time.Second,
+ BoostTimeout: 5 * time.Minute,
+ BoostWorkers: 5,
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(nilFn, nilFn)
+
+ test1 := testData{"A", 1}
+ test2 := testData{"B", 2}
+
+ queue.Push(&test1)
+ go queue.Push(&test2)
+
+ result1 := <-handleChan
+ assert.Equal(t, test1.TestString, result1.TestString)
+ assert.Equal(t, test1.TestInt, result1.TestInt)
+
+ result2 := <-handleChan
+ assert.Equal(t, test2.TestString, result2.TestString)
+ assert.Equal(t, test2.TestInt, result2.TestInt)
+
+ err = queue.Push(test1)
+ assert.Error(t, err)
+}
diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go
new file mode 100644
index 0000000000..98e7b24e42
--- /dev/null
+++ b/modules/queue/queue_disk.go
@@ -0,0 +1,213 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+
+ "gitea.com/lunny/levelqueue"
+)
+
+// LevelQueueType is the type for level queue
+const LevelQueueType Type = "level"
+
+// LevelQueueConfiguration is the configuration for a LevelQueue
+type LevelQueueConfiguration struct {
+ DataDir string
+ QueueLength int
+ BatchLength int
+ Workers int
+ MaxWorkers int
+ BlockTimeout time.Duration
+ BoostTimeout time.Duration
+ BoostWorkers int
+ Name string
+}
+
+// LevelQueue implements a disk-backed queue using levelqueue (LevelDB)
+type LevelQueue struct {
+ pool *WorkerPool
+ queue *levelqueue.Queue
+ closed chan struct{}
+ terminated chan struct{}
+ lock sync.Mutex
+ exemplar interface{}
+ workers int
+ name string
+}
+
+// NewLevelQueue creates a LevelDB-backed local queue
+func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+ configInterface, err := toConfig(LevelQueueConfiguration{}, cfg)
+ if err != nil {
+ return nil, err
+ }
+ config := configInterface.(LevelQueueConfiguration)
+
+ internal, err := levelqueue.Open(config.DataDir)
+ if err != nil {
+ return nil, err
+ }
+
+ dataChan := make(chan Data, config.QueueLength)
+ ctx, cancel := context.WithCancel(context.Background())
+
+ queue := &LevelQueue{
+ pool: &WorkerPool{
+ baseCtx: ctx,
+ cancel: cancel,
+ batchLength: config.BatchLength,
+ handle: handle,
+ dataChan: dataChan,
+ blockTimeout: config.BlockTimeout,
+ boostTimeout: config.BoostTimeout,
+ boostWorkers: config.BoostWorkers,
+ maxNumberOfWorkers: config.MaxWorkers,
+ },
+ queue: internal,
+ exemplar: exemplar,
+ closed: make(chan struct{}),
+ terminated: make(chan struct{}),
+ workers: config.Workers,
+ name: config.Name,
+ }
+ queue.pool.qid = GetManager().Add(queue, LevelQueueType, config, exemplar, queue.pool)
+ return queue, nil
+}
+
+// Run starts to run the queue
+func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+ atShutdown(context.Background(), l.Shutdown)
+ atTerminate(context.Background(), l.Terminate)
+
+ go func() {
+ _ = l.pool.AddWorkers(l.workers, 0)
+ }()
+
+ go l.readToChan()
+
+ log.Trace("LevelQueue: %s Waiting til closed", l.name)
+ <-l.closed
+
+ log.Trace("LevelQueue: %s Waiting til done", l.name)
+ l.pool.Wait()
+
+ log.Trace("LevelQueue: %s Waiting til cleaned", l.name)
+ ctx, cancel := context.WithCancel(context.Background())
+ atTerminate(ctx, cancel)
+ l.pool.CleanUp(ctx)
+ cancel()
+ log.Trace("LevelQueue: %s Cleaned", l.name)
+}
+
+func (l *LevelQueue) readToChan() {
+ for {
+ select {
+ case <-l.closed:
+ // tell the pool to shutdown.
+ l.pool.cancel()
+ return
+ default:
+ bs, err := l.queue.RPop()
+ if err != nil {
+ if err != levelqueue.ErrNotFound {
+ log.Error("LevelQueue: %s Error on RPop: %v", l.name, err)
+ }
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ if len(bs) == 0 {
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ var data Data
+ if l.exemplar != nil {
+ t := reflect.TypeOf(l.exemplar)
+ n := reflect.New(t)
+ ne := n.Elem()
+ err = json.Unmarshal(bs, ne.Addr().Interface())
+ data = ne.Interface().(Data)
+ } else {
+ err = json.Unmarshal(bs, &data)
+ }
+ if err != nil {
+ log.Error("LevelQueue: %s Failed to unmarshal with error: %v", l.name, err)
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ log.Trace("LevelQueue %s: Task found: %#v", l.name, data)
+ l.pool.Push(data)
+ }
+ }
+}
+
+// Push will push the data to the queue
+func (l *LevelQueue) Push(data Data) error {
+ if l.exemplar != nil {
+ // Assert data is of same type as l.exemplar
+ value := reflect.ValueOf(data)
+ t := value.Type()
+ exemplarType := reflect.ValueOf(l.exemplar).Type()
+ if !t.AssignableTo(exemplarType) || data == nil {
+ return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, l.exemplar, l.name)
+ }
+ }
+ bs, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ return l.queue.LPush(bs)
+}
+
+// Shutdown this queue and stop processing
+func (l *LevelQueue) Shutdown() {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ log.Trace("LevelQueue: %s Shutdown", l.name)
+ select {
+ case <-l.closed:
+ default:
+ close(l.closed)
+ }
+}
+
+// Terminate this queue and close the underlying level queue
+func (l *LevelQueue) Terminate() {
+ log.Trace("LevelQueue: %s Terminating", l.name)
+ l.Shutdown()
+ l.lock.Lock()
+ select {
+ case <-l.terminated:
+ l.lock.Unlock()
+ default:
+ close(l.terminated)
+ l.lock.Unlock()
+ if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
+ log.Error("Error whilst closing internal queue in %s: %v", l.name, err)
+ }
+ }
+}
+
+// Name returns the name of this queue
+func (l *LevelQueue) Name() string {
+ return l.name
+}
+
+func init() {
+ queuesMap[LevelQueueType] = NewLevelQueue
+}
diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go
new file mode 100644
index 0000000000..895c8ce918
--- /dev/null
+++ b/modules/queue/queue_disk_channel.go
@@ -0,0 +1,193 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// PersistableChannelQueueType is the type for persistable queue
+const PersistableChannelQueueType Type = "persistable-channel"
+
+// PersistableChannelQueueConfiguration is the configuration for a PersistableChannelQueue
+type PersistableChannelQueueConfiguration struct {
+ Name string
+ DataDir string
+ BatchLength int
+ QueueLength int
+ Timeout time.Duration
+ MaxAttempts int
+ Workers int
+ MaxWorkers int
+ BlockTimeout time.Duration
+ BoostTimeout time.Duration
+ BoostWorkers int
+}
+
+// PersistableChannelQueue wraps a channel queue and level queue together
+type PersistableChannelQueue struct {
+ *ChannelQueue
+ delayedStarter
+ closed chan struct{}
+}
+
+// NewPersistableChannelQueue creates a batched channel queue with a persistable level queue backend that is used when shutting down
+// This differs from a wrapped queue in that the persistent queue is only used to persist at shutdown/terminate
+func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+ configInterface, err := toConfig(PersistableChannelQueueConfiguration{}, cfg)
+ if err != nil {
+ return nil, err
+ }
+ config := configInterface.(PersistableChannelQueueConfiguration)
+
+ channelQueue, err := NewChannelQueue(handle, ChannelQueueConfiguration{
+ QueueLength: config.QueueLength,
+ BatchLength: config.BatchLength,
+ Workers: config.Workers,
+ MaxWorkers: config.MaxWorkers,
+ BlockTimeout: config.BlockTimeout,
+ BoostTimeout: config.BoostTimeout,
+ BoostWorkers: config.BoostWorkers,
+ Name: config.Name + "-channel",
+ }, exemplar)
+ if err != nil {
+ return nil, err
+ }
+
+ // the level backend only needs temporary workers to catch up with the previously dropped work
+ levelCfg := LevelQueueConfiguration{
+ DataDir: config.DataDir,
+ QueueLength: config.QueueLength,
+ BatchLength: config.BatchLength,
+ Workers: 1,
+ MaxWorkers: 6,
+ BlockTimeout: 1 * time.Second,
+ BoostTimeout: 5 * time.Minute,
+ BoostWorkers: 5,
+ Name: config.Name + "-level",
+ }
+
+ levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar)
+ if err == nil {
+ queue := &PersistableChannelQueue{
+ ChannelQueue: channelQueue.(*ChannelQueue),
+ delayedStarter: delayedStarter{
+ internal: levelQueue.(*LevelQueue),
+ name: config.Name,
+ },
+ closed: make(chan struct{}),
+ }
+ _ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar, nil)
+ return queue, nil
+ }
+ if IsErrInvalidConfiguration(err) {
+ // Retrying ain't gonna make this any better...
+ return nil, ErrInvalidConfiguration{cfg: cfg}
+ }
+
+ queue := &PersistableChannelQueue{
+ ChannelQueue: channelQueue.(*ChannelQueue),
+ delayedStarter: delayedStarter{
+ cfg: levelCfg,
+ underlying: LevelQueueType,
+ timeout: config.Timeout,
+ maxAttempts: config.MaxAttempts,
+ name: config.Name,
+ },
+ closed: make(chan struct{}),
+ }
+ _ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar, nil)
+ return queue, nil
+}
+
+// Name returns the name of this queue
+func (p *PersistableChannelQueue) Name() string {
+ return p.delayedStarter.name
+}
+
+// Push will push the data to the queue, or to the internal level queue once closed
+func (p *PersistableChannelQueue) Push(data Data) error {
+ select {
+ case <-p.closed:
+ return p.internal.Push(data)
+ default:
+ return p.ChannelQueue.Push(data)
+ }
+}
+
+// Run starts to run the queue
+func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+ p.lock.Lock()
+ if p.internal == nil {
+ err := p.setInternal(atShutdown, p.ChannelQueue.pool.handle, p.exemplar)
+ p.lock.Unlock()
+ if err != nil {
+ log.Fatal("Unable to create internal queue for %s Error: %v", p.Name(), err)
+ return
+ }
+ } else {
+ p.lock.Unlock()
+ }
+ atShutdown(context.Background(), p.Shutdown)
+ atTerminate(context.Background(), p.Terminate)
+
+ // Just run the level queue - we shut it down later
+ go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {})
+
+ go func() {
+ _ = p.ChannelQueue.pool.AddWorkers(p.workers, 0)
+ }()
+
+ log.Trace("PersistableChannelQueue: %s Waiting til closed", p.delayedStarter.name)
+ <-p.closed
+ log.Trace("PersistableChannelQueue: %s Cancelling pools", p.delayedStarter.name)
+ p.ChannelQueue.pool.cancel()
+ p.internal.(*LevelQueue).pool.cancel()
+ log.Trace("PersistableChannelQueue: %s Waiting til done", p.delayedStarter.name)
+ p.ChannelQueue.pool.Wait()
+ p.internal.(*LevelQueue).pool.Wait()
+ // Redirect all remaining data in the chan to the internal channel
+ go func() {
+ log.Trace("PersistableChannelQueue: %s Redirecting remaining data", p.delayedStarter.name)
+ for data := range p.ChannelQueue.pool.dataChan {
+ _ = p.internal.Push(data)
+ }
+ log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", p.delayedStarter.name)
+ }()
+ log.Trace("PersistableChannelQueue: %s Done main loop", p.delayedStarter.name)
+}
+
+// Shutdown stops processing this queue
+func (p *PersistableChannelQueue) Shutdown() {
+ log.Trace("PersistableChannelQueue: %s Shutdown", p.delayedStarter.name)
+ select {
+ case <-p.closed:
+ default:
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if p.internal != nil {
+ p.internal.(*LevelQueue).Shutdown()
+ }
+ close(p.closed)
+ }
+}
+
+// Terminate this queue and terminate its internal queue
+func (p *PersistableChannelQueue) Terminate() {
+ log.Trace("PersistableChannelQueue: %s Terminating", p.delayedStarter.name)
+ p.Shutdown()
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if p.internal != nil {
+ p.internal.(*LevelQueue).Terminate()
+ }
+}
+
+func init() {
+ queuesMap[PersistableChannelQueueType] = NewPersistableChannelQueue
+}
diff --git a/modules/queue/queue_disk_channel_test.go b/modules/queue/queue_disk_channel_test.go
new file mode 100644
index 0000000000..4ef68961c6
--- /dev/null
+++ b/modules/queue/queue_disk_channel_test.go
@@ -0,0 +1,117 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPersistableChannelQueue(t *testing.T) {
+ handleChan := make(chan *testData)
+ handle := func(data ...Data) {
+ assert.True(t, len(data) == 2)
+ for _, datum := range data {
+ testDatum := datum.(*testData)
+ handleChan <- testDatum
+ }
+ }
+
+ queueShutdown := []func(){}
+ queueTerminate := []func(){}
+
+ tmpDir, err := ioutil.TempDir("", "persistable-channel-queue-test-data")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ queue, err := NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{
+ DataDir: tmpDir,
+ BatchLength: 2,
+ QueueLength: 20,
+ Workers: 1,
+ MaxWorkers: 10,
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(func(_ context.Context, shutdown func()) {
+ queueShutdown = append(queueShutdown, shutdown)
+ }, func(_ context.Context, terminate func()) {
+ queueTerminate = append(queueTerminate, terminate)
+ })
+
+ test1 := testData{"A", 1}
+ test2 := testData{"B", 2}
+
+ err = queue.Push(&test1)
+ assert.NoError(t, err)
+ go func() {
+ err = queue.Push(&test2)
+ assert.NoError(t, err)
+ }()
+
+ result1 := <-handleChan
+ assert.Equal(t, test1.TestString, result1.TestString)
+ assert.Equal(t, test1.TestInt, result1.TestInt)
+
+ result2 := <-handleChan
+ assert.Equal(t, test2.TestString, result2.TestString)
+ assert.Equal(t, test2.TestInt, result2.TestInt)
+
+ err = queue.Push(test1)
+ assert.Error(t, err)
+
+ for _, callback := range queueShutdown {
+ callback()
+ }
+ time.Sleep(200 * time.Millisecond)
+ err = queue.Push(&test1)
+ assert.NoError(t, err)
+ err = queue.Push(&test2)
+ assert.NoError(t, err)
+ select {
+ case <-handleChan:
+ assert.Fail(t, "Handler processing should have stopped")
+ default:
+ }
+ for _, callback := range queueTerminate {
+ callback()
+ }
+
+ // Reopen queue
+ queue, err = NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{
+ DataDir: tmpDir,
+ BatchLength: 2,
+ QueueLength: 20,
+ Workers: 1,
+ MaxWorkers: 10,
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(func(_ context.Context, shutdown func()) {
+ queueShutdown = append(queueShutdown, shutdown)
+ }, func(_ context.Context, terminate func()) {
+ queueTerminate = append(queueTerminate, terminate)
+ })
+
+ result3 := <-handleChan
+ assert.Equal(t, test1.TestString, result3.TestString)
+ assert.Equal(t, test1.TestInt, result3.TestInt)
+
+ result4 := <-handleChan
+ assert.Equal(t, test2.TestString, result4.TestString)
+ assert.Equal(t, test2.TestInt, result4.TestInt)
+ for _, callback := range queueShutdown {
+ callback()
+ }
+ for _, callback := range queueTerminate {
+ callback()
+ }
+}
diff --git a/modules/queue/queue_disk_test.go b/modules/queue/queue_disk_test.go
new file mode 100644
index 0000000000..c5959d606f
--- /dev/null
+++ b/modules/queue/queue_disk_test.go
@@ -0,0 +1,126 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLevelQueue(t *testing.T) {
+ handleChan := make(chan *testData)
+ handle := func(data ...Data) {
+ assert.True(t, len(data) == 2)
+ for _, datum := range data {
+ testDatum := datum.(*testData)
+ handleChan <- testDatum
+ }
+ }
+
+ queueShutdown := []func(){}
+ queueTerminate := []func(){}
+
+ tmpDir, err := ioutil.TempDir("", "level-queue-test-data")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ queue, err := NewLevelQueue(handle, LevelQueueConfiguration{
+ DataDir: tmpDir,
+ BatchLength: 2,
+ Workers: 1,
+ MaxWorkers: 10,
+ QueueLength: 20,
+ BlockTimeout: 1 * time.Second,
+ BoostTimeout: 5 * time.Minute,
+ BoostWorkers: 5,
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(func(_ context.Context, shutdown func()) {
+ queueShutdown = append(queueShutdown, shutdown)
+ }, func(_ context.Context, terminate func()) {
+ queueTerminate = append(queueTerminate, terminate)
+ })
+
+ test1 := testData{"A", 1}
+ test2 := testData{"B", 2}
+
+ err = queue.Push(&test1)
+ assert.NoError(t, err)
+ go func() {
+ err = queue.Push(&test2)
+ assert.NoError(t, err)
+ }()
+
+ result1 := <-handleChan
+ assert.Equal(t, test1.TestString, result1.TestString)
+ assert.Equal(t, test1.TestInt, result1.TestInt)
+
+ result2 := <-handleChan
+ assert.Equal(t, test2.TestString, result2.TestString)
+ assert.Equal(t, test2.TestInt, result2.TestInt)
+
+ err = queue.Push(test1)
+ assert.Error(t, err)
+
+ for _, callback := range queueShutdown {
+ callback()
+ }
+ time.Sleep(200 * time.Millisecond)
+ err = queue.Push(&test1)
+ assert.NoError(t, err)
+ err = queue.Push(&test2)
+ assert.NoError(t, err)
+ select {
+ case <-handleChan:
+ assert.Fail(t, "Handler processing should have stopped")
+ default:
+ }
+ for _, callback := range queueTerminate {
+ callback()
+ }
+
+ // Reopen queue
+ queue, err = NewWrappedQueue(handle,
+ WrappedQueueConfiguration{
+ Underlying: LevelQueueType,
+ Config: LevelQueueConfiguration{
+ DataDir: tmpDir,
+ BatchLength: 2,
+ Workers: 1,
+ MaxWorkers: 10,
+ QueueLength: 20,
+ BlockTimeout: 1 * time.Second,
+ BoostTimeout: 5 * time.Minute,
+ BoostWorkers: 5,
+ },
+ }, &testData{})
+ assert.NoError(t, err)
+
+ go queue.Run(func(_ context.Context, shutdown func()) {
+ queueShutdown = append(queueShutdown, shutdown)
+ }, func(_ context.Context, terminate func()) {
+ queueTerminate = append(queueTerminate, terminate)
+ })
+
+ result3 := <-handleChan
+ assert.Equal(t, test1.TestString, result3.TestString)
+ assert.Equal(t, test1.TestInt, result3.TestInt)
+
+ result4 := <-handleChan
+ assert.Equal(t, test2.TestString, result4.TestString)
+ assert.Equal(t, test2.TestInt, result4.TestInt)
+ for _, callback := range queueShutdown {
+ callback()
+ }
+ for _, callback := range queueTerminate {
+ callback()
+ }
+}
diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go
new file mode 100644
index 0000000000..14e68937a5
--- /dev/null
+++ b/modules/queue/queue_redis.go
@@ -0,0 +1,234 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+
+ "github.com/go-redis/redis"
+)
+
+// RedisQueueType is the type for redis queue
+const RedisQueueType Type = "redis"
+
+type redisClient interface {
+ RPush(key string, args ...interface{}) *redis.IntCmd
+ LPop(key string) *redis.StringCmd
+ Ping() *redis.StatusCmd
+ Close() error
+}
+
+// RedisQueue implements a redis-backed queue
+type RedisQueue struct {
+ pool *WorkerPool
+ client redisClient
+ queueName string
+ closed chan struct{}
+ terminated chan struct{}
+ exemplar interface{}
+ workers int
+ name string
+ lock sync.Mutex
+}
+
+// RedisQueueConfiguration is the configuration for the redis queue
+type RedisQueueConfiguration struct {
+ Network string
+ Addresses string
+ Password string
+ DBIndex int
+ BatchLength int
+ QueueLength int
+ QueueName string
+ Workers int
+ MaxWorkers int
+ BlockTimeout time.Duration
+ BoostTimeout time.Duration
+ BoostWorkers int
+ Name string
+}
+
+// NewRedisQueue creates a single redis or cluster redis queue
+func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+ configInterface, err := toConfig(RedisQueueConfiguration{}, cfg)
+ if err != nil {
+ return nil, err
+ }
+ config := configInterface.(RedisQueueConfiguration)
+
+ dbs := strings.Split(config.Addresses, ",")
+
+ dataChan := make(chan Data, config.QueueLength)
+ ctx, cancel := context.WithCancel(context.Background())
+
+ var queue = &RedisQueue{
+ pool: &WorkerPool{
+ baseCtx: ctx,
+ cancel: cancel,
+ batchLength: config.BatchLength,
+ handle: handle,
+ dataChan: dataChan,
+ blockTimeout: config.BlockTimeout,
+ boostTimeout: config.BoostTimeout,
+ boostWorkers: config.BoostWorkers,
+ maxNumberOfWorkers: config.MaxWorkers,
+ },
+ queueName: config.QueueName,
+ exemplar: exemplar,
+ closed: make(chan struct{}),
+ terminated: make(chan struct{}), // must be initialised here or Terminate would close a nil channel
+ workers: config.Workers,
+ name: config.Name,
+ }
+ // strings.Split on an empty string yields [""], so check for an empty single entry too
+ if len(dbs) == 0 || (len(dbs) == 1 && strings.TrimSpace(dbs[0]) == "") {
+ return nil, errors.New("no redis host specified")
+ } else if len(dbs) == 1 {
+ queue.client = redis.NewClient(&redis.Options{
+ Network: config.Network,
+ Addr: strings.TrimSpace(dbs[0]),
+ Password: config.Password,
+ DB: config.DBIndex,
+ })
+ } else {
+ queue.client = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: dbs,
+ })
+ }
+ if err := queue.client.Ping().Err(); err != nil {
+ return nil, err
+ }
+ queue.pool.qid = GetManager().Add(queue, RedisQueueType, config, exemplar, queue.pool)
+
+ return queue, nil
+}
+
+// Run runs the redis queue
+func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+ atShutdown(context.Background(), r.Shutdown)
+ atTerminate(context.Background(), r.Terminate)
+
+ go func() {
+ _ = r.pool.AddWorkers(r.workers, 0)
+ }()
+
+ go r.readToChan()
+
+ log.Trace("RedisQueue: %s Waiting til closed", r.name)
+ <-r.closed
+ log.Trace("RedisQueue: %s Waiting til done", r.name)
+ r.pool.Wait()
+
+ log.Trace("RedisQueue: %s Waiting til cleaned", r.name)
+ ctx, cancel := context.WithCancel(context.Background())
+ atTerminate(ctx, cancel)
+ r.pool.CleanUp(ctx)
+ cancel()
+}
+
+func (r *RedisQueue) readToChan() {
+ for {
+ select {
+ case <-r.closed:
+ // tell the pool to shutdown
+ r.pool.cancel()
+ return
+ default:
+ bs, err := r.client.LPop(r.queueName).Bytes()
+ if err != nil && err != redis.Nil {
+ log.Error("RedisQueue: %s Error on LPop: %v", r.name, err)
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ if len(bs) == 0 {
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ var data Data
+ if r.exemplar != nil {
+ t := reflect.TypeOf(r.exemplar)
+ n := reflect.New(t)
+ ne := n.Elem()
+ err = json.Unmarshal(bs, ne.Addr().Interface())
+ data = ne.Interface().(Data)
+ } else {
+ err = json.Unmarshal(bs, &data)
+ }
+ if err != nil {
+ log.Error("RedisQueue: %s Error on Unmarshal: %v", r.name, err)
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+
+ log.Trace("RedisQueue: %s Task found: %#v", r.name, data)
+ r.pool.Push(data)
+ }
+ }
+}
+
+// Push implements Queue
+func (r *RedisQueue) Push(data Data) error {
+ if r.exemplar != nil {
+ // Assert data is of same type as r.exemplar
+ value := reflect.ValueOf(data)
+ t := value.Type()
+ exemplarType := reflect.ValueOf(r.exemplar).Type()
+ if !t.AssignableTo(exemplarType) || data == nil {
+ return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, r.exemplar, r.name)
+ }
+ }
+ bs, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ return r.client.RPush(r.queueName, bs).Err()
+}
+
+// Shutdown stops processing from this queue
+func (r *RedisQueue) Shutdown() {
+ log.Trace("Shutdown: %s", r.name)
+ r.lock.Lock()
+ select {
+ case <-r.closed:
+ default:
+ close(r.closed)
+ }
+ r.lock.Unlock()
+}
+
+// Terminate this queue and close the redis client
+func (r *RedisQueue) Terminate() {
+ log.Trace("Terminating: %s", r.name)
+ r.Shutdown()
+ r.lock.Lock()
+ select {
+ case <-r.terminated:
+ r.lock.Unlock()
+ default:
+ close(r.terminated)
+ r.lock.Unlock()
+ if err := r.client.Close(); err != nil {
+ log.Error("Error whilst closing internal redis client in %s: %v", r.name, err)
+ }
+ }
+}
+
+// Name returns the name of this queue
+func (r *RedisQueue) Name() string {
+ return r.name
+}
+
+func init() {
+ queuesMap[RedisQueueType] = NewRedisQueue
+}
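Unlike the other backends, the Redis queue has no test in this diff. A hedged configuration sketch (the package, addresses, names and the taskData payload type are invented): a single address produces redis.NewClient, while a comma-separated list produces a cluster client. Note that NewRedisQueue pings the server during construction, so it fails fast when Redis is unreachable.

    package example

    import (
    	"time"

    	"code.gitea.io/gitea/modules/queue"
    )

    // taskData is a hypothetical payload type for illustration only.
    type taskData struct{ ID int64 }

    func newRedisExample(handler queue.HandlerFunc) (queue.Queue, error) {
    	cfg := queue.RedisQueueConfiguration{
    		Network:      "tcp",
    		Addresses:    "127.0.0.1:6379", // "host1:6379,host2:6379" would build a cluster client
    		Password:     "",
    		DBIndex:      0,
    		QueueName:    "task_queue",
    		QueueLength:  20,
    		BatchLength:  1,
    		Workers:      1,
    		MaxWorkers:   10,
    		BlockTimeout: time.Second,
    		BoostTimeout: 5 * time.Minute,
    		BoostWorkers: 5,
    		Name:         "example-redis",
    	}
    	return queue.NewRedisQueue(handler, cfg, &taskData{})
    }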
diff --git a/modules/queue/queue_test.go b/modules/queue/queue_test.go
new file mode 100644
index 0000000000..3608f68d3d
--- /dev/null
+++ b/modules/queue/queue_test.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type testData struct {
+ TestString string
+ TestInt int
+}
+
+func TestToConfig(t *testing.T) {
+ cfg := testData{
+ TestString: "Config",
+ TestInt: 10,
+ }
+ exemplar := testData{}
+
+ cfg2I, err := toConfig(exemplar, cfg)
+ assert.NoError(t, err)
+ cfg2, ok := (cfg2I).(testData)
+ assert.True(t, ok)
+ assert.NotEqual(t, cfg2, exemplar)
+ assert.Equal(t, &cfg, &cfg2)
+
+ cfgString, err := json.Marshal(cfg)
+ assert.NoError(t, err)
+
+ cfg3I, err := toConfig(exemplar, cfgString)
+ assert.NoError(t, err)
+ cfg3, ok := (cfg3I).(testData)
+ assert.True(t, ok)
+ assert.Equal(t, cfg.TestString, cfg3.TestString)
+ assert.Equal(t, cfg.TestInt, cfg3.TestInt)
+ assert.NotEqual(t, cfg3, exemplar)
+}
diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go
new file mode 100644
index 0000000000..d0b93b54d0
--- /dev/null
+++ b/modules/queue/queue_wrapped.go
@@ -0,0 +1,206 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// WrappedQueueType is the type for a wrapped delayed starting queue
+const WrappedQueueType Type = "wrapped"
+
+// WrappedQueueConfiguration is the configuration for a WrappedQueue
+type WrappedQueueConfiguration struct {
+ Underlying Type
+ Timeout time.Duration
+ MaxAttempts int
+ Config interface{}
+ QueueLength int
+ Name string
+}
+
+type delayedStarter struct {
+ lock sync.Mutex
+ internal Queue
+ underlying Type
+ cfg interface{}
+ timeout time.Duration
+ maxAttempts int
+ name string
+}
+
+// setInternal must be called with the lock locked; it returns with the lock still locked and the caller must unlock it.
+func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), handle HandlerFunc, exemplar interface{}) error {
+ var ctx context.Context
+ var cancel context.CancelFunc
+ if q.timeout > 0 {
+ ctx, cancel = context.WithTimeout(context.Background(), q.timeout)
+ } else {
+ ctx, cancel = context.WithCancel(context.Background())
+ }
+
+ defer cancel()
+ // Ensure we also stop at shutdown
+ atShutdown(ctx, func() {
+ cancel()
+ })
+
+ i := 1
+ for q.internal == nil {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("Timed out creating queue %v with cfg %v in %s", q.underlying, q.cfg, q.name)
+ default:
+ queue, err := NewQueue(q.underlying, handle, q.cfg, exemplar)
+ if err == nil {
+ q.internal = queue
+ // the caller holds the lock and will unlock it - unlocking here too would double-unlock
+ break
+ }
+ if err.Error() != "resource temporarily unavailable" {
+ log.Warn("[Attempt: %d] Failed to create queue: %v for %s cfg: %v error: %v", i, q.underlying, q.name, q.cfg, err)
+ }
+ i++
+ if q.maxAttempts > 0 && i > q.maxAttempts {
+ return fmt.Errorf("Unable to create queue %v for %s with cfg %v by max attempts: error: %v", q.underlying, q.name, q.cfg, err)
+ }
+ sleepTime := 100 * time.Millisecond
+ if q.timeout > 0 && q.maxAttempts > 0 {
+ sleepTime = (q.timeout - 200*time.Millisecond) / time.Duration(q.maxAttempts)
+ }
+ t := time.NewTimer(sleepTime)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ case <-t.C:
+ }
+ }
+ }
+ return nil
+}
+
+// WrappedQueue is a channel-fronted queue whose underlying queue may start with a delay
+type WrappedQueue struct {
+ delayedStarter
+ handle HandlerFunc
+ exemplar interface{}
+ channel chan Data
+}
+
+// NewWrappedQueue will attempt to create a queue of the provided type,
+// but if there is a problem creating this queue it will instead create
+// a WrappedQueue with delayed startup of the queue and a
+// channel whose contents will be redirected to the queue once it starts
+func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+ configInterface, err := toConfig(WrappedQueueConfiguration{}, cfg)
+ if err != nil {
+ return nil, err
+ }
+ config := configInterface.(WrappedQueueConfiguration)
+
+ queue, err := NewQueue(config.Underlying, handle, config.Config, exemplar)
+ if err == nil {
+ // Just return the queue there is no need to wrap
+ return queue, nil
+ }
+ if IsErrInvalidConfiguration(err) {
+ // Retrying ain't gonna make this any better...
+ return nil, ErrInvalidConfiguration{cfg: cfg}
+ }
+
+ queue = &WrappedQueue{
+ handle: handle,
+ channel: make(chan Data, config.QueueLength),
+ exemplar: exemplar,
+ delayedStarter: delayedStarter{
+ cfg: config.Config,
+ underlying: config.Underlying,
+ timeout: config.Timeout,
+ maxAttempts: config.MaxAttempts,
+ name: config.Name,
+ },
+ }
+ _ = GetManager().Add(queue, WrappedQueueType, config, exemplar, nil)
+ return queue, nil
+}
+
+// Name returns the name of the queue
+func (q *WrappedQueue) Name() string {
+ return q.name + "-wrapper"
+}
+
+// Push will push the data to the internal channel checking it against the exemplar
+func (q *WrappedQueue) Push(data Data) error {
+ if q.exemplar != nil {
+ // Assert data is of same type as q.exemplar
+ value := reflect.ValueOf(data)
+ t := value.Type()
+ exemplarType := reflect.ValueOf(q.exemplar).Type()
+ if !t.AssignableTo(exemplarType) || data == nil {
+ return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name)
+ }
+ }
+ q.channel <- data
+ return nil
+}
+
+// Run starts to run the queue and attempts to create the internal queue
+func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
+ q.lock.Lock()
+ if q.internal == nil {
+ err := q.setInternal(atShutdown, q.handle, q.exemplar)
+ q.lock.Unlock()
+ if err != nil {
+ log.Fatal("Unable to set the internal queue for %s Error: %v", q.Name(), err)
+ return
+ }
+ go func() {
+ for data := range q.channel {
+ _ = q.internal.Push(data)
+ }
+ }()
+ } else {
+ q.lock.Unlock()
+ }
+
+ q.internal.Run(atShutdown, atTerminate)
+ log.Trace("WrappedQueue: %s Done", q.name)
+}
+
+// Shutdown this queue and stop processing
+func (q *WrappedQueue) Shutdown() {
+ log.Trace("WrappedQueue: %s Shutdown", q.name)
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ if q.internal == nil {
+ return
+ }
+ if shutdownable, ok := q.internal.(Shutdownable); ok {
+ shutdownable.Shutdown()
+ }
+}
+
+// Terminate this queue and terminate the internal queue if possible
+func (q *WrappedQueue) Terminate() {
+ log.Trace("WrappedQueue: %s Terminating", q.name)
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ if q.internal == nil {
+ return
+ }
+ if shutdownable, ok := q.internal.(Shutdownable); ok {
+ shutdownable.Terminate()
+ }
+}
+
+func init() {
+ queuesMap[WrappedQueueType] = NewWrappedQueue
+}
diff --git a/modules/queue/setting.go b/modules/queue/setting.go
new file mode 100644
index 0000000000..d5a6b41882
--- /dev/null
+++ b/modules/queue/setting.go
@@ -0,0 +1,75 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+func validType(t string) (Type, error) {
+ if len(t) == 0 {
+ return PersistableChannelQueueType, nil
+ }
+ for _, typ := range RegisteredTypes() {
+ if t == string(typ) {
+ return typ, nil
+ }
+ }
+ return PersistableChannelQueueType, fmt.Errorf("Unknown queue type: %s defaulting to %s", t, string(PersistableChannelQueueType))
+}
+
+// CreateQueue creates a queue for the provided name with the given handler and exemplar
+func CreateQueue(name string, handle HandlerFunc, exemplar interface{}) Queue {
+ q := setting.GetQueueSettings(name)
+ opts := make(map[string]interface{})
+ opts["Name"] = name
+ opts["QueueLength"] = q.Length
+ opts["BatchLength"] = q.BatchLength
+ opts["DataDir"] = q.DataDir
+ opts["Addresses"] = q.Addresses
+ opts["Network"] = q.Network
+ opts["Password"] = q.Password
+ opts["DBIndex"] = q.DBIndex
+ opts["QueueName"] = q.QueueName
+ opts["Workers"] = q.Workers
+ opts["MaxWorkers"] = q.MaxWorkers
+ opts["BlockTimeout"] = q.BlockTimeout
+ opts["BoostTimeout"] = q.BoostTimeout
+ opts["BoostWorkers"] = q.BoostWorkers
+
+ typ, err := validType(q.Type)
+ if err != nil {
+ log.Error("Invalid type %s provided for queue named %s defaulting to %s", q.Type, name, string(typ))
+ }
+
+ cfg, err := json.Marshal(opts)
+ if err != nil {
+ log.Error("Unable to marshal generic options: %v Error: %v", opts, err)
+ log.Error("Unable to create queue for %s: %v", name, err)
+ return nil
+ }
+
+ returnable, err := NewQueue(typ, handle, cfg, exemplar)
+ if q.WrapIfNecessary && err != nil {
+ log.Warn("Unable to create queue for %s: %v", name, err)
+ log.Warn("Attempting to create wrapped queue")
+ returnable, err = NewQueue(WrappedQueueType, handle, WrappedQueueConfiguration{
+ Underlying: Type(q.Type),
+ Timeout: q.Timeout,
+ MaxAttempts: q.MaxAttempts,
+ Config: cfg,
+ QueueLength: q.Length,
+ }, exemplar)
+ }
+ if err != nil {
+ log.Error("Unable to create queue for %s: %v", name, err)
+ return nil
+ }
+ return returnable
+}
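CreateQueue is the entry point the gracefulised issue indexer and task system use elsewhere in this PR: it builds its options from setting.GetQueueSettings(name), falls back to a wrapped queue when WrapIfNecessary is set, and returns nil on failure. A hedged consumer sketch (the package, payload type and queue name are placeholders; the atShutdown/atTerminate callbacks are supplied by the caller - Gitea wires them from its graceful manager):

    package example

    import (
    	"context"

    	"code.gitea.io/gitea/modules/queue"
    )

    // indexerData is a hypothetical payload type.
    type indexerData struct{ RepoID int64 }

    func startQueue(atShutdown, atTerminate func(context.Context, func())) queue.Queue {
    	q := queue.CreateQueue("example_indexer", func(data ...queue.Data) {
    		for _, datum := range data {
    			d := datum.(*indexerData)
    			_ = d.RepoID // handle this item...
    		}
    	}, &indexerData{})
    	if q == nil {
    		return nil // CreateQueue has already logged the failure
    	}
    	go q.Run(atShutdown, atTerminate)
    	return q
    }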
diff --git a/modules/queue/workerpool.go b/modules/queue/workerpool.go
new file mode 100644
index 0000000000..25fc7dd644
--- /dev/null
+++ b/modules/queue/workerpool.go
@@ -0,0 +1,325 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+)
+
+// WorkerPool is a pool of workers that handles data from a channel, growing and shrinking as required
+type WorkerPool struct {
+ lock sync.Mutex
+ baseCtx context.Context
+ cancel context.CancelFunc
+ cond *sync.Cond
+ qid int64
+ maxNumberOfWorkers int
+ numberOfWorkers int
+ batchLength int
+ handle HandlerFunc
+ dataChan chan Data
+ blockTimeout time.Duration
+ boostTimeout time.Duration
+ boostWorkers int
+}
+
+// Push pushes the data to the internal channel
+func (p *WorkerPool) Push(data Data) {
+ p.lock.Lock()
+ if p.blockTimeout > 0 && p.boostTimeout > 0 && (p.numberOfWorkers <= p.maxNumberOfWorkers || p.maxNumberOfWorkers < 0) {
+ p.lock.Unlock()
+ p.pushBoost(data)
+ } else {
+ p.lock.Unlock()
+ p.dataChan <- data
+ }
+}
+
+func (p *WorkerPool) pushBoost(data Data) {
+ select {
+ case p.dataChan <- data:
+ default:
+ p.lock.Lock()
+ if p.blockTimeout <= 0 {
+ p.lock.Unlock()
+ p.dataChan <- data
+ return
+ }
+ ourTimeout := p.blockTimeout
+ timer := time.NewTimer(p.blockTimeout)
+ p.lock.Unlock()
+ select {
+ case p.dataChan <- data:
+ if timer.Stop() {
+ select {
+ case <-timer.C:
+ default:
+ }
+ }
+ case <-timer.C:
+ p.lock.Lock()
+ if p.blockTimeout > ourTimeout || (p.numberOfWorkers > p.maxNumberOfWorkers && p.maxNumberOfWorkers >= 0) {
+ p.lock.Unlock()
+ p.dataChan <- data
+ return
+ }
+ p.blockTimeout *= 2
+ ctx, cancel := context.WithCancel(p.baseCtx)
+ mq := GetManager().GetManagedQueue(p.qid)
+ boost := p.boostWorkers
+ if (boost+p.numberOfWorkers) > p.maxNumberOfWorkers && p.maxNumberOfWorkers >= 0 {
+ boost = p.maxNumberOfWorkers - p.numberOfWorkers
+ }
+ if mq != nil {
+ log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, mq.Name, ourTimeout, boost, p.boostTimeout, p.blockTimeout)
+
+ start := time.Now()
+ pid := mq.RegisterWorkers(boost, start, false, start, cancel)
+ go func() {
+ <-ctx.Done()
+ mq.RemoveWorkers(pid)
+ cancel()
+ }()
+ } else {
+ log.Warn("WorkerPool: %d Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout)
+ }
+ go func() {
+ <-time.After(p.boostTimeout)
+ cancel()
+ p.lock.Lock()
+ p.blockTimeout /= 2
+ p.lock.Unlock()
+ }()
+ p.addWorkers(ctx, boost)
+ p.lock.Unlock()
+ p.dataChan <- data
+ }
+ }
+}
+
+// NumberOfWorkers returns the number of current workers in the pool
+func (p *WorkerPool) NumberOfWorkers() int {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.numberOfWorkers
+}
+
+// MaxNumberOfWorkers returns the maximum number of workers automatically added to the pool
+func (p *WorkerPool) MaxNumberOfWorkers() int {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.maxNumberOfWorkers
+}
+
+// BoostWorkers returns the number of workers for a boost
+func (p *WorkerPool) BoostWorkers() int {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.boostWorkers
+}
+
+// BoostTimeout returns the timeout of the next boost
+func (p *WorkerPool) BoostTimeout() time.Duration {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.boostTimeout
+}
+
+// BlockTimeout returns the timeout until the next boost
+func (p *WorkerPool) BlockTimeout() time.Duration {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.blockTimeout
+}
+
+// SetSettings sets the settable boost values
+func (p *WorkerPool) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ p.maxNumberOfWorkers = maxNumberOfWorkers
+ p.boostWorkers = boostWorkers
+ p.boostTimeout = timeout
+}
+
+// SetMaxNumberOfWorkers sets the maximum number of workers automatically added to the pool
+// Changing this number will not change the number of current workers but will change the limit
+// for future additions
+func (p *WorkerPool) SetMaxNumberOfWorkers(newMax int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ p.maxNumberOfWorkers = newMax
+}
+
+// AddWorkers adds workers to the pool - this allows the number of workers to go above the limit
+func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
+ var ctx context.Context
+ var cancel context.CancelFunc
+ start := time.Now()
+ end := start
+ hasTimeout := false
+ if timeout > 0 {
+ ctx, cancel = context.WithTimeout(p.baseCtx, timeout)
+ end = start.Add(timeout)
+ hasTimeout = true
+ } else {
+ ctx, cancel = context.WithCancel(p.baseCtx)
+ }
+
+ mq := GetManager().GetManagedQueue(p.qid)
+ if mq != nil {
+ pid := mq.RegisterWorkers(number, start, hasTimeout, end, cancel)
+ go func() {
+ <-ctx.Done()
+ mq.RemoveWorkers(pid)
+ cancel()
+ }()
+ log.Trace("WorkerPool: %d (for %s) adding %d workers with group id: %d", p.qid, mq.Name, number, pid)
+ } else {
+ log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number)
+ }
+ p.addWorkers(ctx, number)
+ return cancel
+}
+
+// addWorkers adds workers to the pool
+func (p *WorkerPool) addWorkers(ctx context.Context, number int) {
+ for i := 0; i < number; i++ {
+ p.lock.Lock()
+ if p.cond == nil {
+ p.cond = sync.NewCond(&p.lock)
+ }
+ p.numberOfWorkers++
+ p.lock.Unlock()
+ go func() {
+ p.doWork(ctx)
+
+ p.lock.Lock()
+ p.numberOfWorkers--
+ if p.numberOfWorkers == 0 {
+ p.cond.Broadcast()
+ } else if p.numberOfWorkers < 0 {
+ // numberOfWorkers can't go negative but...
+ log.Warn("Number of Workers < 0 for QID %d - this shouldn't happen", p.qid)
+ p.numberOfWorkers = 0
+ p.cond.Broadcast()
+ }
+ p.lock.Unlock()
+ }()
+ }
+}
+
+// Wait for WorkerPool to finish
+func (p *WorkerPool) Wait() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ if p.cond == nil {
+ p.cond = sync.NewCond(&p.lock)
+ }
+ if p.numberOfWorkers <= 0 {
+ return
+ }
+ p.cond.Wait()
+}
+
+// CleanUp will drain the remaining contents of the channel
+// This should be called after AddWorkers context is closed
+func (p *WorkerPool) CleanUp(ctx context.Context) {
+ log.Trace("WorkerPool: %d CleanUp", p.qid)
+ close(p.dataChan)
+ for data := range p.dataChan {
+ p.handle(data)
+ select {
+ case <-ctx.Done():
+ log.Warn("WorkerPool: %d Cleanup context closed before finishing clean-up", p.qid)
+ return
+ default:
+ }
+ }
+ log.Trace("WorkerPool: %d CleanUp Done", p.qid)
+}
+
+func (p *WorkerPool) doWork(ctx context.Context) {
+ delay := time.Millisecond * 300
+ var data = make([]Data, 0, p.batchLength)
+ for {
+ select {
+ case <-ctx.Done():
+ if len(data) > 0 {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ }
+ log.Trace("Worker shutting down")
+ return
+ case datum, ok := <-p.dataChan:
+ if !ok {
+ // the dataChan has been closed - we should finish up:
+ if len(data) > 0 {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ }
+ log.Trace("Worker shutting down")
+ return
+ }
+ data = append(data, datum)
+ if len(data) >= p.batchLength {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ data = make([]Data, 0, p.batchLength)
+ }
+ default:
+ timer := time.NewTimer(delay)
+ select {
+ case <-ctx.Done():
+ if timer.Stop() {
+ select {
+ case <-timer.C:
+ default:
+ }
+ }
+ if len(data) > 0 {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ }
+ log.Trace("Worker shutting down")
+ return
+ case datum, ok := <-p.dataChan:
+ if timer.Stop() {
+ select {
+ case <-timer.C:
+ default:
+ }
+ }
+ if !ok {
+ // the dataChan has been closed - we should finish up:
+ if len(data) > 0 {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ }
+ log.Trace("Worker shutting down")
+ return
+ }
+ data = append(data, datum)
+ if len(data) >= p.batchLength {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ data = make([]Data, 0, p.batchLength)
+ }
+ case <-timer.C:
+ delay = time.Millisecond * 100
+ if len(data) > 0 {
+ log.Trace("Handling: %d data, %v", len(data), data)
+ p.handle(data...)
+ data = make([]Data, 0, p.batchLength)
+ }
+ }
+ }
+ }
+}
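The block/boost mechanics in pushBoost are easiest to see with concrete numbers. A hedged example (the package, helper and values are invented): with the configuration below, a Push that still blocks after one second adds up to five temporary workers (capped so the total never exceeds MaxWorkers), doubles blockTimeout to two seconds, and halves it back when the five-minute boost expires.

    package example

    import (
    	"time"

    	"code.gitea.io/gitea/modules/queue"
    )

    // job is a hypothetical payload type used as the exemplar.
    type job struct{ ID int64 }

    func newBoostingQueue(handler queue.HandlerFunc) (queue.Queue, error) {
    	return queue.NewChannelQueue(handler, queue.ChannelQueueConfiguration{
    		QueueLength:  20,               // capacity of the underlying dataChan
    		BatchLength:  1,                // hand items to the handler one at a time
    		Workers:      1,                // permanent workers started by Run
    		MaxWorkers:   10,               // hard cap enforced when boosting
    		BlockTimeout: time.Second,      // how long Push may block before boosting
    		BoostTimeout: 5 * time.Minute,  // lifetime of the temporary workers
    		BoostWorkers: 5,                // temporary workers added per boost
    		Name:         "example-channel",
    	}, &job{})
    }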