BoostWorkers() int
// SetPoolSettings sets the user updatable settings for the pool
SetPoolSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
+ // Done returns a channel that will be closed when the Pool's baseCtx is closed
+ Done() <-chan struct{}
}
// ManagedQueueList implements the sort.Interface
delete(m.Queues, qid)
m.mutex.Unlock()
log.Trace("Queue Manager removed: QID: %d", qid)
-
}
// GetManagedQueue by qid
wg.Done()
continue
}
+
+ if pool, ok := mq.Managed.(ManagedPool); ok {
+ // No point in flushing pools when their base ctx is already done.
+ select {
+ case <-pool.Done():
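+ // Still mark this queue as done so wg.Wait() below is not left blocking on it.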
+ wg.Done()
+ continue
+ default:
+ }
+ }
+
allEmpty = false
if flushable, ok := mq.Managed.(Flushable); ok {
log.Debug("Flushing (flushable) queue: %s", mq.Name)
wg.Wait()
}
return nil
-
}
// ManagedQueues returns the managed queues
q.internal.(*LevelQueue).Shutdown()
GetManager().Remove(q.internal.(*LevelQueue).qid)
}
-
}
// Flush flushes the queue and blocks till the queue is empty
q.channelQueue.Wait()
q.internal.(*LevelQueue).Wait()
// Redirect all remaining data in the chan to the internal channel
- go func() {
- log.Trace("PersistableChannelQueue: %s Redirecting remaining data", q.delayedStarter.name)
- for data := range q.channelQueue.dataChan {
- _ = q.internal.Push(data)
- atomic.AddInt64(&q.channelQueue.numInQueue, -1)
- }
- log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", q.delayedStarter.name)
- }()
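+ // Closing dataChan lets the drain loop below terminate once the remaining data has been pushed to the internal queue.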
+ close(q.channelQueue.dataChan)
+ log.Trace("PersistableChannelQueue: %s Redirecting remaining data", q.delayedStarter.name)
+ for data := range q.channelQueue.dataChan {
+ _ = q.internal.Push(data)
+ atomic.AddInt64(&q.channelQueue.numInQueue, -1)
+ }
+ log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", q.delayedStarter.name)
log.Debug("PersistableChannelQueue: %s Shutdown", q.delayedStarter.name)
}
return pool
}
+// Done returns a channel that is closed when this worker pool's base context has been cancelled
+func (p *WorkerPool) Done() <-chan struct{} {
+ return p.baseCtx.Done()
+}
+
// Push pushes the data to the internal channel
func (p *WorkerPool) Push(data Data) {
atomic.AddInt64(&p.numInQueue, 1)
boost = p.maxNumberOfWorkers - p.numberOfWorkers
}
if mq != nil {
- log.Warn("WorkerPool: %d (for %s) has zero workers - adding %d temporary workers for %s", p.qid, mq.Name, boost, p.boostTimeout)
+ log.Debug("WorkerPool: %d (for %s) has zero workers - adding %d temporary workers for %s", p.qid, mq.Name, boost, p.boostTimeout)
start := time.Now()
pid := mq.RegisterWorkers(boost, start, true, start.Add(p.boostTimeout), cancel, false)
mq.RemoveWorkers(pid)
}
} else {
- log.Warn("WorkerPool: %d has zero workers - adding %d temporary workers for %s", p.qid, p.boostWorkers, p.boostTimeout)
+ log.Debug("WorkerPool: %d has zero workers - adding %d temporary workers for %s", p.qid, p.boostWorkers, p.boostTimeout)
}
p.lock.Unlock()
p.addWorkers(ctx, cancel, boost)
log.Trace("WorkerPool: %d Flush", p.qid)
for {
select {
- case data := <-p.dataChan:
+ case data, ok := <-p.dataChan:
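+ // A closed dataChan means there is nothing left to flush.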
+ if !ok {
+ return nil
+ }
p.handle(data)
atomic.AddInt64(&p.numInQueue, -1)
case <-p.baseCtx.Done():
func (p *WorkerPool) doWork(ctx context.Context) {
delay := time.Millisecond * 300
- var data = make([]Data, 0, p.batchLength)
+ data := make([]Data, 0, p.batchLength)
for {
select {
case <-ctx.Done():