Diffstat (limited to 'vendor/github.com/go-redis/redis/v7')
-rw-r--r--  vendor/github.com/go-redis/redis/v7/.gitignore | 2
-rw-r--r--  vendor/github.com/go-redis/redis/v7/.golangci.yml | 15
-rw-r--r--  vendor/github.com/go-redis/redis/v7/.travis.yml | 22
-rw-r--r--  vendor/github.com/go-redis/redis/v7/CHANGELOG.md | 46
-rw-r--r--  vendor/github.com/go-redis/redis/v7/LICENSE | 25
-rw-r--r--  vendor/github.com/go-redis/redis/v7/Makefile | 20
-rw-r--r--  vendor/github.com/go-redis/redis/v7/README.md | 128
-rw-r--r--  vendor/github.com/go-redis/redis/v7/cluster.go | 1669
-rw-r--r--  vendor/github.com/go-redis/redis/v7/cluster_commands.go | 22
-rw-r--r--  vendor/github.com/go-redis/redis/v7/command.go | 2064
-rw-r--r--  vendor/github.com/go-redis/redis/v7/commands.go | 2643
-rw-r--r--  vendor/github.com/go-redis/redis/v7/doc.go | 4
-rw-r--r--  vendor/github.com/go-redis/redis/v7/error.go | 108
-rw-r--r--  vendor/github.com/go-redis/redis/v7/go.mod | 15
-rw-r--r--  vendor/github.com/go-redis/redis/v7/go.sum | 47
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go | 81
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go | 77
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/internal.go | 24
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/log.go | 8
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/once.go | 60
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/pool/conn.go | 118
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/pool/pool.go | 517
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go | 208
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go | 112
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/proto/reader.go | 314
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/proto/scan.go | 166
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/proto/writer.go | 165
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/util.go | 56
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/util/safe.go | 11
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/util/strconv.go | 19
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go | 22
-rw-r--r--  vendor/github.com/go-redis/redis/v7/iterator.go | 75
-rw-r--r--  vendor/github.com/go-redis/redis/v7/options.go | 249
-rw-r--r--  vendor/github.com/go-redis/redis/v7/pipeline.go | 142
-rw-r--r--  vendor/github.com/go-redis/redis/v7/pubsub.go | 595
-rw-r--r--  vendor/github.com/go-redis/redis/v7/redis.go | 758
-rw-r--r--  vendor/github.com/go-redis/redis/v7/result.go | 180
-rw-r--r--  vendor/github.com/go-redis/redis/v7/ring.go | 726
-rw-r--r--  vendor/github.com/go-redis/redis/v7/script.go | 62
-rw-r--r--  vendor/github.com/go-redis/redis/v7/sentinel.go | 509
-rw-r--r--  vendor/github.com/go-redis/redis/v7/tx.go | 159
-rw-r--r--  vendor/github.com/go-redis/redis/v7/universal.go | 198
42 files changed, 12441 insertions, 0 deletions
diff --git a/vendor/github.com/go-redis/redis/v7/.gitignore b/vendor/github.com/go-redis/redis/v7/.gitignore
new file mode 100644
index 0000000000..ebfe903bcd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/.gitignore
@@ -0,0 +1,2 @@
+*.rdb
+testdata/*/
diff --git a/vendor/github.com/go-redis/redis/v7/.golangci.yml b/vendor/github.com/go-redis/redis/v7/.golangci.yml
new file mode 100644
index 0000000000..912dab1ef3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/.golangci.yml
@@ -0,0 +1,15 @@
+run:
+ concurrency: 8
+ deadline: 5m
+ tests: false
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - gocognit
+ - goconst
+ - godox
+ - gosec
+ - maligned
+ - wsl
diff --git a/vendor/github.com/go-redis/redis/v7/.travis.yml b/vendor/github.com/go-redis/redis/v7/.travis.yml
new file mode 100644
index 0000000000..3f93932bc8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/.travis.yml
@@ -0,0 +1,22 @@
+dist: xenial
+language: go
+
+services:
+ - redis-server
+
+go:
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+env:
+ - GO111MODULE=on
+
+go_import_path: github.com/go-redis/redis
+
+before_install:
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0
diff --git a/vendor/github.com/go-redis/redis/v7/CHANGELOG.md b/vendor/github.com/go-redis/redis/v7/CHANGELOG.md
new file mode 100644
index 0000000000..bd4eccff24
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/CHANGELOG.md
@@ -0,0 +1,46 @@
+# Changelog
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements the `fmt.Stringer` interface.
+
+## v7
+
+- *Important*. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a transactional pipeline.
+- WrapProcess is replaced with the more convenient AddHook, which has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is reworked to not use ReceiveTimeout so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal Redis servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
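
The `ClusterSlots` option mentioned in the v6.12 entry (and the `ReloadState` method added in v7) can be combined roughly as sketched below. This is a minimal, hypothetical setup, not part of the vendored code: the addresses and the two-way slot split are placeholders.

``` go
package main

import "github.com/go-redis/redis/v7"

func main() {
	// Build a "cluster" out of standalone servers by supplying the slot
	// layout ourselves instead of relying on CLUSTER SLOTS.
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		ClusterSlots: func() ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{
				// First server owns slots 0-8191, second owns 8192-16383.
				{Start: 0, End: 8191, Nodes: []redis.ClusterNode{{Addr: "server1:6379"}}},
				{Start: 8192, End: 16383, Nodes: []redis.ClusterNode{{Addr: "server2:6379"}}},
			}, nil
		},
		RouteRandomly: true,
	})
	defer rdb.Close()

	// Manually trigger state reloading, e.g. after the slot layout changes.
	_ = rdb.ReloadState()
}
```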
diff --git a/vendor/github.com/go-redis/redis/v7/LICENSE b/vendor/github.com/go-redis/redis/v7/LICENSE
new file mode 100644
index 0000000000..298bed9bea
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v7/Makefile b/vendor/github.com/go-redis/redis/v7/Makefile
new file mode 100644
index 0000000000..86609c6e07
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/Makefile
@@ -0,0 +1,20 @@
+all: testdeps
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ golangci-lint run
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
diff --git a/vendor/github.com/go-redis/redis/v7/README.md b/vendor/github.com/go-redis/redis/v7/README.md
new file mode 100644
index 0000000000..0fbb506ead
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/README.md
@@ -0,0 +1,128 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
+[![Airbrake](https://img.shields.io/badge/kudos-airbrake.io-orange.svg)](https://airbrake.io)
+
+Supports:
+
+- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
+- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
+- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
+- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
+- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
+- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
+- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
+- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode or Redis Sentinel.
+- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
+- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
+- [Cache friendly](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
+- [Distributed Locks](https://github.com/bsm/redislock).
+
+API docs: https://godoc.org/github.com/go-redis/redis.
+Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.
+
+## Installation
+
+go-redis requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and uses import versioning, so make sure to initialize a Go module before installing go-redis:
+
+``` shell
+go mod init github.com/my/repo
+go get github.com/go-redis/redis/v7
+```
+
+Import:
+
+``` go
+import "github.com/go-redis/redis/v7"
+```
+
+## Quickstart
+
+``` go
+func ExampleNewClient() {
+ client := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ pong, err := client.Ping().Result()
+ fmt.Println(pong, err)
+ // Output: PONG <nil>
+}
+
+func ExampleClient() {
+ client := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+ err := client.Set("key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := client.Get("key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := client.Get("key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+## Howto
+
+Please go through the [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea of how to use this package.
+
+## Look and feel
+
+Some corner cases:
+
+``` go
+// SET key value EX 10 NX
+set, err := client.SetNX("key", "value", 10*time.Second).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := client.Sort("list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := client.ZInterStore("out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := client.Do("set", "key", "value").Result()
+```
+
+## See also
+
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/vmihailenco/taskq)
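
The README above lists Redis Sentinel and Redis Cluster support but only demonstrates a single-node client. A minimal sketch of both constructors follows; the master name and all addresses are placeholders.

``` go
package main

import "github.com/go-redis/redis/v7"

func main() {
	// Sentinel-backed failover client; "mymaster" and the sentinel
	// addresses are placeholders for an existing deployment.
	failover := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"sentinel1:26379", "sentinel2:26379"},
	})
	defer failover.Close()

	// Cluster client seeded with a few node addresses; the client
	// discovers the rest of the topology via CLUSTER SLOTS.
	cluster := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"node1:7000", "node2:7001", "node3:7002"},
	})
	defer cluster.Close()

	_ = failover.Ping().Err()
	_ = cluster.Ping().Err()
}
```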
diff --git a/vendor/github.com/go-redis/redis/v7/cluster.go b/vendor/github.com/go-redis/redis/v7/cluster.go
new file mode 100644
index 0000000000..1907de6c41
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/cluster.go
@@ -0,0 +1,1669 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "math/rand"
+ "net"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/hashtag"
+ "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 8 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful for manually creating a cluster of standalone Redis servers
+ // and load-balancing read/write operations between masters and slaves.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func() ([]ClusterSlot, error)
+
+ // Optional hook that is called when a new node is created.
+ OnNewNode func(*Client)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(*Conn) error
+
+ Username string
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 8
+ }
+
+ if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.NumCPU()
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+ Username: opt.Username,
+ Password: opt.Password,
+ readOnly: opt.ReadOnly,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: disableIdleCheck,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ if clOpt.OnNewNode != nil {
+ clOpt.OnNewNode(node.Client)
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+ const probes = 10
+
+ var latency uint32
+ for i := 0; i < probes; i++ {
+ start := time.Now()
+ n.Client.Ping()
+ probe := uint32(time.Since(start) / time.Microsecond)
+ latency = (latency + probe) / 2
+ }
+ atomic.StoreUint32(&n.latency, latency)
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ allAddrs []string
+ allNodes map[string]*clusterNode
+ clusterAddrs []string
+ closed bool
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ allAddrs: opt.Addrs,
+ allNodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.allNodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.allNodes = nil
+ c.clusterAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+ c.mu.RLock()
+ closed := c.closed
+ if !closed {
+ if len(c.clusterAddrs) > 0 {
+ addrs = c.clusterAddrs
+ } else {
+ addrs = c.allAddrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+ c.mu.Lock()
+ for addr, node := range c.allNodes {
+ if node.Generation() >= generation {
+ continue
+ }
+
+ c.clusterAddrs = remove(c.clusterAddrs, addr)
+ delete(c.allNodes, addr)
+ collected = append(collected, node)
+ }
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.allNodes[addr]
+ if ok {
+ return node, err
+ }
+
+ node = newClusterNode(c.opt, addr)
+
+ c.allAddrs = appendIfNotExists(c.allAddrs, addr)
+ c.clusterAddrs = append(c.clusterAddrs, addr)
+ c.allNodes[addr] = node
+
+ return node, err
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.allNodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.allNodes))
+ for _, node := range c.allNodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.Get(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ const threshold = time.Millisecond
+
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Failing() {
+ continue
+ }
+ if node == nil || node.Latency()-n.Latency() > threshold {
+ node = n
+ }
+ }
+ return node, nil
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ n := rand.Intn(len(nodes))
+ return nodes[n], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func() (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload() (*clusterState, error) {
+ state, err := c.load()
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload()
+ if err != nil {
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get() (*clusterState, error) {
+ v := c.state.Load()
+ if v != nil {
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > time.Minute {
+ c.LazyReload()
+ }
+ return state, nil
+ }
+ return c.Reload()
+}
+
+func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
+ state, err := c.Reload()
+ if err == nil {
+ return state, nil
+ }
+ return c.Get()
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder //nolint:structcheck
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ *clusterClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ clusterClient: &clusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ },
+ ctx: context.Background(),
+ }
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState() error {
+ _, err := c.state.Reload()
+ return err
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(args ...interface{}) *Cmd {
+ return c.DoContext(c.ctx, args...)
+}
+
+func (c *ClusterClient) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ _ = c.ProcessContext(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *ClusterClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ err := c._process(ctx, cmd)
+ if err != nil {
+ cmd.SetErr(err)
+ return err
+ }
+ return nil
+}
+
+func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ slot := c.cmdSlot(cmd)
+
+ var node *clusterNode
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(cmdInfo, slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(NewCmd("asking"))
+ _ = pipe.Process(cmd)
+ _, lastErr = pipe.ExecContext(ctx)
+ _ = pipe.Close()
+ ask = false
+ } else {
+ lastErr = node.Client.ProcessContext(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if lastErr != Nil {
+ c.state.LazyReload()
+ }
+ if lastErr == pool.ErrClosed || isReadOnlyError(lastErr) {
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ var err error
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isRetryableError(lastErr, cmd.readTimeout() == nil) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachNode concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get()
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState() (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots()
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ for _, addr := range addrs {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots().Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ for _, node := range nodes {
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf("ReapStaleConns failed: %s", err)
+ }
+ }
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+ err := c.mapCmdsByNode(cmdsMap, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get()
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ if err == nil {
+ continue
+ }
+ if c.checkMovedErr(cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isLoadingError(err) {
+ node.MarkAsFailing()
+ return err
+ }
+ if isRedisError(err) {
+ continue
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload()
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd("asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ state, err := c.state.Get()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := c.txPipelineReadQueued(rd, statusCmd, cmds, failedCmds)
+ if err != nil {
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+ }
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ cmds []Cmder, moved, ask bool, addr string, failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload()
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd("asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
+ return c.WatchContext(c.ctx, fn, keys...)
+}
+
+func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.WatchContext(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+ if err != Nil {
+ c.state.LazyReload()
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if err == pool.ErrClosed || isReadOnlyError(err) {
+ node, err = c.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isRetryableError(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ for _, addr := range addrs {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node == nil {
+ continue
+ }
+
+ info, err := node.Client.Command().Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get()
+ if err != nil {
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf("info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(cmdInfo *CommandInfo, slot int) (*clusterNode, error) {
+ state, err := c.state.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
+ state, err := c.state.Get()
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+func remove(ss []string, es ...string) []string {
+ if len(es) == 0 {
+ return ss[:0]
+ }
+ for _, e := range es {
+ for i, s := range ss {
+ if s == e {
+ ss = append(ss[:i], ss[i+1:]...)
+ break
+ }
+ }
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
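
cluster.go above defines ForEachMaster as a concurrent fan-out over the master nodes that returns the first error, if any. A short usage sketch follows; the seed addresses and the per-master task are placeholders.

``` go
package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	// Placeholder seed addresses; not part of the vendored code.
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"node1:7000", "node2:7001"},
	})
	defer rdb.Close()

	// ForEachMaster runs the callback on every master concurrently and
	// returns the first error, matching the contract in cluster.go above.
	err := rdb.ForEachMaster(func(master *redis.Client) error {
		n, err := master.DBSize().Result()
		if err != nil {
			return err
		}
		fmt.Printf("%s holds %d keys\n", master.Options().Addr, n)
		return nil
	})
	if err != nil {
		fmt.Println("fan-out failed:", err)
	}
}
```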
diff --git a/vendor/github.com/go-redis/redis/v7/cluster_commands.go b/vendor/github.com/go-redis/redis/v7/cluster_commands.go
new file mode 100644
index 0000000000..c9b9b9de24
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/cluster_commands.go
@@ -0,0 +1,22 @@
+package redis
+
+import "sync/atomic"
+
+func (c *ClusterClient) DBSize() *IntCmd {
+ cmd := NewIntCmd("dbsize")
+ var size int64
+ err := c.ForEachMaster(func(master *Client) error {
+ n, err := master.DBSize().Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v7/command.go b/vendor/github.com/go-redis/redis/v7/command.go
new file mode 100644
index 0000000000..dd7fe4a91e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/command.go
@@ -0,0 +1,2064 @@
+package redis
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v7/internal/util"
+)
+
+type Cmder interface {
+ Name() string
+ Args() []interface{}
+ String() string
+ stringArg(int) string
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ ss := make([]string, 0, len(cmd.Args()))
+ for _, arg := range cmd.Args() {
+ ss = append(ss, fmt.Sprint(arg))
+ }
+ s := strings.Join(ss, " ")
+ if err := cmd.Err(); err != nil {
+ return s + ": " + err.Error()
+ }
+ if val != nil {
+ switch vv := val.(type) {
+ case []byte:
+ return s + ": " + string(vv)
+ default:
+ return s + ": " + fmt.Sprint(val)
+ }
+ }
+ return s
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ }
+ if info == nil {
+ return 0
+ }
+ return int(info.FirstKeyPos)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ args []interface{}
+ err error
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ s, _ := cmd.args[pos].(string)
+ return s
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadReply(sliceParser)
+ return cmd.err
+}
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, n)
+ for i := 0; i < len(vals); i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals[i] = nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals[i] = err
+ continue
+ }
+ return nil, err
+ }
+ vals[i] = v
+ }
+ return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(sliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadIntReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ num, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = num
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{args: args},
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ var n int64
+ n, cmd.err = rd.ReadIntReply()
+ if cmd.err != nil {
+ return cmd.err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
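+
+// Usage sketch (illustrative only; rdb is an assumed *Client, not part of this
+// file). The negative sentinels can be told apart from a real TTL like this:
+//
+//   d, err := rdb.TTL("mykey").Result()
+//   switch {
+//   case err != nil:
+//       // handle error
+//   case d == -2:
+//       // the key does not exist
+//   case d == -1:
+//       // the key exists but has no associated expire
+//   default:
+//       // d is the remaining TTL at the command's precision
+//   }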
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = time.Unix(sec, microsec*1000)
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadReply(nil)
+ // `SET key value NX` returns nil when the key already exists, but
+ // `SETNX key value` returns an integer reply (0/1), so convert a nil reply to false.
+ if cmd.err == Nil {
+ cmd.val = false
+ cmd.err = nil
+ return nil
+ }
+ if cmd.err != nil {
+ return cmd.err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case string:
+ cmd.val = v == "OK"
+ return nil
+ default:
+ cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
+ return cmd.err
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadFloatReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = n == 1
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = n
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(xMessageSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]XMessage)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ msgs := make([]XMessage, n)
+ for i := 0; i < len(msgs); i++ {
+ i := i
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ var values map[string]interface{}
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ if err != proto.Nil {
+ return nil, err
+ }
+ } else {
+ values = v.(map[string]interface{})
+ }
+
+ msgs[i] = XMessage{
+ ID: id,
+ Values: values,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := rd.ReadArrayReply(xMessageSliceParser)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = XStream{
+ Stream: stream,
+ Messages: v.([]XMessage),
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.val.Consumers == nil {
+ cmd.val.Consumers = make(map[string]int64)
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = append(cmd.val, XPendingExt{
+ ID: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroups
+}
+
+type XInfoGroups struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{args: []interface{}{"xinfo", "groups", stream}},
+ }
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroups {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroups, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(
+ func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(xGroupInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ cmd.val = append(cmd.val, v.(XInfoGroups))
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+func xGroupInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 8 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply,"+
+ "wanted 8", n)
+ }
+ var (
+ err error
+ grp XInfoGroups
+ key string
+ val string
+ )
+
+ for i := 0; i < 4; i++ {
+ key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ val, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "name":
+ grp.Name = val
+ case "consumers":
+ grp.Consumers, err = strconv.ParseInt(val, 0, 64)
+ case "pending":
+ grp.Pending, err = strconv.ParseInt(val, 0, 64)
+ case "last-delivered-id":
+ grp.LastDeliveredID = val
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO GROUPS reply", key)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return grp, err
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]Z, n/2)
+ for i := 0; i < len(cmd.val); i++ {
+ member, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ score, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = Z{
+ Member: member,
+ Score: score,
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 3 {
+ return nil, fmt.Errorf("got %d elements, expected 3", n)
+ }
+
+ cmd.val = &ZWithKey{}
+ var err error
+
+ cmd.val.Key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process func(cmd Cmder) error
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{args: args},
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply()
+ return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
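+
+// Usage sketch (illustrative only; rdb is an assumed *Client). The iterator
+// hides cursor bookkeeping behind Next/Val/Err:
+//
+//   iter := rdb.Scan(0, "prefix:*", 0).Iterator()
+//   for iter.Next() {
+//       fmt.Println(iter.Val())
+//   }
+//   if err := iter.Err(); err != nil {
+//       // handle error
+//   }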
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]ClusterSlot, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].ID = id
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{args: geoLocationArgs(q, args...)},
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
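+
+// For example (illustrative values): a GeoRadiusQuery with Radius=200,
+// Unit="km", WithCoord=true and Count=10 appends
+// 200, "km", "withcoord", "count", 10 to the base arguments.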
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ //TODO: avoid copying
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]*GeoPos, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ longitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ latitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{args: args},
+ }
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ cmd.val[vv.Name] = vv
+ }
+ return nil, nil
+ })
+ return cmd.err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+
+ switch n {
+ case numArgRedis5, numArgRedis6:
+ // continue
+ default:
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
+ }
+
+ var cmd CommandInfo
+ var err error
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.Flags = make([]string, n)
+ for i := 0; i < len(cmd.Flags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.Flags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.Flags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ if n == numArgRedis5 {
+ return &cmd, nil
+ }
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.ACLFlags = make([]string, n)
+ for i := 0; i < len(cmd.ACLFlags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.ACLFlags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.ACLFlags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func() (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn()
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
diff --git a/vendor/github.com/go-redis/redis/v7/commands.go b/vendor/github.com/go-redis/redis/v7/commands.go
new file mode 100644
index 0000000000..da5ceda13e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/commands.go
@@ -0,0 +1,2643 @@
+package redis
+
+import (
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+)
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Millisecond,
+ )
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Second,
+ )
+ }
+ return int64(dur / time.Second)
+}
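+
+// Worked examples (not exhaustive): formatMs(1500*time.Millisecond) returns 1500,
+// formatSec(2*time.Second) returns 2, and formatMs(500*time.Microsecond) logs a
+// warning and returns 0, because the commands that use it cannot express
+// sub-millisecond durations.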
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ switch v := src[0].(type) {
+ case []string:
+ for _, s := range v {
+ dst = append(dst, s)
+ }
+ return dst
+ case map[string]interface{}:
+ for k, v := range v {
+ dst = append(dst, k, v)
+ }
+ return dst
+ }
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
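+
+// For example (illustrative): with dst = []interface{}{"mset"}, a src holding a
+// single map[string]interface{}{"k1": "v1", "k2": "v2"} is flattened into
+// "mset", "k1", "v1", "k2", "v2" (pair order is not guaranteed), and a single
+// []string is expanded element by element; any other src is appended as-is.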
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command() *CommandsInfoCmd
+ ClientGetName() *StringCmd
+ Echo(message interface{}) *StringCmd
+ Ping() *StatusCmd
+ Quit() *StatusCmd
+ Del(keys ...string) *IntCmd
+ Unlink(keys ...string) *IntCmd
+ Dump(key string) *StringCmd
+ Exists(keys ...string) *IntCmd
+ Expire(key string, expiration time.Duration) *BoolCmd
+ ExpireAt(key string, tm time.Time) *BoolCmd
+ Keys(pattern string) *StringSliceCmd
+ Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(key string, db int) *BoolCmd
+ ObjectRefCount(key string) *IntCmd
+ ObjectEncoding(key string) *StringCmd
+ ObjectIdleTime(key string) *DurationCmd
+ Persist(key string) *BoolCmd
+ PExpire(key string, expiration time.Duration) *BoolCmd
+ PExpireAt(key string, tm time.Time) *BoolCmd
+ PTTL(key string) *DurationCmd
+ RandomKey() *StringCmd
+ Rename(key, newkey string) *StatusCmd
+ RenameNX(key, newkey string) *BoolCmd
+ Restore(key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+ Sort(key string, sort *Sort) *StringSliceCmd
+ SortStore(key, store string, sort *Sort) *IntCmd
+ SortInterfaces(key string, sort *Sort) *SliceCmd
+ Touch(keys ...string) *IntCmd
+ TTL(key string) *DurationCmd
+ Type(key string) *StatusCmd
+ Scan(cursor uint64, match string, count int64) *ScanCmd
+ SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ Append(key, value string) *IntCmd
+ BitCount(key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(destKey string, keys ...string) *IntCmd
+ BitOpOr(destKey string, keys ...string) *IntCmd
+ BitOpXor(destKey string, keys ...string) *IntCmd
+ BitOpNot(destKey string, key string) *IntCmd
+ BitPos(key string, bit int64, pos ...int64) *IntCmd
+ BitField(key string, args ...interface{}) *IntSliceCmd
+ Decr(key string) *IntCmd
+ DecrBy(key string, decrement int64) *IntCmd
+ Get(key string) *StringCmd
+ GetBit(key string, offset int64) *IntCmd
+ GetRange(key string, start, end int64) *StringCmd
+ GetSet(key string, value interface{}) *StringCmd
+ Incr(key string) *IntCmd
+ IncrBy(key string, value int64) *IntCmd
+ IncrByFloat(key string, value float64) *FloatCmd
+ MGet(keys ...string) *SliceCmd
+ MSet(values ...interface{}) *StatusCmd
+ MSetNX(values ...interface{}) *BoolCmd
+ Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetBit(key string, offset int64, value int) *IntCmd
+ SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(key string, offset int64, value string) *IntCmd
+ StrLen(key string) *IntCmd
+ HDel(key string, fields ...string) *IntCmd
+ HExists(key, field string) *BoolCmd
+ HGet(key, field string) *StringCmd
+ HGetAll(key string) *StringStringMapCmd
+ HIncrBy(key, field string, incr int64) *IntCmd
+ HIncrByFloat(key, field string, incr float64) *FloatCmd
+ HKeys(key string) *StringSliceCmd
+ HLen(key string) *IntCmd
+ HMGet(key string, fields ...string) *SliceCmd
+ HSet(key string, values ...interface{}) *IntCmd
+ HMSet(key string, values ...interface{}) *BoolCmd
+ HSetNX(key, field string, value interface{}) *BoolCmd
+ HVals(key string) *StringSliceCmd
+ BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+ LIndex(key string, index int64) *StringCmd
+ LInsert(key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(key string, pivot, value interface{}) *IntCmd
+ LLen(key string) *IntCmd
+ LPop(key string) *StringCmd
+ LPush(key string, values ...interface{}) *IntCmd
+ LPushX(key string, values ...interface{}) *IntCmd
+ LRange(key string, start, stop int64) *StringSliceCmd
+ LRem(key string, count int64, value interface{}) *IntCmd
+ LSet(key string, index int64, value interface{}) *StatusCmd
+ LTrim(key string, start, stop int64) *StatusCmd
+ RPop(key string) *StringCmd
+ RPopLPush(source, destination string) *StringCmd
+ RPush(key string, values ...interface{}) *IntCmd
+ RPushX(key string, values ...interface{}) *IntCmd
+ SAdd(key string, members ...interface{}) *IntCmd
+ SCard(key string) *IntCmd
+ SDiff(keys ...string) *StringSliceCmd
+ SDiffStore(destination string, keys ...string) *IntCmd
+ SInter(keys ...string) *StringSliceCmd
+ SInterStore(destination string, keys ...string) *IntCmd
+ SIsMember(key string, member interface{}) *BoolCmd
+ SMembers(key string) *StringSliceCmd
+ SMembersMap(key string) *StringStructMapCmd
+ SMove(source, destination string, member interface{}) *BoolCmd
+ SPop(key string) *StringCmd
+ SPopN(key string, count int64) *StringSliceCmd
+ SRandMember(key string) *StringCmd
+ SRandMemberN(key string, count int64) *StringSliceCmd
+ SRem(key string, members ...interface{}) *IntCmd
+ SUnion(keys ...string) *StringSliceCmd
+ SUnionStore(destination string, keys ...string) *IntCmd
+ XAdd(a *XAddArgs) *StringCmd
+ XDel(stream string, ids ...string) *IntCmd
+ XLen(stream string) *IntCmd
+ XRange(stream, start, stop string) *XMessageSliceCmd
+ XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(streams ...string) *XStreamSliceCmd
+ XGroupCreate(stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(stream, group, start string) *StatusCmd
+ XGroupSetID(stream, group, start string) *StatusCmd
+ XGroupDestroy(stream, group string) *IntCmd
+ XGroupDelConsumer(stream, group, consumer string) *IntCmd
+ XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(stream, group string, ids ...string) *IntCmd
+ XPending(stream, group string) *XPendingCmd
+ XPendingExt(a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(a *XClaimArgs) *StringSliceCmd
+ XTrim(key string, maxLen int64) *IntCmd
+ XTrimApprox(key string, maxLen int64) *IntCmd
+ XInfoGroups(key string) *XInfoGroupsCmd
+ BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd
+ ZAdd(key string, members ...*Z) *IntCmd
+ ZAddNX(key string, members ...*Z) *IntCmd
+ ZAddXX(key string, members ...*Z) *IntCmd
+ ZAddCh(key string, members ...*Z) *IntCmd
+ ZAddNXCh(key string, members ...*Z) *IntCmd
+ ZAddXXCh(key string, members ...*Z) *IntCmd
+ ZIncr(key string, member *Z) *FloatCmd
+ ZIncrNX(key string, member *Z) *FloatCmd
+ ZIncrXX(key string, member *Z) *FloatCmd
+ ZCard(key string) *IntCmd
+ ZCount(key, min, max string) *IntCmd
+ ZLexCount(key, min, max string) *IntCmd
+ ZIncrBy(key string, increment float64, member string) *FloatCmd
+ ZInterStore(destination string, store *ZStore) *IntCmd
+ ZPopMax(key string, count ...int64) *ZSliceCmd
+ ZPopMin(key string, count ...int64) *ZSliceCmd
+ ZRange(key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
+ ZRank(key, member string) *IntCmd
+ ZRem(key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(key, min, max string) *IntCmd
+ ZRemRangeByLex(key, min, max string) *IntCmd
+ ZRevRange(key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(key, member string) *IntCmd
+ ZScore(key, member string) *FloatCmd
+ ZUnionStore(dest string, store *ZStore) *IntCmd
+ PFAdd(key string, els ...interface{}) *IntCmd
+ PFCount(keys ...string) *IntCmd
+ PFMerge(dest string, keys ...string) *StatusCmd
+ BgRewriteAOF() *StatusCmd
+ BgSave() *StatusCmd
+ ClientKill(ipPort string) *StatusCmd
+ ClientKillByFilter(keys ...string) *IntCmd
+ ClientList() *StringCmd
+ ClientPause(dur time.Duration) *BoolCmd
+ ClientID() *IntCmd
+ ConfigGet(parameter string) *SliceCmd
+ ConfigResetStat() *StatusCmd
+ ConfigSet(parameter, value string) *StatusCmd
+ ConfigRewrite() *StatusCmd
+ DBSize() *IntCmd
+ FlushAll() *StatusCmd
+ FlushAllAsync() *StatusCmd
+ FlushDB() *StatusCmd
+ FlushDBAsync() *StatusCmd
+ Info(section ...string) *StringCmd
+ LastSave() *IntCmd
+ Save() *StatusCmd
+ Shutdown() *StatusCmd
+ ShutdownSave() *StatusCmd
+ ShutdownNoSave() *StatusCmd
+ SlaveOf(host, port string) *StatusCmd
+ Time() *TimeCmd
+ Eval(script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(hashes ...string) *BoolSliceCmd
+ ScriptFlush() *StatusCmd
+ ScriptKill() *StatusCmd
+ ScriptLoad(script string) *StringCmd
+ DebugObject(key string) *StringCmd
+ Publish(channel string, message interface{}) *IntCmd
+ PubSubChannels(pattern string) *StringSliceCmd
+ PubSubNumSub(channels ...string) *StringIntMapCmd
+ PubSubNumPat() *IntCmd
+ ClusterSlots() *ClusterSlotsCmd
+ ClusterNodes() *StringCmd
+ ClusterMeet(host, port string) *StatusCmd
+ ClusterForget(nodeID string) *StatusCmd
+ ClusterReplicate(nodeID string) *StatusCmd
+ ClusterResetSoft() *StatusCmd
+ ClusterResetHard() *StatusCmd
+ ClusterInfo() *StringCmd
+ ClusterKeySlot(key string) *IntCmd
+ ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(nodeID string) *IntCmd
+ ClusterCountKeysInSlot(slot int) *IntCmd
+ ClusterDelSlots(slots ...int) *StatusCmd
+ ClusterDelSlotsRange(min, max int) *StatusCmd
+ ClusterSaveConfig() *StatusCmd
+ ClusterSlaves(nodeID string) *StringSliceCmd
+ ClusterFailover() *StatusCmd
+ ClusterAddSlots(slots ...int) *StatusCmd
+ ClusterAddSlotsRange(min, max int) *StatusCmd
+ GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(key string, members ...string) *GeoPosCmd
+ GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoDist(key string, member1, member2, unit string) *FloatCmd
+ GeoHash(key string, members ...string) *StringSliceCmd
+ ReadOnly() *StatusCmd
+ ReadWrite() *StatusCmd
+ MemoryUsage(key string, samples ...int) *IntCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(password string) *StatusCmd
+ AuthACL(username, password string) *StatusCmd
+ Select(index int) *StatusCmd
+ SwapDB(index1, index2 int) *StatusCmd
+ ClientSetName(name string) *BoolCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable func(cmd Cmder) error
+
+type statefulCmdable func(cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(password string) *StatusCmd {
+ cmd := NewStatusCmd("auth", password)
+ _ = c(cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or newer instance that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(username, password string) *StatusCmd {
+ cmd := NewStatusCmd("auth", username, password)
+ _ = c(cmd)
+ return cmd
+}
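+
+// Usage sketch (illustrative only; the connection and credentials are
+// assumptions, not part of this file):
+//
+//   conn := rdb.Conn()
+//   if err := conn.AuthACL("app-user", "app-secret").Err(); err != nil {
+//       // authentication failed
+//   }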
+
+func (c cmdable) Echo(message interface{}) *StringCmd {
+ cmd := NewStringCmd("echo", message)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Ping() *StatusCmd {
+ cmd := NewStatusCmd("ping")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Quit() *StatusCmd {
+ panic("not implemented")
+}
+
+func (c statefulCmdable) Select(index int) *StatusCmd {
+ cmd := NewStatusCmd("select", index)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd("swapdb", index1, index2)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command() *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd("command")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Del(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(key string) *StringCmd {
+ cmd := NewStringCmd("dump", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("expire", key, formatSec(expiration))
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd("expireat", key, tm.Unix())
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("keys", pattern)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Move(key string, db int) *BoolCmd {
+ cmd := NewBoolCmd("move", key, db)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(key string) *IntCmd {
+ cmd := NewIntCmd("object", "refcount", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(key string) *StringCmd {
+ cmd := NewStringCmd("object", "encoding", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(key string) *BoolCmd {
+ cmd := NewBoolCmd("persist", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey() *StringCmd {
+ cmd := NewStringCmd("randomkey")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd("rename", key, newkey)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd("renamenx", key, newkey)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ "replace",
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c cmdable) Sort(key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(sort.args(key)...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(sort.args(key)...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "ttl", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Type(key string) *StatusCmd {
+ cmd := NewStatusCmd("type", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c, args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c, args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c, args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c, args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Append(key, value string) *IntCmd {
+ cmd := NewIntCmd("append", key, value)
+ _ = c(cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(destKey string, key string) *IntCmd {
+ return c.bitOp("not", destKey, key)
+}
+
+func (c cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) BitField(key string, args ...interface{}) *IntSliceCmd {
+ a := make([]interface{}, 0, 2+len(args))
+ a = append(a, "bitfield")
+ a = append(a, key)
+ a = append(a, args...)
+ cmd := NewIntSliceCmd(a...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(key string) *IntCmd {
+ cmd := NewIntCmd("decr", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd("decrby", key, decrement)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(key string) *StringCmd {
+ cmd := NewStringCmd("get", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GetBit(key string, offset int64) *IntCmd {
+ cmd := NewIntCmd("getbit", key, offset)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd("getrange", key, start, end)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd("getset", key, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(key string) *IntCmd {
+ cmd := NewIntCmd("incr", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(key string, value int64) *IntCmd {
+ cmd := NewIntCmd("incrby", key, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd("incrbyfloat", key, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
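+
+// exampleMSetForms is a hypothetical usage sketch (nothing in the library
+// calls it): it exercises the three argument forms described in the MSet doc
+// comment, assuming rdb exposes the cmdable command methods.
+func exampleMSetForms(rdb cmdable) error {
+ // Flat key/value pairs.
+ if err := rdb.MSet("key1", "value1", "key2", "value2").Err(); err != nil {
+ return err
+ }
+ // A slice of alternating keys and values.
+ if err := rdb.MSet([]string{"key1", "value1", "key2", "value2"}).Err(); err != nil {
+ return err
+ }
+ // A map of keys to values.
+ return rdb.MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()
+}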
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(expiration))
+ } else {
+ args = append(args, "ex", formatSec(expiration))
+ }
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
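+
+// exampleSetExpiration is a hypothetical usage sketch (nothing in the library
+// calls it): a zero expiration stores the key without a TTL, whole-second
+// durations are sent as EX, and sub-second precision is sent as PX.
+func exampleSetExpiration(rdb cmdable) error {
+ // No expiration.
+ if err := rdb.Set("session", "token", 0).Err(); err != nil {
+ return err
+ }
+ // Expires after one hour (sent as "ex 3600").
+ if err := rdb.Set("cache:page", "<html>", time.Hour).Err(); err != nil {
+ return err
+ }
+ // Sub-second precision is sent as "px 250".
+ return rdb.Set("lock", "owner", 250*time.Millisecond).Err()
+}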
+
+func (c cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd("setnx", key, value)
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+ }
+ }
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ cmd = NewBoolCmd("set", key, value, "xx")
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+ }
+ }
+ _ = c(cmd)
+ return cmd
+}
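+
+// exampleSetNXSetXX is a hypothetical usage sketch (nothing in the library
+// calls it): SetNX writes only when the key is missing, SetXX only when it
+// already exists, and both report via the returned BoolCmd whether the write
+// actually happened.
+func exampleSetNXSetXX(rdb cmdable) (created, updated bool, err error) {
+ created, err = rdb.SetNX("counter", 0, time.Minute).Result()
+ if err != nil {
+ return
+ }
+ updated, err = rdb.SetXX("counter", 1, time.Minute).Result()
+ return
+}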
+
+func (c cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd("setrange", key, offset, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(key string) *IntCmd {
+ cmd := NewIntCmd("strlen", key)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(key, field string) *BoolCmd {
+ cmd := NewBoolCmd("hexists", key, field)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(key, field string) *StringCmd {
+ cmd := NewStringCmd("hget", key, field)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("hgetall", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd("hincrby", key, field, incr)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hkeys", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(key string) *IntCmd {
+ cmd := NewIntCmd("hlen", key)
+ _ = c(cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns []interface{} so that an empty string can be distinguished from a nil (missing) value.
+func (c cmdable) HMGet(key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+// HSet accepts values in the following formats:
+//   - HSet("myhash", "key1", "value1", "key2", "value2")
+//   - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//   - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that Redis v4 or newer is required for multiple field/value pairs.
+func (c cmdable) HSet(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
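+
+// exampleHSetForms is a hypothetical usage sketch (nothing in the library
+// calls it): it shows two of the field/value forms HSet accepts and reads a
+// field back with HGet.
+func exampleHSetForms(rdb cmdable) (string, error) {
+ if err := rdb.HSet("myhash", "key1", "value1", "key2", "value2").Err(); err != nil {
+ return "", err
+ }
+ if err := rdb.HSet("myhash", map[string]interface{}{"key3": "value3"}).Err(); err != nil {
+ return "", err
+ }
+ return rdb.HGet("myhash", "key1").Result()
+}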
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd("hsetnx", key, field, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hvals", key)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(key string, index int64) *StringCmd {
+ cmd := NewStringCmd("lindex", key, index)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, op, pivot, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "before", pivot, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "after", pivot, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(key string) *IntCmd {
+ cmd := NewIntCmd("llen", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(key string) *StringCmd {
+ cmd := NewStringCmd("lpop", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd("lrem", key, count, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd("lset", key, index, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(key string) *StringCmd {
+ cmd := NewStringCmd("rpop", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(source, destination string) *StringCmd {
+ cmd := NewStringCmd("rpoplpush", source, destination)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(key string) *IntCmd {
+ cmd := NewIntCmd("scard", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("sismember", key, member)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice
+func (c cmdable) SMembers(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("smembers", key)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map
+func (c cmdable) SMembersMap(key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd("smembers", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("smove", source, destination, member)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(key string) *StringCmd {
+ cmd := NewStringCmd("spop", key)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("spop", key, count)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(key string) *StringCmd {
+ cmd := NewStringCmd("srandmember", key)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("srandmember", key, count)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values map[string]interface{}
+}
+
+func (c cmdable) XAdd(a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 6+len(a.Values)*2)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ for k, v := range a.Values {
+ args = append(args, k)
+ args = append(args, v)
+ }
+
+ cmd := NewStringCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
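+
+// exampleXAdd is a hypothetical usage sketch (nothing in the library calls
+// it): leaving ID empty lets the server assign one ("*"), and MaxLenApprox
+// trims the stream with the "~" modifier.
+func exampleXAdd(rdb cmdable) (string, error) {
+ return rdb.XAdd(&XAddArgs{
+ Stream: "events",
+ MaxLenApprox: 1000,
+ Values: map[string]interface{}{"type": "click", "user": "42"},
+ }).Result()
+}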
+
+func (c cmdable) XDel(stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(stream string) *IntCmd {
+ cmd := NewIntCmd("xlen", stream)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
+ _ = c(cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ }
+
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ _ = c(cmd)
+ return cmd
+}
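+
+// exampleXRead is a hypothetical usage sketch (nothing in the library calls
+// it): Streams lists stream names followed by the IDs to read after ("$"
+// means only new entries), and a non-negative Block turns this into a
+// blocking read.
+func exampleXRead(rdb cmdable) ([]XStream, error) {
+ return rdb.XRead(&XReadArgs{
+ Streams: []string{"events", "$"},
+ Count: 10,
+ Block: 5 * time.Second,
+ }).Result()
+}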
+
+func (c cmdable) XReadStreams(streams ...string) *XStreamSliceCmd {
+ return c.XRead(&XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "create", stream, group, start)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(stream, group string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "destroy", stream, group)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer)
+ _ = c(cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ }
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ _ = c(cmd)
+ return cmd
+}
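+
+// exampleXReadGroup is a hypothetical usage sketch (nothing in the library
+// calls it): the ">" ID asks for entries that were never delivered to this
+// consumer group before.
+func exampleXReadGroup(rdb cmdable) ([]XStream, error) {
+ return rdb.XReadGroup(&XReadGroupArgs{
+ Group: "workers",
+ Consumer: "worker-1",
+ Streams: []string{"events", ">"},
+ Count: 10,
+ Block: 5 * time.Second,
+ }).Result()
+}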
+
+func (c cmdable) XAck(stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd("xpending", stream, group)
+ _ = c(cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 4+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c cmdable) XTrim(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", maxLen)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XTrimApprox(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(key)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key it was popped from.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewZWithKeyCmd(args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewZWithKeyCmd(args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) zAdd(a []interface{}, n int, members ...*Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(a...)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(key string, members ...*Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(a, n, members...)
+}
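+
+// exampleZAdd is a hypothetical usage sketch (nothing in the library calls
+// it): members are passed as *Z score/member pairs and the command reports
+// how many new members were added.
+func exampleZAdd(rdb cmdable) (int64, error) {
+ return rdb.ZAdd("leaderboard",
+ &Z{Score: 42, Member: "alice"},
+ &Z{Score: 17, Member: "bob"},
+ ).Result()
+}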
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(a, n, members...)
+}
+
+func (c cmdable) zIncr(a []interface{}, n int, members ...*Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(a...)
+ _ = c(cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(key string, member *Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(a, n, member)
+}
+
+func (c cmdable) ZCard(key string) *IntCmd {
+ cmd := NewIntCmd("zcard", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zcount", key, min, max)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zlexcount", key, min, max)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd("zincrby", key, increment, member)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
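+
+// exampleZInterStore is a hypothetical usage sketch (nothing in the library
+// calls it): Weights and Aggregate are optional ZStore modifiers.
+func exampleZInterStore(rdb cmdable) (int64, error) {
+ return rdb.ZInterStore("out", &ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ Aggregate: "MAX",
+ }).Result()
+}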
+
+func (c cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+ _ = c(cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
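+
+// exampleZRangeByScore is a hypothetical usage sketch (nothing in the library
+// calls it): Min and Max are Redis range strings, so "-inf"/"+inf" and
+// exclusive bounds such as "(100" are allowed, and Offset/Count map to LIMIT.
+func exampleZRangeByScore(rdb cmdable) ([]string, error) {
+ return rdb.ZRangeByScore("leaderboard", &ZRangeBy{
+ Min: "-inf",
+ Max: "(100",
+ Offset: 0,
+ Count: 10,
+ }).Result()
+}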
+
+func (c cmdable) ZRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrank", key, member)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebyscore", key, min, max)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebylex", key, min, max)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrevrank", key, member)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(key, member string) *FloatCmd {
+ cmd := NewFloatCmd("zscore", key, member)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF() *StatusCmd {
+ cmd := NewStatusCmd("bgrewriteaof")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave() *StatusCmd {
+ cmd := NewStatusCmd("bgsave")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ipPort string) *StatusCmd {
+ cmd := NewStatusCmd("client", "kill", ipPort)
+ _ = c(cmd)
+ return cmd
+}
+
+// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one:
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientList() *StringCmd {
+ cmd := NewStringCmd("client", "list")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("client", "pause", formatMs(dur))
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID() *IntCmd {
+ cmd := NewIntCmd("client", "id")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(id int64) *IntCmd {
+ cmd := NewIntCmd("client", "unblock", id)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(id int64) *IntCmd {
+ cmd := NewIntCmd("client", "unblock", id, "error")
+ _ = c(cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(name string) *BoolCmd {
+ cmd := NewBoolCmd("client", "setname", name)
+ _ = c(cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName() *StringCmd {
+ cmd := NewStringCmd("client", "getname")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigGet(parameter string) *SliceCmd {
+ cmd := NewSliceCmd("config", "get", parameter)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat() *StatusCmd {
+ cmd := NewStatusCmd("config", "resetstat")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd("config", "set", parameter, value)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite() *StatusCmd {
+ cmd := NewStatusCmd("config", "rewrite")
+ _ = c(cmd)
+ return cmd
+}
+
+// Deprecated: Use DBSize instead.
+func (c cmdable) DbSize() *IntCmd {
+ return c.DBSize()
+}
+
+func (c cmdable) DBSize() *IntCmd {
+ cmd := NewIntCmd("dbsize")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll() *StatusCmd {
+ cmd := NewStatusCmd("flushall")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync() *StatusCmd {
+ cmd := NewStatusCmd("flushall", "async")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB() *StatusCmd {
+ cmd := NewStatusCmd("flushdb")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync() *StatusCmd {
+ cmd := NewStatusCmd("flushdb", "async")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Info(section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave() *IntCmd {
+ cmd := NewIntCmd("lastsave")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) Save() *StatusCmd {
+ cmd := NewStatusCmd("save")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown() *StatusCmd {
+ return c.shutdown("")
+}
+
+func (c cmdable) ShutdownSave() *StatusCmd {
+ return c.shutdown("save")
+}
+
+func (c cmdable) ShutdownNoSave() *StatusCmd {
+ return c.shutdown("nosave")
+}
+
+func (c cmdable) SlaveOf(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("slaveof", host, port)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLog() {
+ panic("not implemented")
+}
+
+func (c cmdable) Sync() {
+ panic("not implemented")
+}
+
+func (c cmdable) Time() *TimeCmd {
+ cmd := NewTimeCmd("time")
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(cmdArgs...)
+ _ = c(cmd)
+ return cmd
+}
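+
+// exampleEval is a hypothetical usage sketch (nothing in the library calls
+// it): the keys slice is exposed to the script as KEYS[...] and the trailing
+// args as ARGV[...].
+func exampleEval(rdb cmdable) (interface{}, error) {
+ script := "return redis.call('set', KEYS[1], ARGV[1])"
+ return rdb.Eval(script, []string{"greeting"}, "hello").Result()
+}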
+
+func (c cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(cmdArgs...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush() *StatusCmd {
+ cmd := NewStatusCmd("script", "flush")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill() *StatusCmd {
+ cmd := NewStatusCmd("script", "kill")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(script string) *StringCmd {
+ cmd := NewStringCmd("script", "load", script)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) DebugObject(key string) *StringCmd {
+ cmd := NewStringCmd("debug", "object", key)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd("publish", channel, message)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat() *IntCmd {
+ cmd := NewIntCmd("pubsub", "numpat")
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots() *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd("cluster", "slots")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes() *StringCmd {
+ cmd := NewStringCmd("cluster", "nodes")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "meet", host, port)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(nodeID string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "forget", nodeID)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(nodeID string) *StatusCmd {
+ cmd := NewStatusCmd("cluster", "replicate", nodeID)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "reset", "soft")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "reset", "hard")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo() *StringCmd {
+ cmd := NewStringCmd("cluster", "info")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(key string) *IntCmd {
+ cmd := NewIntCmd("cluster", "keyslot", key)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd("cluster", "getkeysinslot", slot, count)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
+ cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(slot int) *IntCmd {
+ cmd := NewIntCmd("cluster", "countkeysinslot", slot)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(slots...)
+}
+
+func (c cmdable) ClusterSaveConfig() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "saveconfig")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("cluster", "slaves", nodeID)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ReadOnly() *StatusCmd {
+ cmd := NewStatusCmd("readonly")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite() *StatusCmd {
+ cmd := NewStatusCmd("readwrite")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover() *StatusCmd {
+ cmd := NewStatusCmd("cluster", "failover")
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd("geodist", key, member1, member2, unit)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) MemoryUsage(key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(args...)
+ _ = c(cmd)
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v7/doc.go b/vendor/github.com/go-redis/redis/v7/doc.go
new file mode 100644
index 0000000000..55262533a6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/go-redis/redis/v7/error.go b/vendor/github.com/go-redis/redis/v7/error.go
new file mode 100644
index 0000000000..0ffaca9f8f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/error.go
@@ -0,0 +1,108 @@
+package redis
+
+import (
+ "context"
+ "io"
+ "net"
+ "strings"
+
+ "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+ error
+
+ // RedisError is a no-op function but
+ // serves to distinguish types that are Redis
+ // errors from ordinary errors: a type is a
+ // Redis error if it has a RedisError method.
+ RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func isRetryableError(err error, retryTimeout bool) bool {
+ switch err {
+ case nil, context.Canceled, context.DeadlineExceeded:
+ return false
+ case io.EOF:
+ return true
+ }
+ if netErr, ok := err.(net.Error); ok {
+ if netErr.Timeout() {
+ return retryTimeout
+ }
+ return true
+ }
+
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ return false
+}
+
+func isRedisError(err error) bool {
+ _, ok := err.(proto.RedisError)
+ return ok
+}
+
+func isBadConn(err error, allowTimeout bool) bool {
+ if err == nil {
+ return false
+ }
+ if isRedisError(err) {
+ // Close connections in a read-only state in case a domain address is
+ // used and the domain resolves to a different Redis server. See #790.
+ return isReadOnlyError(err)
+ }
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return false
+ }
+ }
+ return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if !isRedisError(err) {
+ return
+ }
+
+ s := err.Error()
+ switch {
+ case strings.HasPrefix(s, "MOVED "):
+ moved = true
+ case strings.HasPrefix(s, "ASK "):
+ ask = true
+ default:
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+ addr = s[ind+1:]
+ return
+}
+
+func isLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+ return strings.HasPrefix(err.Error(), "READONLY ")
+}
diff --git a/vendor/github.com/go-redis/redis/v7/go.mod b/vendor/github.com/go-redis/redis/v7/go.mod
new file mode 100644
index 0000000000..e3a4dec54a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/go.mod
@@ -0,0 +1,15 @@
+module github.com/go-redis/redis/v7
+
+require (
+ github.com/golang/protobuf v1.3.2 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/onsi/ginkgo v1.10.1
+ github.com/onsi/gomega v1.7.0
+ golang.org/x/net v0.0.0-20190923162816-aa69164e4478 // indirect
+ golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect
+ golang.org/x/text v0.3.2 // indirect
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+ gopkg.in/yaml.v2 v2.2.4 // indirect
+)
+
+go 1.11
diff --git a/vendor/github.com/go-redis/redis/v7/go.sum b/vendor/github.com/go-redis/redis/v7/go.sum
new file mode 100644
index 0000000000..6a04dbb63a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/go.sum
@@ -0,0 +1,47 @@
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go b/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go
new file mode 100644
index 0000000000..a9c56f0762
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consistenthash provides an implementation of a ring hash.
+package consistenthash
+
+import (
+ "hash/crc32"
+ "sort"
+ "strconv"
+)
+
+type Hash func(data []byte) uint32
+
+type Map struct {
+ hash Hash
+ replicas int
+ keys []int // Sorted
+ hashMap map[int]string
+}
+
+func New(replicas int, fn Hash) *Map {
+ m := &Map{
+ replicas: replicas,
+ hash: fn,
+ hashMap: make(map[int]string),
+ }
+ if m.hash == nil {
+ m.hash = crc32.ChecksumIEEE
+ }
+ return m
+}
+
+// IsEmpty reports whether there are no items available.
+func (m *Map) IsEmpty() bool {
+ return len(m.keys) == 0
+}
+
+// Add adds some keys to the hash.
+func (m *Map) Add(keys ...string) {
+ for _, key := range keys {
+ for i := 0; i < m.replicas; i++ {
+ hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
+ m.keys = append(m.keys, hash)
+ m.hashMap[hash] = key
+ }
+ }
+ sort.Ints(m.keys)
+}
+
+// Get returns the closest item in the hash to the provided key.
+func (m *Map) Get(key string) string {
+ if m.IsEmpty() {
+ return ""
+ }
+
+ hash := int(m.hash([]byte(key)))
+
+ // Binary search for appropriate replica.
+ idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
+
+ // Means we have cycled back to the first replica.
+ if idx == len(m.keys) {
+ idx = 0
+ }
+
+ return m.hashMap[m.keys[idx]]
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go
new file mode 100644
index 0000000000..22f5b3981b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go
@@ -0,0 +1,77 @@
+package hashtag
+
+import (
+ "math/rand"
+ "strings"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+func RandomSlot() int {
+ return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+ if key == "" {
+ return RandomSlot()
+ }
+ key = Key(key)
+ return int(crc16sum(key)) % slotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
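
A small illustrative sketch of the slot mapping above (again internal-only, so this is for reading rather than importing): keys that share a {hash tag} land on the same slot, which is what lets multi-key cluster commands stay on one node.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7/internal/hashtag"
)

func main() {
	// Only the text inside {} is hashed, so these two keys share a slot.
	fmt.Println(hashtag.Slot("{user:1001}.profile"))
	fmt.Println(hashtag.Slot("{user:1001}.settings"))

	// Without a tag the whole key is hashed.
	fmt.Println(hashtag.Slot("user:1002"))

	// An empty key is assigned a random slot.
	fmt.Println(hashtag.Slot(""))
}
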
diff --git a/vendor/github.com/go-redis/redis/v7/internal/internal.go b/vendor/github.com/go-redis/redis/v7/internal/internal.go
new file mode 100644
index 0000000000..ad3fc3c9ff
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/internal.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+ "math/rand"
+ "time"
+)
+
+// RetryBackoff returns a retry backoff duration with jitter; the randomness
+// helps to avoid overload when many clients retry at the same time.
+// https://www.awsarchitectureblog.com/2015/03/backoff.html
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ retry = 0
+ }
+
+ backoff := minBackoff << uint(retry)
+ if backoff > maxBackoff || backoff < minBackoff {
+ backoff = maxBackoff
+ }
+
+ if backoff == 0 {
+ return 0
+ }
+ return time.Duration(rand.Int63n(int64(backoff)))
+}
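
The sketch below mirrors the backoff calculation above, minus the random jitter, to show how the upper bound grows per retry and is capped at maxBackoff. The helper name is illustrative; the real function returns a random duration below this cap.

package main

import (
	"fmt"
	"time"
)

// backoffCap mirrors the non-random part of RetryBackoff.
func backoffCap(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if retry < 0 {
		retry = 0
	}
	backoff := minBackoff << uint(retry)
	if backoff > maxBackoff || backoff < minBackoff {
		backoff = maxBackoff
	}
	return backoff
}

func main() {
	for retry := 0; retry < 8; retry++ {
		// With the defaults used elsewhere in the package (8ms..512ms)
		// the cap doubles each retry: 8ms, 16ms, ... then stays at 512ms.
		fmt.Println(retry, backoffCap(retry, 8*time.Millisecond, 512*time.Millisecond))
	}
}
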
diff --git a/vendor/github.com/go-redis/redis/v7/internal/log.go b/vendor/github.com/go-redis/redis/v7/internal/log.go
new file mode 100644
index 0000000000..405a2728d6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/log.go
@@ -0,0 +1,8 @@
+package internal
+
+import (
+ "log"
+ "os"
+)
+
+var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)
diff --git a/vendor/github.com/go-redis/redis/v7/internal/once.go b/vendor/github.com/go-redis/redis/v7/internal/once.go
new file mode 100644
index 0000000000..64f46272ae
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+// var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
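
A hedged sketch of the re-arming behaviour the comment describes: a failing initialiser keeps being retried until it succeeds once, after which Do becomes a no-op. Illustrative only, since the package is internal to the module.

package main

import (
	"errors"
	"fmt"

	"github.com/go-redis/redis/v7/internal"
)

func main() {
	var once internal.Once
	attempts := 0

	initFn := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	}

	for i := 0; i < 5; i++ {
		// The first two calls fail and re-arm the Once; the third succeeds;
		// the remaining calls return nil without running initFn again.
		fmt.Println(i, once.Do(initFn))
	}
	fmt.Println("attempts:", attempts) // 3
}
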
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v7/internal/pool/conn.go
new file mode 100644
index 0000000000..e9a2585463
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/pool/conn.go
@@ -0,0 +1,118 @@
+package pool
+
+import (
+ "context"
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ netConn net.Conn
+
+ rd *proto.Reader
+ wr *proto.Writer
+
+ Inited bool
+ pooled bool
+ createdAt time.Time
+ usedAt int64 // atomic
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ createdAt: time.Now(),
+ }
+ cn.rd = proto.NewReader(netConn)
+ cn.wr = proto.NewWriter(netConn)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ unix := atomic.LoadInt64(&cn.usedAt)
+ return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.rd.Reset(netConn)
+ cn.wr.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ return cn.netConn.RemoteAddr()
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+ err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout))
+ if err != nil {
+ return err
+ }
+ return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+ ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+ err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout))
+ if err != nil {
+ return err
+ }
+
+ if cn.wr.Buffered() > 0 {
+ cn.wr.Reset(cn.netConn)
+ }
+
+ err = fn(cn.wr)
+ if err != nil {
+ return err
+ }
+
+ return cn.wr.Flush()
+}
+
+func (cn *Conn) Close() error {
+ return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+ tm := time.Now()
+ cn.SetUsedAt(tm)
+
+ if timeout > 0 {
+ tm = tm.Add(timeout)
+ }
+
+ if ctx != nil {
+ deadline, ok := ctx.Deadline()
+ if ok {
+ if timeout == 0 {
+ return deadline
+ }
+ if deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v7/internal/pool/pool.go
new file mode 100644
index 0000000000..a8d8276a9f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/pool/pool.go
@@ -0,0 +1,517 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+)
+
+var ErrClosed = errors.New("redis: client is closed")
+var ErrPoolTimeout = errors.New("redis: connection pool timeout")
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(*Conn)
+ Remove(*Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+
+ lastDialErrorMu sync.RWMutex
+ lastDialError error
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+
+ _closed uint32 // atomic
+ closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ closedCh: make(chan struct{}),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+ go func() {
+ err := p.addIdleConn()
+ if err != nil {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+ }()
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ cn, err := p.dialConn(context.TODO(), true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ p.connsMu.Unlock()
+ return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ cn, err := p.dialConn(ctx, pooled)
+ if err != nil {
+ return nil, err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ if pooled {
+ // If pool is full remove the cn on next Put.
+ if p.poolSize >= p.opt.PoolSize {
+ cn.pooled = false
+ } else {
+ p.poolSize++
+ }
+ }
+ p.connsMu.Unlock()
+ return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ return nil, p.getLastDialError()
+ }
+
+ netConn, err := p.opt.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ if p.closed() {
+ return
+ }
+
+ conn, err := p.opt.Dialer(context.Background())
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p.lastDialErrorMu.Lock()
+ p.lastDialError = err
+ p.lastDialErrorMu.Unlock()
+}
+
+func (p *ConnPool) getLastDialError() error {
+ p.lastDialErrorMu.RLock()
+ err := p.lastDialError
+ p.lastDialErrorMu.RUnlock()
+ return err
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ err := p.waitTurn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ p.connsMu.Lock()
+ cn := p.popIdle()
+ p.connsMu.Unlock()
+
+ if cn == nil {
+ break
+ }
+
+ if p.isStaleConn(cn) {
+ _ = p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, nil
+ }
+
+ atomic.AddUint32(&p.stats.Misses, 1)
+
+ newcn, err := p.newConn(ctx, true)
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+ p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ select {
+ case p.queue <- struct{}{}:
+ return nil
+ default:
+ }
+
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.opt.PoolTimeout)
+
+ select {
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return ctx.Err()
+ case p.queue <- struct{}{}:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return nil
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return ErrPoolTimeout
+ }
+}
+
+func (p *ConnPool) freeTurn() {
+ <-p.queue
+}
+
+func (p *ConnPool) popIdle() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ idx := len(p.idleConns) - 1
+ cn := p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ p.idleConnsLen--
+ p.checkMinIdleConns()
+ return cn
+}
+
+func (p *ConnPool) Put(cn *Conn) {
+ if cn.rd.Buffered() > 0 {
+ internal.Logger.Printf("Conn has unread data")
+ p.Remove(cn, BadConnError{})
+ return
+ }
+
+ if !cn.pooled {
+ p.Remove(cn, nil)
+ return
+ }
+
+ p.connsMu.Lock()
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ p.connsMu.Unlock()
+ p.freeTurn()
+}
+
+func (p *ConnPool) Remove(cn *Conn, reason error) {
+ p.removeConnWithLock(cn)
+ p.freeTurn()
+ _ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.removeConnWithLock(cn)
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+ p.connsMu.Lock()
+ p.removeConn(cn)
+ p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
+ return
+ }
+ }
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ if p.opt.OnClose != nil {
+ _ = p.opt.OnClose(cn)
+ }
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ n := len(p.conns)
+ p.connsMu.Unlock()
+ return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+ idleLen := p.IdleLen()
+ return &Stats{
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+ TotalConns: uint32(p.Len()),
+ IdleConns: uint32(idleLen),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ p.connsMu.Unlock()
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+ close(p.closedCh)
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.poolSize = 0
+ p.idleConns = nil
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ // It is possible that the ticker and closedCh fire at the same time and
+ // select pseudo-randomly picks the ticker case, so double-check that the
+ // pool is not closed before reaping.
+ if p.closed() {
+ return
+ }
+ _, err := p.ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf("ReapStaleConns failed: %s", err)
+ continue
+ }
+ case <-p.closedCh:
+ return
+ }
+ }
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+ var n int
+ for {
+ p.getTurn()
+
+ p.connsMu.Lock()
+ cn := p.reapStaleConn()
+ p.connsMu.Unlock()
+ p.freeTurn()
+
+ if cn != nil {
+ _ = p.closeConn(cn)
+ n++
+ } else {
+ break
+ }
+ }
+ atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+ return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ cn := p.idleConns[0]
+ if !p.isStaleConn(cn) {
+ return nil
+ }
+
+ p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+ p.idleConnsLen--
+ p.removeConn(cn)
+
+ return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+ if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ return false
+ }
+
+ now := time.Now()
+ if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+ return true
+ }
+ if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+ return true
+ }
+
+ return false
+}
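
To make the Options above concrete, here is a hedged sketch that wires the pool to a plain TCP dialer and exercises Get/Put/Stats. Illustrative only: the pool is an internal package and the address is a placeholder.

package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/go-redis/redis/v7/internal/pool"
)

func main() {
	p := pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
			return net.Dial("tcp", "localhost:6379")
		},
		PoolSize:           4,
		MinIdleConns:       1,
		PoolTimeout:        time.Second,
		IdleTimeout:        5 * time.Minute,
		IdleCheckFrequency: time.Minute,
	})
	defer p.Close()

	cn, err := p.Get(context.Background())
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	p.Put(cn) // return the healthy connection to the idle list

	fmt.Printf("%+v\n", *p.Stats()) // Hits/Misses/Timeouts plus pool sizes
}
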
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go
new file mode 100644
index 0000000000..04758a00d4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go
@@ -0,0 +1,208 @@
+package pool
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+)
+
+const (
+ stateDefault = 0
+ stateInited = 1
+ stateClosed = 2
+)
+
+type BadConnError struct {
+ wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+ s := "redis: Conn is in a bad state"
+ if e.wrapped != nil {
+ s += ": " + e.wrapped.Error()
+ }
+ return s
+}
+
+func (e BadConnError) Unwrap() error {
+ return e.wrapped
+}
+
+type SingleConnPool struct {
+ pool Pooler
+ level int32 // atomic
+
+ state uint32 // atomic
+ ch chan *Conn
+
+ _badConnError atomic.Value
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler) *SingleConnPool {
+ p, ok := pool.(*SingleConnPool)
+ if !ok {
+ p = &SingleConnPool{
+ pool: pool,
+ ch: make(chan *Conn, 1),
+ }
+ }
+ atomic.AddInt32(&p.level, 1)
+ return p
+}
+
+func (p *SingleConnPool) SetConn(cn *Conn) {
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ p.ch <- cn
+ } else {
+ panic("not reached")
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ // In worst case this races with Close which is not a very common operation.
+ for i := 0; i < 1000; i++ {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ return cn, nil
+ }
+ p.pool.Remove(cn, ErrClosed)
+ case stateInited:
+ if err := p.badConnError(); err != nil {
+ return nil, err
+ }
+ cn, ok := <-p.ch
+ if !ok {
+ return nil, ErrClosed
+ }
+ return cn, nil
+ case stateClosed:
+ return nil, ErrClosed
+ default:
+ panic("not reached")
+ }
+ }
+ return nil, fmt.Errorf("redis: SingleConnPool.Get: infinite loop")
+}
+
+func (p *SingleConnPool) Put(cn *Conn) {
+ defer func() {
+ if recover() != nil {
+ p.freeConn(cn)
+ }
+ }()
+ p.ch <- cn
+}
+
+func (p *SingleConnPool) freeConn(cn *Conn) {
+ if err := p.badConnError(); err != nil {
+ p.pool.Remove(cn, err)
+ } else {
+ p.pool.Put(cn)
+ }
+}
+
+func (p *SingleConnPool) Remove(cn *Conn, reason error) {
+ defer func() {
+ if recover() != nil {
+ p.pool.Remove(cn, ErrClosed)
+ }
+ }()
+ p._badConnError.Store(BadConnError{wrapped: reason})
+ p.ch <- cn
+}
+
+func (p *SingleConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
+
+func (p *SingleConnPool) Close() error {
+ level := atomic.AddInt32(&p.level, -1)
+ if level > 0 {
+ return nil
+ }
+
+ for i := 0; i < 1000; i++ {
+ state := atomic.LoadUint32(&p.state)
+ if state == stateClosed {
+ return ErrClosed
+ }
+ if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+ close(p.ch)
+ cn, ok := <-p.ch
+ if ok {
+ p.freeConn(cn)
+ }
+ return nil
+ }
+ }
+
+ return fmt.Errorf("redis: SingleConnPool.Close: infinite loop")
+}
+
+func (p *SingleConnPool) Reset() error {
+ if p.badConnError() == nil {
+ return nil
+ }
+
+ select {
+ case cn, ok := <-p.ch:
+ if !ok {
+ return ErrClosed
+ }
+ p.pool.Remove(cn, ErrClosed)
+ p._badConnError.Store(BadConnError{wrapped: nil})
+ default:
+ return fmt.Errorf("redis: SingleConnPool does not have a Conn")
+ }
+
+ if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+ state := atomic.LoadUint32(&p.state)
+ return fmt.Errorf("redis: invalid SingleConnPool state: %d", state)
+ }
+
+ return nil
+}
+
+func (p *SingleConnPool) badConnError() error {
+ if v := p._badConnError.Load(); v != nil {
+ err := v.(BadConnError)
+ if err.wrapped != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go
new file mode 100644
index 0000000000..d4a355a44f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go
@@ -0,0 +1,112 @@
+package pool
+
+import (
+ "context"
+ "sync"
+)
+
+type StickyConnPool struct {
+ pool *ConnPool
+ reusable bool
+
+ cn *Conn
+ closed bool
+ mu sync.Mutex
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
+ return &StickyConnPool{
+ pool: pool,
+ reusable: reusable,
+ }
+}
+
+func (p *StickyConnPool) NewConn(context.Context) (*Conn, error) {
+ panic("not implemented")
+}
+
+func (p *StickyConnPool) CloseConn(*Conn) error {
+ panic("not implemented")
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return nil, ErrClosed
+ }
+ if p.cn != nil {
+ return p.cn, nil
+ }
+
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ p.cn = cn
+ return cn, nil
+}
+
+func (p *StickyConnPool) putUpstream() {
+ p.pool.Put(p.cn)
+ p.cn = nil
+}
+
+func (p *StickyConnPool) Put(cn *Conn) {}
+
+func (p *StickyConnPool) removeUpstream(reason error) {
+ p.pool.Remove(p.cn, reason)
+ p.cn = nil
+}
+
+func (p *StickyConnPool) Remove(cn *Conn, reason error) {
+ p.removeUpstream(reason)
+}
+
+func (p *StickyConnPool) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.cn == nil {
+ return 1
+ }
+ return 0
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return nil
+}
+
+func (p *StickyConnPool) Close() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if p.closed {
+ return ErrClosed
+ }
+ p.closed = true
+
+ if p.cn != nil {
+ if p.reusable {
+ p.putUpstream()
+ } else {
+ p.removeUpstream(ErrClosed)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v7/internal/proto/reader.go
new file mode 100644
index 0000000000..d3f646e98f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/proto/reader.go
@@ -0,0 +1,314 @@
+package proto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+
+ "github.com/go-redis/redis/v7/internal/util"
+)
+
+const (
+ ErrorReply = '-'
+ StatusReply = '+'
+ IntReply = ':'
+ StringReply = '$'
+ ArrayReply = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+ rd *bufio.Reader
+ _buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ _buf: make([]byte, 64),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if isNilReply(line) {
+ return nil, Nil
+ }
+ return line, nil
+}
+
+// readLine reads a line and returns an error if:
+//   - there is a pending read error;
+//   - or the line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ return nil, err
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ b = b[:len(b)-2]
+ return b, nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ case StringReply:
+ return r.readStringReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+ return nil, err
+ }
+ return m(r, n)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ default:
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return "", ParseErrorReply(line)
+ case StringReply:
+ return r.readStringReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return string(line[1:]), nil
+ default:
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+ }
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ if isNilReply(line) {
+ return "", Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, replyLen+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(r, n)
+ default:
+ return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadArrayLen() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case ArrayReply:
+ return parseArrayLen(line)
+ default:
+ return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != 2 {
+ return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+ }
+
+ cursor, err := r.ReadUint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ n, err = r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ keys := make([]string, n)
+ for i := int64(0); i < n; i++ {
+ key, err := r.ReadString()
+ if err != nil {
+ return nil, 0, err
+ }
+ keys[i] = key
+ }
+
+ return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return r._readTmpBytesReply(line)
+ case StatusReply:
+ return line[1:], nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ buf := r.buf(replyLen + 2)
+ _, err = io.ReadFull(r.rd, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+ if n <= cap(r._buf) {
+ return r._buf[:n]
+ }
+ d := n - cap(r._buf)
+ r._buf = append(r._buf, make([]byte, d)...)
+ return r._buf
+}
+
+func isNilReply(b []byte) bool {
+ return len(b) == 3 &&
+ (b[0] == StringReply || b[0] == ArrayReply) &&
+ b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+ if isNilReply(line) {
+ return 0, Nil
+ }
+ return util.ParseInt(line[1:], 10, 64)
+}
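
A small sketch of the reader in isolation, feeding it hand-written RESP instead of a socket (illustrative, internal package): a status reply followed by a bulk string reply.

package main

import (
	"fmt"
	"strings"

	"github.com/go-redis/redis/v7/internal/proto"
)

func main() {
	// "+OK\r\n" is a status reply, "$5\r\nhello\r\n" a bulk string reply.
	r := proto.NewReader(strings.NewReader("+OK\r\n$5\r\nhello\r\n"))

	status, err := r.ReadString()
	fmt.Println(status, err) // OK <nil>

	val, err := r.ReadString()
	fmt.Println(val, err) // hello <nil>
}
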
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v7/internal/proto/scan.go
new file mode 100644
index 0000000000..90c1e4ae6e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/proto/scan.go
@@ -0,0 +1,166 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+
+ "github.com/go-redis/redis/v7/internal/util"
+)
+
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return fmt.Errorf("redis: Scan(nil)")
+ case *string:
+ *v = util.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = util.Atoi(b)
+ return err
+ case *int8:
+ n, err := util.ParseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := util.ParseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := util.ParseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := util.ParseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := util.ParseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := util.ParseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := util.ParseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return err
+ case *float64:
+ var err error
+ *v, err = util.ParseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ next := makeSliceNextElemFunc(v)
+ for i, s := range data {
+ elem := next()
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ elem := v.Index(v.Len() - 1)
+ if elem.IsNil() {
+ elem.Set(reflect.New(elemType))
+ }
+ return elem.Elem()
+ }
+
+ elem := reflect.New(elemType)
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+ }
+
+ zero := reflect.Zero(elemType)
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ v.Set(reflect.Append(v, zero))
+ return v.Index(v.Len() - 1)
+ }
+}
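
Illustrative usage of the Scan helpers above, converting raw reply bytes into Go values; note that the bool rule is simply that "1" means true.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7/internal/proto"
)

func main() {
	var n int64
	if err := proto.Scan([]byte("42"), &n); err != nil {
		panic(err)
	}

	var ok bool
	if err := proto.Scan([]byte("1"), &ok); err != nil {
		panic(err)
	}

	var names []string
	if err := proto.ScanSlice([]string{"ada", "linus"}, &names); err != nil {
		panic(err)
	}

	fmt.Println(n, ok, names) // 42 true [ada linus]
}
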
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v7/internal/proto/writer.go
new file mode 100644
index 0000000000..d552f1e84d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/proto/writer.go
@@ -0,0 +1,165 @@
+package proto
+
+import (
+ "bufio"
+ "encoding"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal/util"
+)
+
+type Writer struct {
+ wr *bufio.Writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr io.Writer) *Writer {
+ return &Writer{
+ wr: bufio.NewWriter(wr),
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ err := w.wr.WriteByte(ArrayReply)
+ if err != nil {
+ return err
+ }
+
+ err = w.writeLen(len(args))
+ if err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ err := w.writeArg(arg)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.wr.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) writeArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case int8:
+ return w.int(int64(v))
+ case int16:
+ return w.int(int64(v))
+ case int32:
+ return w.int(int64(v))
+ case int64:
+ return w.int(v)
+ case uint:
+ return w.uint(uint64(v))
+ case uint8:
+ return w.uint(uint64(v))
+ case uint16:
+ return w.uint(uint64(v))
+ case uint32:
+ return w.uint(uint64(v))
+ case uint64:
+ return w.uint(v)
+ case float32:
+ return w.float(float64(v))
+ case float64:
+ return w.float(v)
+ case bool:
+ if v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case time.Time:
+ return w.string(v.Format(time.RFC3339Nano))
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ err := w.wr.WriteByte(StringReply)
+ if err != nil {
+ return err
+ }
+
+ err = w.writeLen(len(b))
+ if err != nil {
+ return err
+ }
+
+ _, err = w.wr.Write(b)
+ if err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ err := w.wr.WriteByte('\r')
+ if err != nil {
+ return err
+ }
+ return w.wr.WriteByte('\n')
+}
+
+func (w *Writer) Buffered() int {
+ return w.wr.Buffered()
+}
+
+func (w *Writer) Reset(wr io.Writer) {
+ w.wr.Reset(wr)
+}
+
+func (w *Writer) Flush() error {
+ return w.wr.Flush()
+}
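
And the writer side, shown against a bytes.Buffer rather than a socket (illustrative, internal package): WriteArgs emits a RESP array of bulk strings, and Flush pushes the buffered bytes out.

package main

import (
	"bytes"
	"fmt"

	"github.com/go-redis/redis/v7/internal/proto"
)

func main() {
	var buf bytes.Buffer
	w := proto.NewWriter(&buf)

	if err := w.WriteArgs([]interface{}{"SET", "key", 42}); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}

	// Prints "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$2\r\n42\r\n".
	fmt.Printf("%q\n", buf.String())
}
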
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util.go b/vendor/github.com/go-redis/redis/v7/internal/util.go
new file mode 100644
index 0000000000..844f34bad8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/util.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func Unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/safe.go b/vendor/github.com/go-redis/redis/v7/internal/util/safe.go
new file mode 100644
index 0000000000..1b3060ebc2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/util/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v7/internal/util/strconv.go
new file mode 100644
index 0000000000..db5033802a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+ return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go
new file mode 100644
index 0000000000..c9868aac2b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go
@@ -0,0 +1,22 @@
+// +build !appengine
+
+package util
+
+import (
+ "unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/vendor/github.com/go-redis/redis/v7/iterator.go b/vendor/github.com/go-redis/redis/v7/iterator.go
new file mode 100644
index 0000000000..f9d3aab6d2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/iterator.go
@@ -0,0 +1,75 @@
+package redis
+
+import (
+ "sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+ mu sync.Mutex // protects cmd and pos
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ it.mu.Lock()
+ err := it.cmd.Err()
+ it.mu.Unlock()
+ return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next() bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ // Instantly return on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ if it.cmd.args[0] == "scan" {
+ it.cmd.args[1] = it.cmd.cursor
+ } else {
+ it.cmd.args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return an empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ it.mu.Lock()
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ it.mu.Unlock()
+ return v
+}
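
A hedged usage sketch of the iterator, assuming the client's Scan command and the ScanCmd.Iterator helper defined elsewhere in this package; the address is a placeholder.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Iterate all keys matching the pattern, fetching 100 per SCAN page.
	iter := client.Scan(0, "user:*", 100).Iterator()
	for iter.Next() {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		fmt.Println("scan failed:", err)
	}
}
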
diff --git a/vendor/github.com/go-redis/redis/v7/options.go b/vendor/github.com/go-redis/redis/v7/options.go
new file mode 100644
index 0000000000..47dcc29bf3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/options.go
@@ -0,0 +1,249 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+ // Allow returns nil if the operation is allowed, or an error otherwise.
+ // If the operation is allowed, the client must call ReportResult with
+ // the outcome, whether it is a success or a failure.
+ Allow() error
+ // ReportResult reports the result of the previously allowed operation.
+ // nil indicates a success; a non-nil error usually indicates a failure.
+ ReportResult(result error)
+}
+
+type Options struct {
+ // The network type, either tcp or unix.
+ // Default is tcp.
+ Network string
+ // host:port address.
+ Addr string
+
+ // Dialer creates a new network connection and has priority over the
+ // Network and Addr options.
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Hook that is called when a new connection is established.
+ OnConnect func(*Conn) error
+
+ // Use the specified Username to authenticate the current connection with one of the users defined in the ACL
+ // list when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Username string
+
+ // Optional password. Must match the password specified in the
+ // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+ // or the User Password when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Password string
+ // Database to be selected after connecting to the server.
+ DB int
+
+ // Maximum number of retries before giving up.
+ // Default is to not retry failed commands.
+ MaxRetries int
+ // Minimum backoff between each retry.
+ // Default is 8 milliseconds; -1 disables backoff.
+ MinRetryBackoff time.Duration
+ // Maximum backoff between each retry.
+ // Default is 512 milliseconds; -1 disables backoff.
+ MaxRetryBackoff time.Duration
+
+ // Dial timeout for establishing new connections.
+ // Default is 5 seconds.
+ DialTimeout time.Duration
+ // Timeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+ // Default is 3 seconds.
+ ReadTimeout time.Duration
+ // Timeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is ReadTimeout.
+ WriteTimeout time.Duration
+
+ // Maximum number of socket connections.
+ // Default is 10 connections per CPU as reported by runtime.NumCPU.
+ PoolSize int
+ // Minimum number of idle connections, which is useful when establishing
+ // a new connection is slow.
+ MinIdleConns int
+ // Connection age at which client retires (closes) the connection.
+ // Default is to not close aged connections.
+ MaxConnAge time.Duration
+ // Amount of time client waits for connection if all connections
+ // are busy before returning an error.
+ // Default is ReadTimeout + 1 second.
+ PoolTimeout time.Duration
+ // Amount of time after which client closes idle connections.
+ // Should be less than server's timeout.
+ // Default is 5 minutes. -1 disables idle timeout check.
+ IdleTimeout time.Duration
+ // Frequency of idle checks made by idle connections reaper.
+ // Default is 1 minute. -1 disables idle connections reaper,
+ // but idle connections are still discarded by the client
+ // if IdleTimeout is set.
+ IdleCheckFrequency time.Duration
+
+ // Enables read only queries on slave nodes.
+ readOnly bool
+
+ // TLS Config to use. When set TLS will be negotiated.
+ TLSConfig *tls.Config
+
+ // Limiter interface used to implement a circuit breaker or a rate limiter.
+ Limiter Limiter
+}
+
+func (opt *Options) init() {
+ if opt.Addr == "" {
+ opt.Addr = "localhost:6379"
+ }
+ if opt.Network == "" {
+ if strings.HasPrefix(opt.Addr, "/") {
+ opt.Network = "unix"
+ } else {
+ opt.Network = "tcp"
+ }
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.NumCPU()
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ }
+ if opt.IdleTimeout == 0 {
+ opt.IdleTimeout = 5 * time.Minute
+ }
+ if opt.IdleCheckFrequency == 0 {
+ opt.IdleCheckFrequency = time.Minute
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *Options) clone() *Options {
+ clone := *opt
+ return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+func ParseURL(redisURL string) (*Options, error) {
+ o := &Options{Network: "tcp"}
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "redis" && u.Scheme != "rediss" {
+ return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+ }
+
+ if u.User != nil {
+ o.Username = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ o.Password = p
+ }
+ }
+
+ if len(u.Query()) > 0 {
+ return nil, errors.New("no options supported")
+ }
+
+ h, p, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ h = u.Host
+ }
+ if h == "" {
+ h = "localhost"
+ }
+ if p == "" {
+ p = "6379"
+ }
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+ }
+ default:
+ return nil, errors.New("invalid redis URL path: " + u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{ServerName: h}
+ }
+ return o, nil
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return opt.Dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
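
ParseURL in practice, as a hedged sketch: NewClient lives elsewhere in the package, and the credentials and address are placeholders. A rediss:// scheme would additionally set TLSConfig with the host as ServerName, as the code above shows.

package main

import (
	"fmt"

	"github.com/go-redis/redis/v7"
)

func main() {
	opt, err := redis.ParseURL("redis://user:secret@localhost:6379/2")
	if err != nil {
		panic(err)
	}
	// Addr, DB, Username and Password are filled in from the URL.
	fmt.Println(opt.Addr, opt.DB, opt.Username) // localhost:6379 2 user

	client := redis.NewClient(opt)
	defer client.Close()
}
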
diff --git a/vendor/github.com/go-redis/redis/v7/pipeline.go b/vendor/github.com/go-redis/redis/v7/pipeline.go
new file mode 100644
index 0000000000..d48566a787
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/pipeline.go
@@ -0,0 +1,142 @@
+package redis
+
+import (
+ "context"
+ "sync"
+
+ "github.com/go-redis/redis/v7/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is the mechanism that implements the Redis pipeline technique.
+//
+// Pipelining speeds up processing considerably by packing operations into
+// batches, sending them to Redis at once, and reading all replies in a
+// single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts: the client
+// retries on timeouts, so a pipeline can be retransmitted and its commands
+// executed more than once.
+// To avoid this, use read/write timeouts that are reasonably large for your
+// batch size, and/or use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+ Do(args ...interface{}) *Cmd
+ Process(cmd Cmder) error
+ Close() error
+ Discard() error
+ Exec() ([]Cmder, error)
+ ExecContext(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ ctx context.Context
+ exec pipelineExecer
+
+ mu sync.Mutex
+ cmds []Cmder
+ closed bool
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Pipeline) Do(args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(cmd Cmder) error {
+ c.mu.Lock()
+ c.cmds = append(c.cmds, cmd)
+ c.mu.Unlock()
+ return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+ c.mu.Lock()
+ _ = c.discard()
+ c.closed = true
+ c.mu.Unlock()
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+ c.mu.Lock()
+ err := c.discard()
+ c.mu.Unlock()
+ return err
+}
+
+func (c *Pipeline) discard() error {
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+ return c.ExecContext(c.ctx)
+}
+
+func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ cmds, err := c.Exec()
+ _ = c.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
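
A short sketch of the Pipeliner contract, assuming the client's Pipelined helper defined elsewhere in the package: commands are only queued inside the callback, and Exec sends them in a single roundtrip. The address is a placeholder.

package main

import (
	"fmt"
	"time"

	"github.com/go-redis/redis/v7"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	cmds, err := client.Pipelined(func(pipe redis.Pipeliner) error {
		pipe.Set("counter", 0, 0)
		pipe.Incr("counter")
		pipe.Expire("counter", time.Hour)
		return nil // nothing has been sent yet; Exec does one roundtrip
	})
	if err != nil {
		fmt.Println("pipeline failed:", err)
		return
	}
	fmt.Println(len(cmds), "commands executed") // 3 commands executed
}
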
diff --git a/vendor/github.com/go-redis/redis/v7/pubsub.go b/vendor/github.com/go-redis/redis/v7/pubsub.go
new file mode 100644
index 0000000000..26cde242be
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/pubsub.go
@@ -0,0 +1,595 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+const pingTimeout = 30 * time.Second
+
+var errPingTimeout = errors.New("redis: ping timeout")
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to the Redis server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func([]string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+}
+
+func (c *PubSub) String() string {
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) connWithLock() (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(args...)
+ return c.writeCmd(context.TODO(), cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(cn *pool.Conn, err error, allowTimeout bool) {
+ c.mu.Lock()
+ c.releaseConn(cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout) {
+ c.reconnect(err)
+ }
+}
+
+func (c *PubSub) reconnect(reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf("redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe subscribes the client to the specified channels. It returns an
+// empty subscription if there are no channels.
+func (c *PubSub) Subscribe(channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe("subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe subscribes the client to the given patterns. It returns an
+// empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe("psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ err := c.subscribe("unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ err := c.subscribe("punsubscribe", patterns...)
+ return err
+}
+
+func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
+ cn, err := c.conn(channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(cn, redisCmd, channels)
+ c.releaseConn(cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(args...)
+
+ cn, err := c.connWithLock()
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(context.TODO(), cn, cmd)
+ c.releaseConnWithLock(cn, err, false)
+ return err
+}
+
+// Subscription is received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message is received as a result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong is received as a result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: reply[2].(string),
+ }, nil
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API; in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd()
+ }
+
+ cn, err := c.connWithLock()
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(context.TODO(), timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(cn, err, timeout > 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, a Message, a Pong, or an error.
+// See the PubSub example for details. This is a low-level API; in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive() (interface{}, error) {
+ return c.ReceiveTimeout(0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and Pong
+// messages. This is a low-level API; in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage() (*Message, error) {
+ for {
+ msg, err := c.Receive()
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is full for 30 seconds, the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and reconnects if a ping reply is not received within 30 seconds.
+func (c *PubSub) Channel() <-chan *Message {
+ return c.ChannelSize(100)
+}
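+
+// Illustrative sketch (not part of the upstream source): a common way to
+// consume messages through Channel, assuming an existing *Client named rdb
+// and a channel name of your choosing:
+//
+//    pubsub := rdb.Subscribe("mychannel")
+//    defer pubsub.Close()
+//    for msg := range pubsub.Channel() {
+//        fmt.Println(msg.Channel, msg.Payload)
+//    }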
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initMsgChan(size)
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ if cap(c.msgCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.msgCh
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initAllChan(size)
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ if cap(c.allCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.allCh
+}
+
+func (c *PubSub) initPing() {
+ c.ping = make(chan struct{}, 1)
+ go func() {
+ timer := time.NewTimer(pingTimeout)
+ timer.Stop()
+
+ healthy := true
+ for {
+ timer.Reset(pingTimeout)
+ select {
+ case <-c.ping:
+ healthy = true
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ pingErr := c.Ping()
+ if healthy {
+ healthy = false
+ } else {
+ if pingErr == nil {
+ pingErr = errPingTimeout
+ }
+ c.mu.Lock()
+ c.reconnect(pingErr)
+ healthy = true
+ c.mu.Unlock()
+ }
+ case <-c.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *PubSub) initMsgChan(size int) {
+ c.msgCh = make(chan *Message, size)
+ go func() {
+ timer := time.NewTimer(pingTimeout)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive()
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(c.retryBackoff(errCount))
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(pingTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+ }
+ default:
+ internal.Logger.Printf("redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *PubSub) initAllChan(size int) {
+ c.allCh = make(chan interface{}, size)
+ go func() {
+ timer := time.NewTimer(pingTimeout)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive()
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(c.retryBackoff(errCount))
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ c.sendMessage(msg, timer)
+ case *Pong:
+ // Ignore.
+ case *Message:
+ c.sendMessage(msg, timer)
+ default:
+ internal.Logger.Printf("redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
+ timer.Reset(pingTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+ }
+}
+
+func (c *PubSub) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
diff --git a/vendor/github.com/go-redis/redis/v7/redis.go b/vendor/github.com/go-redis/redis/v7/redis.go
new file mode 100644
index 0000000000..3d9dfed7db
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/redis.go
@@ -0,0 +1,758 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+// Nil reply returned by Redis when the key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger *log.Logger) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+ AfterProcess(ctx context.Context, cmd Cmder) error
+
+ BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+ AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
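+
+// Illustrative sketch (not part of the upstream source): a minimal Hook
+// implementation that logs every processed command; the loggingHook name is
+// hypothetical, and rdb is assumed to be an existing *Client:
+//
+//    type loggingHook struct{}
+//
+//    func (loggingHook) BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
+//        return ctx, nil
+//    }
+//
+//    func (loggingHook) AfterProcess(ctx context.Context, cmd Cmder) error {
+//        log.Printf("processed %s", cmd.Name())
+//        return nil
+//    }
+//
+//    func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
+//        return ctx, nil
+//    }
+//
+//    func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []Cmder) error {
+//        log.Printf("processed pipeline of %d commands", len(cmds))
+//        return nil
+//    }
+//
+// The hook is registered with rdb.AddHook(loggingHook{}).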
+
+type hooks struct {
+ hooks []Hook
+}
+
+func (hs *hooks) lock() {
+ hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+ clone := hs
+ clone.lock()
+ return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+ hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+ ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+ ctx, err := hs.beforeProcess(ctx, cmd)
+ if err != nil {
+ cmd.SetErr(err)
+ return err
+ }
+
+ cmdErr := fn(ctx, cmd)
+
+ if err := hs.afterProcess(ctx, cmd); err != nil {
+ cmd.SetErr(err)
+ return err
+ }
+
+ return cmdErr
+}
+
+func (hs hooks) beforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
+ for _, h := range hs.hooks {
+ var err error
+ ctx, err = h.BeforeProcess(ctx, cmd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ctx, nil
+}
+
+func (hs hooks) afterProcess(ctx context.Context, cmd Cmder) error {
+ var firstErr error
+ for _, h := range hs.hooks {
+ err := h.AfterProcess(ctx, cmd)
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (hs hooks) processPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ ctx, err := hs.beforeProcessPipeline(ctx, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsErr := fn(ctx, cmds)
+
+ if err := hs.afterProcessPipeline(ctx, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ return cmdsErr
+}
+
+func (hs hooks) beforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
+ for _, h := range hs.hooks {
+ var err error
+ ctx, err = h.BeforeProcessPipeline(ctx, cmds)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ctx, nil
+}
+
+func (hs hooks) afterProcessPipeline(ctx context.Context, cmds []Cmder) error {
+ var firstErr error
+ for _, h := range hs.hooks {
+ err := h.AfterProcessPipeline(ctx, cmds)
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (hs hooks) processTxPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ cmds = wrapMultiExec(cmds)
+ return hs.processPipeline(ctx, cmds, fn)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+ return &baseClient{
+ opt: opt,
+ connPool: connPool,
+ }
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ c.connPool.Remove(cn, err)
+ if err := internal.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ if c.opt.Password == "" &&
+ c.opt.DB == 0 &&
+ !c.opt.readOnly &&
+ c.opt.OnConnect == nil {
+ return nil
+ }
+
+ connPool := pool.NewSingleConnPool(nil)
+ connPool.SetConn(cn)
+ conn := newConn(ctx, c.opt, connPool)
+
+ _, err := conn.Pipelined(func(pipe Pipeliner) error {
+ if c.opt.Password != "" {
+ if c.opt.Username != "" {
+ pipe.AuthACL(c.opt.Username, c.opt.Password)
+ } else {
+ pipe.Auth(c.opt.Password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly()
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false) {
+ c.connPool.Remove(cn, err)
+ } else {
+ c.connPool.Put(cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ c.releaseConn(cn, err)
+ }()
+
+ err = fn(ctx, cn)
+ return err
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ err := c._process(ctx, cmd)
+ if err != nil {
+ cmd.SetErr(err)
+ return err
+ }
+ return nil
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ retryTimeout := true
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ retryTimeout = cmd.readTimeout() == nil
+ return err
+ }
+
+ return nil
+ })
+ if lastErr == nil || !isRetryableError(lastErr, retryTimeout) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ err := c._generalProcessPipeline(ctx, cmds, p)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ var canRetry bool
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !isRetryableError(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ })
+ return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ if err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := txPipelineReadQueued(rd, statusCmd, cmds)
+ if err != nil {
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ return false, err
+}
+
+func wrapMultiExec(cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmds = append(cmds, make([]Cmder, 2)...)
+ copy(cmds[1:], cmds[:len(cmds)-2])
+ cmds[0] = NewStatusCmd("multi")
+ cmds[len(cmds)-1] = NewSliceCmd("exec")
+ return cmds
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+ *baseClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: newBaseClient(opt, newConnPool(opt)),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+
+ return &c
+}
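+
+// Illustrative sketch (not part of the upstream source): constructing a client
+// and checking connectivity; the address is an assumption about the local setup:
+//
+//    rdb := NewClient(&Options{
+//        Addr: "localhost:6379",
+//    })
+//    if err := rdb.Ping().Err(); err != nil {
+//        // handle connection error
+//    }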
+
+func (c *Client) clone() *Client {
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := c.clone()
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ return clone
+}
+
+func (c *Client) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := c.clone()
+ clone.ctx = ctx
+ return clone
+}
+
+func (c *Client) Conn() *Conn {
+ return newConn(c.ctx, c.opt, pool.NewSingleConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(args ...interface{}) *Cmd {
+ return c.DoContext(c.ctx, args...)
+}
+
+func (c *Client) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ _ = c.ProcessContext(ctx, cmd)
+ return cmd
+}
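+
+// Illustrative sketch (not part of the upstream source): Do builds an ad-hoc
+// command from raw arguments, which is useful for commands without a typed
+// helper; rdb is assumed to be an existing *Client:
+//
+//    val, err := rdb.Do("set", "key", "value").Result()
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = val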
+
+func (c *Client) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Client) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
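+
+// Illustrative sketch (not part of the upstream source): queueing two commands
+// inside a MULTI/EXEC transaction via TxPipelined, assuming an existing
+// *Client named rdb:
+//
+//    cmds, err := rdb.TxPipelined(func(pipe Pipeliner) error {
+//        pipe.Incr("counter")
+//        pipe.Expire("counter", time.Hour)
+//        return nil
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = cmds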
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(channels []string) (*pool.Conn, error) {
+ return c.newConn(context.TODO())
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+}
+
+// Conn is like Client, but its pool contains a single connection.
+type Conn struct {
+ *conn
+ ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ conn: &conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ },
+ ctx: ctx,
+ }
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ return &c
+}
+
+func (c *Conn) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Conn) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.baseClient.process(ctx, cmd)
+}
+
+func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/vendor/github.com/go-redis/redis/v7/result.go b/vendor/github.com/go-redis/redis/v7/result.go
new file mode 100644
index 0000000000..5bec26ca95
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/result.go
@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
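+
+// Illustrative sketch (not part of the upstream source): these constructors are
+// intended for stubbing command results in tests, e.g.:
+//
+//    cmd := NewCmdResult("OK", nil)
+//    val, err := cmd.Result() // val == "OK", err == nil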
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+ var cmd StringStringMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+ var cmd StringIntMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v7/ring.go b/vendor/github.com/go-redis/redis/v7/ring.go
new file mode 100644
index 0000000000..44fc623f8a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/ring.go
@@ -0,0 +1,726 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/consistenthash"
+ "github.com/go-redis/redis/v7/internal/hashtag"
+ "github.com/go-redis/redis/v7/internal/pool"
+)
+
+// Hash is the type of the hash function used in consistent hashing.
+type Hash consistenthash.Hash
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // Map of name => password of ring shards, to allow different shards to have
+ // different passwords. It will be ignored if the Password field is set.
+ Passwords map[string]string
+
+ // Frequency of PING commands sent to check shard availability.
+ // A shard is considered down after 3 consecutive failed checks.
+ HeartbeatFrequency time.Duration
+
+ // Hash function used in consistent hash.
+ // Default is crc32.ChecksumIEEE.
+ Hash Hash
+
+ // Number of replicas in consistent hash.
+ // Default is 100 replicas.
+ //
+ // A higher number of replicas provides less deviation, that is, keys are
+ // distributed across nodes more evenly.
+ //
+ // Following is deviation for common nreplicas:
+ // --------------------------------------------------------
+ // | nreplicas | standard error | 99% confidence interval |
+ // | 10 | 0.3152 | (0.37, 1.98) |
+ // | 100 | 0.0997 | (0.76, 1.28) |
+ // | 1000 | 0.0316 | (0.92, 1.09) |
+ // --------------------------------------------------------
+ //
+ // See https://arxiv.org/abs/1406.2294 for reference
+ HashReplicas int
+
+ // NewClient creates a shard client with the provided name and options.
+ NewClient func(name string, opt *Options) *Client
+
+ // Optional hook that is called when a new shard is created.
+ OnNewShard func(*Client)
+
+ // Following options are copied from Options struct.
+
+ OnConnect func(*Conn) error
+
+ DB int
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+func (opt *RingOptions) init() {
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.HashReplicas == 0 {
+ opt.HashReplicas = 100
+ }
+
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions(shard string) *Options {
+ return &Options{
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Password: opt.getPassword(shard),
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ }
+}
+
+func (opt *RingOptions) getPassword(shard string) string {
+ if opt.Password == "" {
+ return opt.Passwords[shard]
+ }
+ return opt.Password
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set the shard state and returns true if the state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ hash *consistenthash.Map
+ shards map[string]*ringShard // read only
+ list []*ringShard // read only
+ len int
+ closed bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+ return &ringShards{
+ opt: opt,
+
+ hash: newConsistentHash(opt),
+ shards: make(map[string]*ringShard),
+ }
+}
+
+func (c *ringShards) Add(name string, cl *Client) {
+ shard := &ringShard{Client: cl}
+ c.hash.Add(name)
+ c.shards[name] = shard
+ c.list = append(c.list, shard)
+}
+
+func (c *ringShards) List() []*ringShard {
+ c.mu.RLock()
+ list := c.list
+ c.mu.RUnlock()
+ return list
+}
+
+func (c *ringShards) Hash(key string) string {
+ c.mu.RLock()
+ hash := c.hash.Get(key)
+ c.mu.RUnlock()
+ return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ return nil, pool.ErrClosed
+ }
+
+ hash := c.hash.Get(key)
+ if hash == "" {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ shard := c.shards[hash]
+ c.mu.RUnlock()
+
+ return shard, nil
+}
+
+func (c *ringShards) GetByHash(name string) (*ringShard, error) {
+ if name == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ shard := c.shards[name]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+ for range ticker.C {
+ var rebalance bool
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ break
+ }
+
+ shards := c.list
+ c.mu.RUnlock()
+
+ for _, shard := range shards {
+ err := shard.Client.Ping().Err()
+ if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
+ internal.Logger.Printf("ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.rebalance()
+ }
+ }
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+ c.mu.RLock()
+ shards := c.shards
+ c.mu.RUnlock()
+
+ hash := newConsistentHash(c.opt)
+ var shardsNum int
+ for name, shard := range shards {
+ if shard.IsUp() {
+ hash.Add(name)
+ shardsNum++
+ }
+ }
+
+ c.mu.Lock()
+ c.hash = hash
+ c.len = shardsNum
+ c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+ c.mu.RLock()
+ l := c.len
+ c.mu.RUnlock()
+ return l
+}
+
+func (c *ringShards) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, shard := range c.shards {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.hash = nil
+ c.shards = nil
+ c.list = nil
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+ opt *RingOptions
+ shards *ringShards
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ *ring
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ ring := Ring{
+ ring: &ring{
+ opt: opt,
+ shards: newRingShards(opt),
+ },
+ ctx: context.Background(),
+ }
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ for name, addr := range opt.Addrs {
+ shard := newRingShard(opt, name, addr)
+ ring.shards.Add(name, shard)
+ }
+
+ go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+ return &ring
+}
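+
+// Illustrative sketch (not part of the upstream source): creating a ring over
+// two shards; the shard names and addresses are assumptions:
+//
+//    ring := NewRing(&RingOptions{
+//        Addrs: map[string]string{
+//            "shard1": "localhost:7000",
+//            "shard2": "localhost:7001",
+//        },
+//    })
+//    err := ring.Set("key", "value", 0).Err()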
+
+func newRingShard(opt *RingOptions, name, addr string) *Client {
+ clopt := opt.clientOptions(name)
+ clopt.Addr = addr
+ var shard *Client
+ if opt.NewClient != nil {
+ shard = opt.NewClient(name, clopt)
+ } else {
+ shard = NewClient(clopt)
+ }
+ if opt.OnNewShard != nil {
+ opt.OnNewShard(shard)
+ }
+ return shard
+}
+
+func (c *Ring) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(args ...interface{}) *Cmd {
+ return c.DoContext(c.ctx, args...)
+}
+
+func (c *Ring) DoContext(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ _ = c.ProcessContext(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Ring) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.shards.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ //TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ //TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(channels...)
+}
+
+// ForEachShard concurrently calls fn on each live shard in the ring.
+// It returns the first error, if any.
+func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+ shards := c.shards.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
+ shards := c.shards.List()
+ firstErr := errRingShardsDown
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command().Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get()
+ if err != nil {
+ return nil
+ }
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf("info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ pos := cmdFirstKeyPos(cmd, cmdInfo)
+ if pos == 0 {
+ return c.shards.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ err := c._process(ctx, cmd)
+ if err != nil {
+ cmd.SetErr(err)
+ return err
+ }
+ return nil
+}
+
+func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.ProcessContext(ctx, cmd)
+ if lastErr == nil || !isRetryableError(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, false)
+ })
+}
+
+func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, true)
+ })
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ cmdsMap := make(map[string][]Cmder)
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ if hash != "" {
+ hash = c.shards.Hash(hashtag.Key(hash))
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+ ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+ //TODO: retry?
+ shard, err := c.shards.GetByHash(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ if tx {
+ err = shard.Client.processTxPipeline(ctx, cmds)
+ } else {
+ err = shard.Client.processPipeline(ctx, cmds)
+ }
+ return err
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ return c.shards.Close()
+}
+
+func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.shards.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(fn, keys...)
+}
+
+func newConsistentHash(opt *RingOptions) *consistenthash.Map {
+ return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
+}
diff --git a/vendor/github.com/go-redis/redis/v7/script.go b/vendor/github.com/go-redis/redis/v7/script.go
new file mode 100644
index 0000000000..88b7d0a2ea
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/script.go
@@ -0,0 +1,62 @@
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type scripter interface {
+ Eval(script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(hashes ...string) *BoolSliceCmd
+ ScriptLoad(script string) *StringCmd
+}
+
+var _ scripter = (*Client)(nil)
+var _ scripter = (*Ring)(nil)
+var _ scripter = (*ClusterClient)(nil)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(c scripter) *StringCmd {
+ return c.ScriptLoad(s.src)
+}
+
+func (s *Script) Exists(c scripter) *BoolSliceCmd {
+ return c.ScriptExists(s.hash)
+}
+
+func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(c, keys, args...)
+ }
+ return r
+}
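+
+// Illustrative sketch (not part of the upstream source): defining and running a
+// small Lua script; the script body and key are assumptions, and rdb may be any
+// scripter such as *Client:
+//
+//    var incrBy = NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
+//    n, err := incrBy.Run(rdb, []string{"counter"}, 10).Int64()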
diff --git a/vendor/github.com/go-redis/redis/v7/sentinel.go b/vendor/github.com/go-redis/redis/v7/sentinel.go
new file mode 100644
index 0000000000..8aa40ef799
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/sentinel.go
@@ -0,0 +1,509 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v7/internal/pool"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+ SentinelUsername string
+ SentinelPassword string
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(*Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) options() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ opt := failoverOpt.options()
+ opt.init()
+
+ failover := &sentinelFailover{
+ masterName: failoverOpt.MasterName,
+ sentinelAddrs: failoverOpt.SentinelAddrs,
+ username: failoverOpt.SentinelUsername,
+ password: failoverOpt.SentinelPassword,
+
+ opt: opt,
+ }
+
+ c := Client{
+ baseClient: newBaseClient(opt, failover.Pool()),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+ c.onClose = failover.Close
+
+ return &c
+}
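+
+// Illustrative sketch (not part of the upstream source): connecting through
+// Sentinel; the master name and sentinel address are assumptions:
+//
+//    rdb := NewFailoverClient(&FailoverOptions{
+//        MasterName:    "mymaster",
+//        SentinelAddrs: []string{"localhost:26379"},
+//    })
+//    err := rdb.Ping().Err()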
+
+//------------------------------------------------------------------------------
+
+type SentinelClient struct {
+ *baseClient
+ ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ ctx: context.Background(),
+ }
+ return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *SentinelClient) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *SentinelClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.baseClient.process(ctx, cmd)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(channels []string) (*pool.Conn, error) {
+ return c.newConn(context.TODO())
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping() *StringCmd {
+ cmd := NewStringCmd("ping")
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create empty subscription.
+func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create empty subscription.
+func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(name string) *SliceCmd {
+ cmd := NewSliceCmd("sentinel", "sentinels", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master were not reachable, and without
+// asking other Sentinels for agreement.
+func (c *SentinelClient) Failover(name string) *StatusCmd {
+ cmd := NewStatusCmd("sentinel", "failover", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Reset resets all the masters with a matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(pattern string) *IntCmd {
+ cmd := NewIntCmd("sentinel", "reset", pattern)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig() *StatusCmd {
+ cmd := NewStatusCmd("sentinel", "flushconfig")
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(name string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("sentinel", "master", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters() *SliceCmd {
+ cmd := NewSliceCmd("sentinel", "masters")
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(name string) *SliceCmd {
+ cmd := NewSliceCmd("sentinel", "slaves", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(name string) *StringCmd {
+ cmd := NewStringCmd("sentinel", "ckquorum", name)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd("sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Set changes configuration parameters of a specific master.
+func (c *SentinelClient) Set(name, option, value string) *StringCmd {
+ cmd := NewStringCmd("sentinel", "set", name, option, value)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Remove removes the specified master: the master will no longer be
+// monitored, and it will be completely removed from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(name string) *StringCmd {
+ cmd := NewStringCmd("sentinel", "remove", name)
+ _ = c.Process(cmd)
+ return cmd
+}
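A minimal sketch of querying Sentinel with the commands defined above, assuming a Sentinel at a placeholder address monitoring a master named "mymaster", with the redis/v7 package imported as redis and fmt and net from the standard library.

	func sentinelStatus() error {
		sentinel := redis.NewSentinelClient(&redis.Options{Addr: "127.0.0.1:26379"})
		defer sentinel.Close()

		// GetMasterAddrByName returns the address as a two-element slice: [ip, port].
		addr, err := sentinel.GetMasterAddrByName("mymaster").Result()
		if err != nil {
			return err
		}
		fmt.Printf("master at %s\n", net.JoinHostPort(addr[0], addr[1]))

		// CkQuorum reports whether the deployment can reach quorum for a failover.
		return sentinel.CkQuorum("mymaster").Err()
	}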
+
+type sentinelFailover struct {
+ sentinelAddrs []string
+
+ opt *Options
+ username string
+ password string
+
+ pool *pool.ConnPool
+ poolOnce sync.Once
+
+ mu sync.RWMutex
+ masterName string
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) Pool() *pool.ConnPool {
+ c.poolOnce.Do(func() {
+ opt := *c.opt
+ opt.Dialer = c.dial
+ c.pool = newConnPool(&opt)
+ })
+ return c.pool
+}
+
+func (c *sentinelFailover) dial(ctx context.Context, network, _ string) (net.Conn, error) {
+ addr, err := c.MasterAddr()
+ if err != nil {
+ return nil, err
+ }
+ if c.opt.Dialer != nil {
+ return c.opt.Dialer(ctx, network, addr)
+ }
+ return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
+}
+
+func (c *sentinelFailover) MasterAddr() (string, error) {
+ addr, err := c.masterAddr()
+ if err != nil {
+ return "", err
+ }
+ c.switchMaster(addr)
+ return addr, nil
+}
+
+func (c *sentinelFailover) masterAddr() (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr := c.getMasterAddr(sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr := c.getMasterAddr(c.sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(&Options{
+ Addr: sentinelAddr,
+ Dialer: c.opt.Dialer,
+
+ Username: c.username,
+ Password: c.password,
+
+ MaxRetries: c.opt.MaxRetries,
+
+ DialTimeout: c.opt.DialTimeout,
+ ReadTimeout: c.opt.ReadTimeout,
+ WriteTimeout: c.opt.WriteTimeout,
+
+ PoolSize: c.opt.PoolSize,
+ PoolTimeout: c.opt.PoolTimeout,
+ IdleTimeout: c.opt.IdleTimeout,
+ IdleCheckFrequency: c.opt.IdleCheckFrequency,
+
+ TLSConfig: c.opt.TLSConfig,
+ })
+
+ masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
+ if err != nil {
+ internal.Logger.Printf("sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.masterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(sentinel *SentinelClient) string {
+ addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
+ if err != nil {
+ internal.Logger.Printf("sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.masterName, err)
+ return ""
+ }
+ return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) switchMaster(addr string) {
+ c.mu.RLock()
+ masterAddr := c._masterAddr
+ c.mu.RUnlock()
+ if masterAddr == addr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c._masterAddr == addr {
+ return
+ }
+
+ internal.Logger.Printf("sentinel: new master=%q addr=%q",
+ c.masterName, addr)
+ _ = c.Pool().Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ c._masterAddr = addr
+}
+
+func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels()
+
+ c.pubsub = sentinel.Subscribe("+switch-master")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels() {
+ sentinels, err := c.sentinel.Sentinels(c.masterName).Result()
+ if err != nil {
+ internal.Logger.Printf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf("sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.masterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ch := pubsub.Channel()
+ for {
+ msg, ok := <-ch
+ if !ok {
+ break
+ }
+
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.masterName {
+ internal.Logger.Printf("sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.switchMaster(addr)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/go-redis/redis/v7/tx.go b/vendor/github.com/go-redis/redis/v7/tx.go
new file mode 100644
index 0000000000..9ae159015e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/tx.go
@@ -0,0 +1,159 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v7/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction has failed.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH, it is better to use Pipeline.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks
+ ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
+ },
+ hooks: c.hooks.clone(),
+ ctx: ctx,
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.init()
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *Tx) Process(cmd Cmder) error {
+ return c.ProcessContext(c.ctx, cmd)
+}
+
+func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and marks the given keys (if any) to be
+// watched for conditional execution.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
+ return c.WatchContext(c.ctx, fn, keys...)
+}
+
+func (c *Client) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(keys...).Err(); err != nil {
+ _ = tx.Close()
+ return err
+ }
+ }
+
+ err := fn(tx)
+ _ = tx.Close()
+ return err
+}
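The optimistic-locking pattern this enables is easiest to see in a sketch: the hypothetical increment helper below retries a WATCH/MULTI/EXEC loop until it wins or gives up. It assumes the redis/v7 package is imported as redis; the key name and retry limit are placeholders.

	func increment(rdb *redis.Client, key string) error {
		txf := func(tx *redis.Tx) error {
			n, err := tx.Get(key).Int64()
			if err != nil && err != redis.Nil {
				return err
			}
			// The queued commands run only if key was not modified after WATCH.
			_, err = tx.TxPipelined(func(pipe redis.Pipeliner) error {
				pipe.Set(key, n+1, 0)
				return nil
			})
			return err
		}

		for i := 0; i < 10; i++ {
			err := rdb.Watch(txf, key)
			if err != redis.TxFailedErr {
				return err // nil on success, or a non-retryable error
			}
			// The optimistic lock was lost; retry.
		}
		return redis.TxFailedErr
	}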
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close() error {
+ _ = c.Unwatch().Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ _ = c.Process(cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes the commands queued in fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
diff --git a/vendor/github.com/go-redis/redis/v7/universal.go b/vendor/github.com/go-redis/redis/v7/universal.go
new file mode 100644
index 0000000000..005ca682c5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v7/universal.go
@@ -0,0 +1,198 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions contains the information required by UniversalClient to
+// establish connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(*Conn) error
+ Username string
+ Password string
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+ MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which, based on the provided options,
+// can connect to a cluster, to sentinel-backed failover instances, or to
+// simple single-instance servers. This can be useful for testing
+// cluster-specific applications locally.
+type UniversalClient interface {
+ Cmdable
+ Context() context.Context
+ AddHook(Hook)
+ Watch(fn func(*Tx) error, keys ...string) error
+ Do(args ...interface{}) *Cmd
+ DoContext(ctx context.Context, args ...interface{}) *Cmd
+ Process(cmd Cmder) error
+ ProcessContext(ctx context.Context, cmd Cmder) error
+ Subscribe(channels ...string) *PubSub
+ PSubscribe(channels ...string) *PubSub
+ Close() error
+}
+
+var _ UniversalClient = (*Client)(nil)
+var _ UniversalClient = (*ClusterClient)(nil)
+var _ UniversalClient = (*Ring)(nil)
+
+// NewUniversalClient returns a new multi-purpose client. The type of client
+// returned depends on the following conditions:
+//
+// 1. If a MasterName is passed, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Redis Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
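A minimal sketch of the selection logic described above; the address is a placeholder, and the redis/v7 package is assumed to be imported as redis.

	func newUniversal() redis.UniversalClient {
		return redis.NewUniversalClient(&redis.UniversalOptions{
			// One address and no MasterName -> *redis.Client
			// MasterName set                -> sentinel-backed failover *redis.Client
			// Two or more addresses         -> *redis.ClusterClient
			Addrs: []string{"127.0.0.1:6379"},
		})
	}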