author    6543 <6543@obermui.de>                2021-02-10 22:28:32 +0100
committer GitHub <noreply@github.com>           2021-02-10 21:28:32 +0000
commit    ac97ea573c1b10d03e72775e8f74b9fe5453bfc8 (patch)
tree      99fa7488782a2a6c4362c49e4cdf04594c662ca3 /vendor/github.com
parent    4cffc46f651205b9d7eb0b1df46dd6117c6d95e9 (diff)
download  gitea-ac97ea573c1b10d03e72775e8f74b9fe5453bfc8.tar.gz
          gitea-ac97ea573c1b10d03e72775e8f74b9fe5453bfc8.zip
[Vendor] Update go-redis to v8.5.0 (#13749)
* Update go-redis to v8.4.0
* github.com/go-redis/redis/v8 v8.4.0 -> v8.5.0
* Apply suggestions from code review
  Co-authored-by: zeripath <art27@cantab.net>
* TODO
* Use the Queue termination channel as the default context for pushes
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* missed one
  Signed-off-by: Andrew Thornton <art27@cantab.net>

Co-authored-by: zeripath <art27@cantab.net>
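For context on what this module bump means for callers: the headline change in go-redis v8 is that every command takes a `context.Context` as its first argument, which is why the commit wires the Queue termination channel in as the default context for pushes. A minimal sketch of the caller-side difference (an illustration, not code from this commit):

``` go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8" // v7 import path was "github.com/go-redis/redis/v7"
)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// v7: rdb.Ping().Result()
	// v8: every command takes a context.Context as its first argument,
	// so cancellation/termination can propagate into Redis calls.
	ctx := context.Background()
	pong, err := rdb.Ping(ctx).Result()
	fmt.Println(pong, err)
}
```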
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/dgryski/go-rendezvous/LICENSE | 21
-rw-r--r--  vendor/github.com/dgryski/go-rendezvous/rdv.go | 79
-rw-r--r--  vendor/github.com/go-redis/redis/v7/.travis.yml | 22
-rw-r--r--  vendor/github.com/go-redis/redis/v7/CHANGELOG.md | 46
-rw-r--r--  vendor/github.com/go-redis/redis/v7/README.md | 128
-rw-r--r--  vendor/github.com/go-redis/redis/v7/cluster_commands.go | 22
-rw-r--r--  vendor/github.com/go-redis/redis/v7/commands.go | 2643
-rw-r--r--  vendor/github.com/go-redis/redis/v7/go.mod | 15
-rw-r--r--  vendor/github.com/go-redis/redis/v7/go.sum | 47
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go | 81
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/internal.go | 24
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/log.go | 8
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go | 112
-rw-r--r--  vendor/github.com/go-redis/redis/v7/internal/util.go | 56
-rw-r--r--  vendor/github.com/go-redis/redis/v7/script.go | 62
-rw-r--r--  vendor/github.com/go-redis/redis/v7/sentinel.go | 509
-rw-r--r--  vendor/github.com/go-redis/redis/v8/.gitignore (renamed from vendor/github.com/go-redis/redis/v7/.gitignore) | 1
-rw-r--r--  vendor/github.com/go-redis/redis/v8/.golangci.yml (renamed from vendor/github.com/go-redis/redis/v7/.golangci.yml) | 9
-rw-r--r--  vendor/github.com/go-redis/redis/v8/.prettierrc | 4
-rw-r--r--  vendor/github.com/go-redis/redis/v8/.travis.yml | 20
-rw-r--r--  vendor/github.com/go-redis/redis/v8/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/go-redis/redis/v8/LICENSE (renamed from vendor/github.com/go-redis/redis/v7/LICENSE) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/Makefile (renamed from vendor/github.com/go-redis/redis/v7/Makefile) | 7
-rw-r--r--  vendor/github.com/go-redis/redis/v8/README.md | 159
-rw-r--r--  vendor/github.com/go-redis/redis/v8/cluster.go (renamed from vendor/github.com/go-redis/redis/v7/cluster.go) | 451
-rw-r--r--  vendor/github.com/go-redis/redis/v8/cluster_commands.go | 25
-rw-r--r--  vendor/github.com/go-redis/redis/v8/command.go (renamed from vendor/github.com/go-redis/redis/v7/command.go) | 830
-rw-r--r--  vendor/github.com/go-redis/redis/v8/commands.go | 2790
-rw-r--r--  vendor/github.com/go-redis/redis/v8/doc.go (renamed from vendor/github.com/go-redis/redis/v7/doc.go) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/error.go (renamed from vendor/github.com/go-redis/redis/v7/error.go) | 35
-rw-r--r--  vendor/github.com/go-redis/redis/v8/go.mod | 11
-rw-r--r--  vendor/github.com/go-redis/redis/v8/go.sum | 97
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/arg.go | 56
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go (renamed from vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go) | 3
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go | 151
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go | 87
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/instruments.go | 33
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/internal.go | 29
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/log.go | 24
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/once.go (renamed from vendor/github.com/go-redis/redis/v7/internal/once.go) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/pool/conn.go (renamed from vendor/github.com/go-redis/redis/v7/internal/pool/conn.go) | 62
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/pool/pool.go (renamed from vendor/github.com/go-redis/redis/v7/internal/pool/pool.go) | 52
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go | 58
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go (renamed from vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go) | 112
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/proto/reader.go (renamed from vendor/github.com/go-redis/redis/v7/internal/proto/reader.go) | 31
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/proto/scan.go (renamed from vendor/github.com/go-redis/redis/v7/internal/proto/scan.go) | 11
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/proto/writer.go (renamed from vendor/github.com/go-redis/redis/v7/internal/proto/writer.go) | 58
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/rand/rand.go | 45
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/safe.go | 11
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/unsafe.go | 20
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/util.go | 73
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/util/safe.go (renamed from vendor/github.com/go-redis/redis/v7/internal/util/safe.go) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/util/strconv.go (renamed from vendor/github.com/go-redis/redis/v7/internal/util/strconv.go) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go (renamed from vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go) | 0
-rw-r--r--  vendor/github.com/go-redis/redis/v8/iterator.go (renamed from vendor/github.com/go-redis/redis/v7/iterator.go) | 10
-rw-r--r--  vendor/github.com/go-redis/redis/v8/options.go (renamed from vendor/github.com/go-redis/redis/v7/options.go) | 114
-rw-r--r--  vendor/github.com/go-redis/redis/v8/pipeline.go (renamed from vendor/github.com/go-redis/redis/v7/pipeline.go) | 31
-rw-r--r--  vendor/github.com/go-redis/redis/v8/pubsub.go (renamed from vendor/github.com/go-redis/redis/v7/pubsub.go) | 180
-rw-r--r--  vendor/github.com/go-redis/redis/v8/redis.go (renamed from vendor/github.com/go-redis/redis/v7/redis.go) | 330
-rw-r--r--  vendor/github.com/go-redis/redis/v8/result.go (renamed from vendor/github.com/go-redis/redis/v7/result.go) | 44
-rw-r--r--  vendor/github.com/go-redis/redis/v8/ring.go (renamed from vendor/github.com/go-redis/redis/v7/ring.go) | 349
-rw-r--r--  vendor/github.com/go-redis/redis/v8/script.go | 65
-rw-r--r--  vendor/github.com/go-redis/redis/v8/sentinel.go | 738
-rw-r--r--  vendor/github.com/go-redis/redis/v8/tx.go (renamed from vendor/github.com/go-redis/redis/v7/tx.go) | 51
-rw-r--r--  vendor/github.com/go-redis/redis/v8/universal.go (renamed from vendor/github.com/go-redis/redis/v7/universal.go) | 56
65 files changed, 6335 insertions, 4868 deletions
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
new file mode 100644
index 0000000000..22080f736a
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 0000000000..7a6f8203c6
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+ nodes map[string]int
+ nstr []string
+ nhash []uint64
+ hash Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+ r := &Rendezvous{
+ nodes: make(map[string]int, len(nodes)),
+ nstr: make([]string, len(nodes)),
+ nhash: make([]uint64, len(nodes)),
+ hash: hash,
+ }
+
+ for i, n := range nodes {
+ r.nodes[n] = i
+ r.nstr[i] = n
+ r.nhash[i] = hash(n)
+ }
+
+ return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+ // short-circuit if we're empty
+ if len(r.nodes) == 0 {
+ return ""
+ }
+
+ khash := r.hash(k)
+
+ var midx int
+ var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+ for i, nhash := range r.nhash[1:] {
+ if h := xorshiftMult64(khash ^ nhash); h > mhash {
+ midx = i + 1
+ mhash = h
+ }
+ }
+
+ return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+ r.nodes[node] = len(r.nstr)
+ r.nstr = append(r.nstr, node)
+ r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+ // find index of node to remove
+ nidx := r.nodes[node]
+
+ // remove from the slices
+ l := len(r.nstr)
+ r.nstr[nidx] = r.nstr[l]
+ r.nstr = r.nstr[:l]
+
+ r.nhash[nidx] = r.nhash[l]
+ r.nhash = r.nhash[:l]
+
+ // update the map
+ delete(r.nodes, node)
+ moved := r.nstr[nidx]
+ r.nodes[moved] = nidx
+}
+
+func xorshiftMult64(x uint64) uint64 {
+ x ^= x >> 12 // a
+ x ^= x << 25 // b
+ x ^= x >> 27 // c
+ return x * 2685821657736338717
+}
diff --git a/vendor/github.com/go-redis/redis/v7/.travis.yml b/vendor/github.com/go-redis/redis/v7/.travis.yml
deleted file mode 100644
index 3f93932bc8..0000000000
--- a/vendor/github.com/go-redis/redis/v7/.travis.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-dist: xenial
-language: go
-
-services:
- - redis-server
-
-go:
- - 1.12.x
- - 1.13.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-env:
- - GO111MODULE=on
-
-go_import_path: github.com/go-redis/redis
-
-before_install:
- - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0
diff --git a/vendor/github.com/go-redis/redis/v7/CHANGELOG.md b/vendor/github.com/go-redis/redis/v7/CHANGELOG.md
deleted file mode 100644
index bd4eccff24..0000000000
--- a/vendor/github.com/go-redis/redis/v7/CHANGELOG.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Changelog
-
-## v7.2
-
-- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
-
-## v7.1
-
-- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` interface.
-
-## v7
-
-- *Important*. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a transactional pipeline.
-- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
-- WithContext now can not be used to create a shallow copy of the client.
-- New methods ProcessContext, DoContext, and ExecContext.
-- Client respects Context.Deadline when setting net.Conn deadline.
-- Client listens on Context.Done while waiting for a connection from the pool and returns an error when context context is cancelled.
-- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow detecting reconnections.
-- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse the time.
-- `SetLimiter` is removed and added `Options.Limiter` instead.
-- `HMSet` is deprecated as of Redis v4.
-
-## v6.15
-
-- Cluster and Ring pipelines process commands for each node in its own goroutine.
-
-## 6.14
-
-- Added Options.MinIdleConns.
-- Added Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- Add Client.Do to simplify creating custom commands.
-- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
-- Lower memory usage.
-
-## v6.13
-
-- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards.
-- Cluster client was optimized to use much less memory when reloading cluster state.
-- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead.
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.12
-
-- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v7/README.md b/vendor/github.com/go-redis/redis/v7/README.md
deleted file mode 100644
index 0fbb506ead..0000000000
--- a/vendor/github.com/go-redis/redis/v7/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# Redis client for Golang
-
-[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
-[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
-[![Airbrake](https://img.shields.io/badge/kudos-airbrake.io-orange.svg)](https://airbrake.io)
-
-Supports:
-
-- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
-- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
-- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
-- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
-- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
-- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
-- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
-- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
-- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
-- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
-- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
-- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
-- [Cache friendly](https://github.com/go-redis/cache).
-- [Rate limiting](https://github.com/go-redis/redis_rate).
-- [Distributed Locks](https://github.com/bsm/redislock).
-
-API docs: https://godoc.org/github.com/go-redis/redis.
-Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.
-
-## Installation
-
-go-redis requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and uses import versioning. So please make sure to initialize a Go module before installing go-redis:
-
-``` shell
-go mod init github.com/my/repo
-go get github.com/go-redis/redis/v7
-```
-
-Import:
-
-``` go
-import "github.com/go-redis/redis/v7"
-```
-
-## Quickstart
-
-``` go
-func ExampleNewClient() {
- client := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- pong, err := client.Ping().Result()
- fmt.Println(pong, err)
- // Output: PONG <nil>
-}
-
-func ExampleClient() {
- client := redis.NewClient(&redis.Options{
- Addr: "localhost:6379",
- Password: "", // no password set
- DB: 0, // use default DB
- })
- err := client.Set("key", "value", 0).Err()
- if err != nil {
- panic(err)
- }
-
- val, err := client.Get("key").Result()
- if err != nil {
- panic(err)
- }
- fmt.Println("key", val)
-
- val2, err := client.Get("key2").Result()
- if err == redis.Nil {
- fmt.Println("key2 does not exist")
- } else if err != nil {
- panic(err)
- } else {
- fmt.Println("key2", val2)
- }
- // Output: key value
- // key2 does not exist
-}
-```
-
-## Howto
-
-Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package.
-
-## Look and feel
-
-Some corner cases:
-
-``` go
-// SET key value EX 10 NX
-set, err := client.SetNX("key", "value", 10*time.Second).Result()
-
-// SORT list LIMIT 0 2 ASC
-vals, err := client.Sort("list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
-
-// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
-vals, err := client.ZRangeByScoreWithScores("zset", &redis.ZRangeBy{
- Min: "-inf",
- Max: "+inf",
- Offset: 0,
- Count: 2,
-}).Result()
-
-// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
-vals, err := client.ZInterStore("out", &redis.ZStore{
- Keys: []string{"zset1", "zset2"},
- Weights: []int64{2, 3}
-}).Result()
-
-// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
-vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
-
-// custom command
-res, err := client.Do("set", "key", "value").Result()
-```
-
-## See also
-
-- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/vmihailenco/taskq)
diff --git a/vendor/github.com/go-redis/redis/v7/cluster_commands.go b/vendor/github.com/go-redis/redis/v7/cluster_commands.go
deleted file mode 100644
index c9b9b9de24..0000000000
--- a/vendor/github.com/go-redis/redis/v7/cluster_commands.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package redis
-
-import "sync/atomic"
-
-func (c *ClusterClient) DBSize() *IntCmd {
- cmd := NewIntCmd("dbsize")
- var size int64
- err := c.ForEachMaster(func(master *Client) error {
- n, err := master.DBSize().Result()
- if err != nil {
- return err
- }
- atomic.AddInt64(&size, n)
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- return cmd
- }
- cmd.val = size
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v7/commands.go b/vendor/github.com/go-redis/redis/v7/commands.go
deleted file mode 100644
index da5ceda13e..0000000000
--- a/vendor/github.com/go-redis/redis/v7/commands.go
+++ /dev/null
@@ -1,2643 +0,0 @@
-package redis
-
-import (
- "errors"
- "io"
- "time"
-
- "github.com/go-redis/redis/v7/internal"
-)
-
-func usePrecise(dur time.Duration) bool {
- return dur < time.Second || dur%time.Second != 0
-}
-
-func formatMs(dur time.Duration) int64 {
- if dur > 0 && dur < time.Millisecond {
- internal.Logger.Printf(
- "specified duration is %s, but minimal supported value is %s",
- dur, time.Millisecond,
- )
- }
- return int64(dur / time.Millisecond)
-}
-
-func formatSec(dur time.Duration) int64 {
- if dur > 0 && dur < time.Second {
- internal.Logger.Printf(
- "specified duration is %s, but minimal supported value is %s",
- dur, time.Second,
- )
- }
- return int64(dur / time.Second)
-}
-
-func appendArgs(dst, src []interface{}) []interface{} {
- if len(src) == 1 {
- switch v := src[0].(type) {
- case []string:
- for _, s := range v {
- dst = append(dst, s)
- }
- return dst
- case map[string]interface{}:
- for k, v := range v {
- dst = append(dst, k, v)
- }
- return dst
- }
- }
-
- dst = append(dst, src...)
- return dst
-}
-
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- Command() *CommandsInfoCmd
- ClientGetName() *StringCmd
- Echo(message interface{}) *StringCmd
- Ping() *StatusCmd
- Quit() *StatusCmd
- Del(keys ...string) *IntCmd
- Unlink(keys ...string) *IntCmd
- Dump(key string) *StringCmd
- Exists(keys ...string) *IntCmd
- Expire(key string, expiration time.Duration) *BoolCmd
- ExpireAt(key string, tm time.Time) *BoolCmd
- Keys(pattern string) *StringSliceCmd
- Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd
- Move(key string, db int) *BoolCmd
- ObjectRefCount(key string) *IntCmd
- ObjectEncoding(key string) *StringCmd
- ObjectIdleTime(key string) *DurationCmd
- Persist(key string) *BoolCmd
- PExpire(key string, expiration time.Duration) *BoolCmd
- PExpireAt(key string, tm time.Time) *BoolCmd
- PTTL(key string) *DurationCmd
- RandomKey() *StringCmd
- Rename(key, newkey string) *StatusCmd
- RenameNX(key, newkey string) *BoolCmd
- Restore(key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
- Sort(key string, sort *Sort) *StringSliceCmd
- SortStore(key, store string, sort *Sort) *IntCmd
- SortInterfaces(key string, sort *Sort) *SliceCmd
- Touch(keys ...string) *IntCmd
- TTL(key string) *DurationCmd
- Type(key string) *StatusCmd
- Scan(cursor uint64, match string, count int64) *ScanCmd
- SScan(key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
- Append(key, value string) *IntCmd
- BitCount(key string, bitCount *BitCount) *IntCmd
- BitOpAnd(destKey string, keys ...string) *IntCmd
- BitOpOr(destKey string, keys ...string) *IntCmd
- BitOpXor(destKey string, keys ...string) *IntCmd
- BitOpNot(destKey string, key string) *IntCmd
- BitPos(key string, bit int64, pos ...int64) *IntCmd
- BitField(key string, args ...interface{}) *IntSliceCmd
- Decr(key string) *IntCmd
- DecrBy(key string, decrement int64) *IntCmd
- Get(key string) *StringCmd
- GetBit(key string, offset int64) *IntCmd
- GetRange(key string, start, end int64) *StringCmd
- GetSet(key string, value interface{}) *StringCmd
- Incr(key string) *IntCmd
- IncrBy(key string, value int64) *IntCmd
- IncrByFloat(key string, value float64) *FloatCmd
- MGet(keys ...string) *SliceCmd
- MSet(values ...interface{}) *StatusCmd
- MSetNX(values ...interface{}) *BoolCmd
- Set(key string, value interface{}, expiration time.Duration) *StatusCmd
- SetBit(key string, offset int64, value int) *IntCmd
- SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(key string, offset int64, value string) *IntCmd
- StrLen(key string) *IntCmd
- HDel(key string, fields ...string) *IntCmd
- HExists(key, field string) *BoolCmd
- HGet(key, field string) *StringCmd
- HGetAll(key string) *StringStringMapCmd
- HIncrBy(key, field string, incr int64) *IntCmd
- HIncrByFloat(key, field string, incr float64) *FloatCmd
- HKeys(key string) *StringSliceCmd
- HLen(key string) *IntCmd
- HMGet(key string, fields ...string) *SliceCmd
- HSet(key string, values ...interface{}) *IntCmd
- HMSet(key string, values ...interface{}) *BoolCmd
- HSetNX(key, field string, value interface{}) *BoolCmd
- HVals(key string) *StringSliceCmd
- BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
- LIndex(key string, index int64) *StringCmd
- LInsert(key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(key string, pivot, value interface{}) *IntCmd
- LInsertAfter(key string, pivot, value interface{}) *IntCmd
- LLen(key string) *IntCmd
- LPop(key string) *StringCmd
- LPush(key string, values ...interface{}) *IntCmd
- LPushX(key string, values ...interface{}) *IntCmd
- LRange(key string, start, stop int64) *StringSliceCmd
- LRem(key string, count int64, value interface{}) *IntCmd
- LSet(key string, index int64, value interface{}) *StatusCmd
- LTrim(key string, start, stop int64) *StatusCmd
- RPop(key string) *StringCmd
- RPopLPush(source, destination string) *StringCmd
- RPush(key string, values ...interface{}) *IntCmd
- RPushX(key string, values ...interface{}) *IntCmd
- SAdd(key string, members ...interface{}) *IntCmd
- SCard(key string) *IntCmd
- SDiff(keys ...string) *StringSliceCmd
- SDiffStore(destination string, keys ...string) *IntCmd
- SInter(keys ...string) *StringSliceCmd
- SInterStore(destination string, keys ...string) *IntCmd
- SIsMember(key string, member interface{}) *BoolCmd
- SMembers(key string) *StringSliceCmd
- SMembersMap(key string) *StringStructMapCmd
- SMove(source, destination string, member interface{}) *BoolCmd
- SPop(key string) *StringCmd
- SPopN(key string, count int64) *StringSliceCmd
- SRandMember(key string) *StringCmd
- SRandMemberN(key string, count int64) *StringSliceCmd
- SRem(key string, members ...interface{}) *IntCmd
- SUnion(keys ...string) *StringSliceCmd
- SUnionStore(destination string, keys ...string) *IntCmd
- XAdd(a *XAddArgs) *StringCmd
- XDel(stream string, ids ...string) *IntCmd
- XLen(stream string) *IntCmd
- XRange(stream, start, stop string) *XMessageSliceCmd
- XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
- XRevRange(stream string, start, stop string) *XMessageSliceCmd
- XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
- XRead(a *XReadArgs) *XStreamSliceCmd
- XReadStreams(streams ...string) *XStreamSliceCmd
- XGroupCreate(stream, group, start string) *StatusCmd
- XGroupCreateMkStream(stream, group, start string) *StatusCmd
- XGroupSetID(stream, group, start string) *StatusCmd
- XGroupDestroy(stream, group string) *IntCmd
- XGroupDelConsumer(stream, group, consumer string) *IntCmd
- XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd
- XAck(stream, group string, ids ...string) *IntCmd
- XPending(stream, group string) *XPendingCmd
- XPendingExt(a *XPendingExtArgs) *XPendingExtCmd
- XClaim(a *XClaimArgs) *XMessageSliceCmd
- XClaimJustID(a *XClaimArgs) *StringSliceCmd
- XTrim(key string, maxLen int64) *IntCmd
- XTrimApprox(key string, maxLen int64) *IntCmd
- XInfoGroups(key string) *XInfoGroupsCmd
- BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd
- BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd
- ZAdd(key string, members ...*Z) *IntCmd
- ZAddNX(key string, members ...*Z) *IntCmd
- ZAddXX(key string, members ...*Z) *IntCmd
- ZAddCh(key string, members ...*Z) *IntCmd
- ZAddNXCh(key string, members ...*Z) *IntCmd
- ZAddXXCh(key string, members ...*Z) *IntCmd
- ZIncr(key string, member *Z) *FloatCmd
- ZIncrNX(key string, member *Z) *FloatCmd
- ZIncrXX(key string, member *Z) *FloatCmd
- ZCard(key string) *IntCmd
- ZCount(key, min, max string) *IntCmd
- ZLexCount(key, min, max string) *IntCmd
- ZIncrBy(key string, increment float64, member string) *FloatCmd
- ZInterStore(destination string, store *ZStore) *IntCmd
- ZPopMax(key string, count ...int64) *ZSliceCmd
- ZPopMin(key string, count ...int64) *ZSliceCmd
- ZRange(key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
- ZRank(key, member string) *IntCmd
- ZRem(key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(key string, start, stop int64) *IntCmd
- ZRemRangeByScore(key, min, max string) *IntCmd
- ZRemRangeByLex(key, min, max string) *IntCmd
- ZRevRange(key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd
- ZRevRank(key, member string) *IntCmd
- ZScore(key, member string) *FloatCmd
- ZUnionStore(dest string, store *ZStore) *IntCmd
- PFAdd(key string, els ...interface{}) *IntCmd
- PFCount(keys ...string) *IntCmd
- PFMerge(dest string, keys ...string) *StatusCmd
- BgRewriteAOF() *StatusCmd
- BgSave() *StatusCmd
- ClientKill(ipPort string) *StatusCmd
- ClientKillByFilter(keys ...string) *IntCmd
- ClientList() *StringCmd
- ClientPause(dur time.Duration) *BoolCmd
- ClientID() *IntCmd
- ConfigGet(parameter string) *SliceCmd
- ConfigResetStat() *StatusCmd
- ConfigSet(parameter, value string) *StatusCmd
- ConfigRewrite() *StatusCmd
- DBSize() *IntCmd
- FlushAll() *StatusCmd
- FlushAllAsync() *StatusCmd
- FlushDB() *StatusCmd
- FlushDBAsync() *StatusCmd
- Info(section ...string) *StringCmd
- LastSave() *IntCmd
- Save() *StatusCmd
- Shutdown() *StatusCmd
- ShutdownSave() *StatusCmd
- ShutdownNoSave() *StatusCmd
- SlaveOf(host, port string) *StatusCmd
- Time() *TimeCmd
- Eval(script string, keys []string, args ...interface{}) *Cmd
- EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(hashes ...string) *BoolSliceCmd
- ScriptFlush() *StatusCmd
- ScriptKill() *StatusCmd
- ScriptLoad(script string) *StringCmd
- DebugObject(key string) *StringCmd
- Publish(channel string, message interface{}) *IntCmd
- PubSubChannels(pattern string) *StringSliceCmd
- PubSubNumSub(channels ...string) *StringIntMapCmd
- PubSubNumPat() *IntCmd
- ClusterSlots() *ClusterSlotsCmd
- ClusterNodes() *StringCmd
- ClusterMeet(host, port string) *StatusCmd
- ClusterForget(nodeID string) *StatusCmd
- ClusterReplicate(nodeID string) *StatusCmd
- ClusterResetSoft() *StatusCmd
- ClusterResetHard() *StatusCmd
- ClusterInfo() *StringCmd
- ClusterKeySlot(key string) *IntCmd
- ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd
- ClusterCountFailureReports(nodeID string) *IntCmd
- ClusterCountKeysInSlot(slot int) *IntCmd
- ClusterDelSlots(slots ...int) *StatusCmd
- ClusterDelSlotsRange(min, max int) *StatusCmd
- ClusterSaveConfig() *StatusCmd
- ClusterSlaves(nodeID string) *StringSliceCmd
- ClusterFailover() *StatusCmd
- ClusterAddSlots(slots ...int) *StatusCmd
- ClusterAddSlotsRange(min, max int) *StatusCmd
- GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(key string, members ...string) *GeoPosCmd
- GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
- GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd
- GeoDist(key string, member1, member2, unit string) *FloatCmd
- GeoHash(key string, members ...string) *StringSliceCmd
- ReadOnly() *StatusCmd
- ReadWrite() *StatusCmd
- MemoryUsage(key string, samples ...int) *IntCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(password string) *StatusCmd
- AuthACL(username, password string) *StatusCmd
- Select(index int) *StatusCmd
- SwapDB(index1, index2 int) *StatusCmd
- ClientSetName(name string) *BoolCmd
-}
-
-var _ Cmdable = (*Client)(nil)
-var _ Cmdable = (*Tx)(nil)
-var _ Cmdable = (*Ring)(nil)
-var _ Cmdable = (*ClusterClient)(nil)
-
-type cmdable func(cmd Cmder) error
-
-type statefulCmdable func(cmd Cmder) error
-
-//------------------------------------------------------------------------------
-
-func (c statefulCmdable) Auth(password string) *StatusCmd {
- cmd := NewStatusCmd("auth", password)
- _ = c(cmd)
- return cmd
-}
-
-// Perform an AUTH command, using the given user and pass.
-// Should be used to authenticate the current connection with one of the connections defined in the ACL list
-// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
-func (c statefulCmdable) AuthACL(username, password string) *StatusCmd {
- cmd := NewStatusCmd("auth", username, password)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Echo(message interface{}) *StringCmd {
- cmd := NewStringCmd("echo", message)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Ping() *StatusCmd {
- cmd := NewStatusCmd("ping")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Quit() *StatusCmd {
- panic("not implemented")
-}
-
-func (c statefulCmdable) Select(index int) *StatusCmd {
- cmd := NewStatusCmd("select", index)
- _ = c(cmd)
- return cmd
-}
-
-func (c statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
- cmd := NewStatusCmd("swapdb", index1, index2)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Command() *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd("command")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Del(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Unlink(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Dump(key string) *StringCmd {
- cmd := NewStringCmd("dump", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Exists(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd("expire", key, formatSec(expiration))
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd("expireat", key, tm.Unix())
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Keys(pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd("keys", pattern)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Migrate(host, port, key string, db int, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Move(key string, db int) *BoolCmd {
- cmd := NewBoolCmd("move", key, db)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ObjectRefCount(key string) *IntCmd {
- cmd := NewIntCmd("object", "refcount", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ObjectEncoding(key string) *StringCmd {
- cmd := NewStringCmd("object", "encoding", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ObjectIdleTime(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Second, "object", "idletime", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Persist(key string) *BoolCmd {
- cmd := NewBoolCmd("persist", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PTTL(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Millisecond, "pttl", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RandomKey() *StringCmd {
- cmd := NewStringCmd("randomkey")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Rename(key, newkey string) *StatusCmd {
- cmd := NewStatusCmd("rename", key, newkey)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RenameNX(key, newkey string) *BoolCmd {
- cmd := NewBoolCmd("renamenx", key, newkey)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- "restore",
- key,
- formatMs(ttl),
- value,
- )
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- "restore",
- key,
- formatMs(ttl),
- value,
- "replace",
- )
- _ = c(cmd)
- return cmd
-}
-
-type Sort struct {
- By string
- Offset, Count int64
- Get []string
- Order string
- Alpha bool
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
- }
- if sort.Alpha {
- args = append(args, "alpha")
- }
- return args
-}
-
-func (c cmdable) Sort(key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(sort.args(key)...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SortStore(key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
- if store != "" {
- args = append(args, "store", store)
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(sort.args(key)...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Touch(keys ...string) *IntCmd {
- args := make([]interface{}, len(keys)+1)
- args[0] = "touch"
- for i, key := range keys {
- args[i+1] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) TTL(key string) *DurationCmd {
- cmd := NewDurationCmd(time.Second, "ttl", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Type(key string) *StatusCmd {
- cmd := NewStatusCmd("type", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(c, args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(c, args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(c, args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(c, args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Append(key, value string) *IntCmd {
- cmd := NewIntCmd("append", key, value)
- _ = c(cmd)
- return cmd
-}
-
-type BitCount struct {
- Start, End int64
-}
-
-func (c cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
- return c.bitOp("and", destKey, keys...)
-}
-
-func (c cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
- return c.bitOp("or", destKey, keys...)
-}
-
-func (c cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
- return c.bitOp("xor", destKey, keys...)
-}
-
-func (c cmdable) BitOpNot(destKey string, key string) *IntCmd {
- return c.bitOp("not", destKey, key)
-}
-
-func (c cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) BitField(key string, args ...interface{}) *IntSliceCmd {
- a := make([]interface{}, 0, 2+len(args))
- a = append(a, "bitfield")
- a = append(a, key)
- a = append(a, args...)
- cmd := NewIntSliceCmd(a...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Decr(key string) *IntCmd {
- cmd := NewIntCmd("decr", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) DecrBy(key string, decrement int64) *IntCmd {
- cmd := NewIntCmd("decrby", key, decrement)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `GET key` command. It returns redis.Nil error when key does not exist.
-func (c cmdable) Get(key string) *StringCmd {
- cmd := NewStringCmd("get", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GetBit(key string, offset int64) *IntCmd {
- cmd := NewIntCmd("getbit", key, offset)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GetRange(key string, start, end int64) *StringCmd {
- cmd := NewStringCmd("getrange", key, start, end)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GetSet(key string, value interface{}) *StringCmd {
- cmd := NewStringCmd("getset", key, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Incr(key string) *IntCmd {
- cmd := NewIntCmd("incr", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) IncrBy(key string, value int64) *IntCmd {
- cmd := NewIntCmd("incrby", key, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) IncrByFloat(key string, value float64) *FloatCmd {
- cmd := NewFloatCmd("incrbyfloat", key, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) MGet(keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// MSet is like Set but accepts multiple values:
-// - MSet("key1", "value1", "key2", "value2")
-// - MSet([]string{"key1", "value1", "key2", "value2"})
-// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSet(values ...interface{}) *StatusCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "mset"
- args = appendArgs(args, values)
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// MSetNX is like SetNX but accepts multiple values:
-// - MSetNX("key1", "value1", "key2", "value2")
-// - MSetNX([]string{"key1", "value1", "key2", "value2"})
-// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSetNX(values ...interface{}) *BoolCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "msetnx"
- args = appendArgs(args, values)
- cmd := NewBoolCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SET key value [expiration]` command.
-//
-// Use expiration for `SETEX`-like behavior.
-// Zero expiration means the key has no expiration time.
-func (c cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
- args := make([]interface{}, 3, 5)
- args[0] = "set"
- args[1] = key
- args[2] = value
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(expiration))
- } else {
- args = append(args, "ex", formatSec(expiration))
- }
- }
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SetBit(key string, offset int64, value int) *IntCmd {
- cmd := NewIntCmd(
- "setbit",
- key,
- offset,
- value,
- )
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SET key value [expiration] NX` command.
-//
-// Zero expiration means the key has no expiration time.
-func (c cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- if expiration == 0 {
- // Use old `SETNX` to support old Redis versions.
- cmd = NewBoolCmd("setnx", key, value)
- } else {
- if usePrecise(expiration) {
- cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
- } else {
- cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
- }
- }
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SET key value [expiration] XX` command.
-//
-// Zero expiration means the key has no expiration time.
-func (c cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- if expiration == 0 {
- cmd = NewBoolCmd("set", key, value, "xx")
- } else {
- if usePrecise(expiration) {
- cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
- } else {
- cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
- }
- }
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SetRange(key string, offset int64, value string) *IntCmd {
- cmd := NewIntCmd("setrange", key, offset, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) StrLen(key string) *IntCmd {
- cmd := NewIntCmd("strlen", key)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) HDel(key string, fields ...string) *IntCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hdel"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HExists(key, field string) *BoolCmd {
- cmd := NewBoolCmd("hexists", key, field)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HGet(key, field string) *StringCmd {
- cmd := NewStringCmd("hget", key, field)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HGetAll(key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd("hgetall", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
- cmd := NewIntCmd("hincrby", key, field, incr)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
- cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HKeys(key string) *StringSliceCmd {
- cmd := NewStringSliceCmd("hkeys", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HLen(key string) *IntCmd {
- cmd := NewIntCmd("hlen", key)
- _ = c(cmd)
- return cmd
-}
-
-// HMGet returns the values for the specified fields in the hash stored at key.
-// It returns an interface{} to distinguish between empty string and nil value.
-func (c cmdable) HMGet(key string, fields ...string) *SliceCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hmget"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// HSet accepts values in following formats:
-// - HMSet("myhash", "key1", "value1", "key2", "value2")
-// - HMSet("myhash", []string{"key1", "value1", "key2", "value2"})
-// - HMSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
-//
-// Note that it requires Redis v4 for multiple field/value pairs support.
-func (c cmdable) HSet(key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
-func (c cmdable) HMSet(key string, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hmset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewBoolCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
- cmd := NewBoolCmd("hsetnx", key, field, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) HVals(key string) *StringSliceCmd {
- cmd := NewStringSliceCmd("hvals", key)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "blpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(timeout)
- cmd := NewStringSliceCmd(args...)
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "brpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(keys)+1] = formatSec(timeout)
- cmd := NewStringSliceCmd(args...)
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
- cmd := NewStringCmd(
- "brpoplpush",
- source,
- destination,
- formatSec(timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LIndex(key string, index int64) *StringCmd {
- cmd := NewStringCmd("lindex", key, index)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd("linsert", key, op, pivot, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd("linsert", key, "before", pivot, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd("linsert", key, "after", pivot, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LLen(key string) *IntCmd {
- cmd := NewIntCmd("llen", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LPop(key string) *StringCmd {
- cmd := NewStringCmd("lpop", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LPush(key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LPushX(key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(
- "lrange",
- key,
- start,
- stop,
- )
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
- cmd := NewIntCmd("lrem", key, count, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
- cmd := NewStatusCmd("lset", key, index, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LTrim(key string, start, stop int64) *StatusCmd {
- cmd := NewStatusCmd(
- "ltrim",
- key,
- start,
- stop,
- )
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RPop(key string) *StringCmd {
- cmd := NewStringCmd("rpop", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RPopLPush(source, destination string) *StringCmd {
- cmd := NewStringCmd("rpoplpush", source, destination)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RPush(key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) RPushX(key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) SAdd(key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "sadd"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SCard(key string) *IntCmd {
- cmd := NewIntCmd("scard", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SDiff(keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sdiff"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sdiffstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SInter(keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sinter"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SInterStore(destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sinterstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SIsMember(key string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd("sismember", key, member)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SMEMBERS key` command output as a slice
-func (c cmdable) SMembers(key string) *StringSliceCmd {
- cmd := NewStringSliceCmd("smembers", key)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SMEMBERS key` command output as a map
-func (c cmdable) SMembersMap(key string) *StringStructMapCmd {
- cmd := NewStringStructMapCmd("smembers", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd("smove", source, destination, member)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SPOP key` command.
-func (c cmdable) SPop(key string) *StringCmd {
- cmd := NewStringCmd("spop", key)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SPOP key count` command.
-func (c cmdable) SPopN(key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd("spop", key, count)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SRANDMEMBER key` command.
-func (c cmdable) SRandMember(key string) *StringCmd {
- cmd := NewStringCmd("srandmember", key)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `SRANDMEMBER key count` command.
-func (c cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd("srandmember", key, count)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SRem(key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "srem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SUnion(keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sunion"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sunionstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-type XAddArgs struct {
- Stream string
- MaxLen int64 // MAXLEN N
- MaxLenApprox int64 // MAXLEN ~ N
- ID string
- Values map[string]interface{}
-}
-
-func (c cmdable) XAdd(a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 6+len(a.Values)*2)
- args = append(args, "xadd")
- args = append(args, a.Stream)
- if a.MaxLen > 0 {
- args = append(args, "maxlen", a.MaxLen)
- } else if a.MaxLenApprox > 0 {
- args = append(args, "maxlen", "~", a.MaxLenApprox)
- }
- if a.ID != "" {
- args = append(args, a.ID)
- } else {
- args = append(args, "*")
- }
- for k, v := range a.Values {
- args = append(args, k)
- args = append(args, v)
- }
-
- cmd := NewStringCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XDel(stream string, ids ...string) *IntCmd {
- args := []interface{}{"xdel", stream}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XLen(stream string) *IntCmd {
- cmd := NewIntCmd("xlen", stream)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
- _ = c(cmd)
- return cmd
-}
-
-type XReadArgs struct {
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
-}
-
-func (c cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 5+len(a.Streams))
- args = append(args, "xread")
- if a.Count > 0 {
- args = append(args, "count")
- args = append(args, a.Count)
- }
- if a.Block >= 0 {
- args = append(args, "block")
- args = append(args, int64(a.Block/time.Millisecond))
- }
-
- args = append(args, "streams")
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XReadStreams(streams ...string) *XStreamSliceCmd {
- return c.XRead(&XReadArgs{
- Streams: streams,
- Block: -1,
- })
-}
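
Similarly, a hedged sketch of a blocking read with the v7 XRead API above (stream name, ID and timings are made-up examples; it also assumes the time import). Per the code, Block >= 0 is sent as BLOCK in milliseconds and doubles as the read timeout, while XReadStreams passes Block: -1 to skip blocking entirely:

    func waitForEvents(rdb *redis.Client) ([]redis.XStream, error) {
        // Streams lists the stream names first, then their IDs ("$" = only new entries).
        return rdb.XRead(&redis.XReadArgs{
            Streams: []string{"events", "$"},
            Count:   10,
            Block:   5 * time.Second,
        }).Result()
    }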
-
-func (c cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd("xgroup", "create", stream, group, start)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDestroy(stream, group string) *IntCmd {
- cmd := NewIntCmd("xgroup", "destroy", stream, group)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer)
- _ = c(cmd)
- return cmd
-}
-
-type XReadGroupArgs struct {
- Group string
- Consumer string
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
- NoAck bool
-}
-
-func (c cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 8+len(a.Streams))
- args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- }
- if a.Block >= 0 {
- args = append(args, "block", int64(a.Block/time.Millisecond))
- }
- if a.NoAck {
- args = append(args, "noack")
- }
- args = append(args, "streams")
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XAck(stream, group string, ids ...string) *IntCmd {
- args := []interface{}{"xack", stream, group}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XPending(stream, group string) *XPendingCmd {
- cmd := NewXPendingCmd("xpending", stream, group)
- _ = c(cmd)
- return cmd
-}
-
-type XPendingExtArgs struct {
- Stream string
- Group string
- Start string
- End string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
- if a.Consumer != "" {
- args = append(args, a.Consumer)
- }
- cmd := NewXPendingExtCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-type XClaimArgs struct {
- Stream string
- Group string
- Consumer string
- MinIdle time.Duration
- Messages []string
-}
-
-func (c cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd {
- args := xClaimArgs(a)
- cmd := NewXMessageSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd {
- args := xClaimArgs(a)
- args = append(args, "justid")
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 4+len(a.Messages))
- args = append(args,
- "xclaim",
- a.Stream,
- a.Group, a.Consumer,
- int64(a.MinIdle/time.Millisecond))
- for _, id := range a.Messages {
- args = append(args, id)
- }
- return args
-}
-
-func (c cmdable) XTrim(key string, maxLen int64) *IntCmd {
- cmd := NewIntCmd("xtrim", key, "maxlen", maxLen)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XTrimApprox(key string, maxLen int64) *IntCmd {
- cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) XInfoGroups(key string) *XInfoGroupsCmd {
- cmd := NewXInfoGroupsCmd(key)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Z represents a sorted set member.
-type Z struct {
- Score float64
- Member interface{}
-}
-
-// ZWithKey represents a sorted set member, including the name of the key it was popped from.
-type ZWithKey struct {
- Z
- Key string
-}
-
-// ZStore is used as an arg to ZInterStore and ZUnionStore.
-type ZStore struct {
- Keys []string
- Weights []float64
- // Can be SUM, MIN or MAX.
- Aggregate string
-}
-
-// Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmax"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(timeout)
- cmd := NewZWithKeyCmd(args...)
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `BZPOPMIN key [key ...] timeout` command.
-func (c cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmin"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(timeout)
- cmd := NewZWithKeyCmd(args...)
- cmd.setReadTimeout(timeout)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) zAdd(a []interface{}, n int, members ...*Z) *IntCmd {
- for i, m := range members {
- a[n+2*i] = m.Score
- a[n+2*i+1] = m.Member
- }
- cmd := NewIntCmd(a...)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(key string, members ...*Z) *IntCmd {
- const n = 2
- a := make([]interface{}, n+2*len(members))
- a[0], a[1] = "zadd", key
- return c.zAdd(a, n, members...)
-}
-
-// Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "nx"
- return c.zAdd(a, n, members...)
-}
-
-// Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "xx"
- return c.zAdd(a, n, members...)
-}
-
-// Redis `ZADD key CH score member [score member ...]` command.
-func (c cmdable) ZAddCh(key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "ch"
- return c.zAdd(a, n, members...)
-}
-
-// Redis `ZADD key NX CH score member [score member ...]` command.
-func (c cmdable) ZAddNXCh(key string, members ...*Z) *IntCmd {
- const n = 4
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
- return c.zAdd(a, n, members...)
-}
-
-// Redis `ZADD key XX CH score member [score member ...]` command.
-func (c cmdable) ZAddXXCh(key string, members ...*Z) *IntCmd {
- const n = 4
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
- return c.zAdd(a, n, members...)
-}
-
-func (c cmdable) zIncr(a []interface{}, n int, members ...*Z) *FloatCmd {
- for i, m := range members {
- a[n+2*i] = m.Score
- a[n+2*i+1] = m.Member
- }
- cmd := NewFloatCmd(a...)
- _ = c(cmd)
- return cmd
-}
-
-// Redis `ZADD key INCR score member` command.
-func (c cmdable) ZIncr(key string, member *Z) *FloatCmd {
- const n = 3
- a := make([]interface{}, n+2)
- a[0], a[1], a[2] = "zadd", key, "incr"
- return c.zIncr(a, n, member)
-}
-
-// Redis `ZADD key NX INCR score member` command.
-func (c cmdable) ZIncrNX(key string, member *Z) *FloatCmd {
- const n = 4
- a := make([]interface{}, n+2)
- a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
- return c.zIncr(a, n, member)
-}
-
-// Redis `ZADD key XX INCR score member` command.
-func (c cmdable) ZIncrXX(key string, member *Z) *FloatCmd {
- const n = 4
- a := make([]interface{}, n+2)
- a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
- return c.zIncr(a, n, member)
-}
-
-func (c cmdable) ZCard(key string) *IntCmd {
- cmd := NewIntCmd("zcard", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZCount(key, min, max string) *IntCmd {
- cmd := NewIntCmd("zcount", key, min, max)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZLexCount(key, min, max string) *IntCmd {
- cmd := NewIntCmd("zlexcount", key, min, max)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
- cmd := NewFloatCmd("zincrby", key, increment, member)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZInterStore(destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 3+len(store.Keys))
- args[0] = "zinterstore"
- args[1] = destination
- args[2] = len(store.Keys)
- for i, key := range store.Keys {
- args[3+i] = key
- }
- if len(store.Weights) > 0 {
- args = append(args, "weights")
- for _, weight := range store.Weights {
- args = append(args, weight)
- }
- }
- if store.Aggregate != "" {
- args = append(args, "aggregate", store.Aggregate)
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
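
For illustration, a small sketch of how the ZStore options above translate into a ZINTERSTORE call; the key names and weights are invented for the example:

    func intersectScores(rdb *redis.Client) (int64, error) {
        // Stores the weighted intersection of zset1 and zset2 into "out",
        // keeping the minimum aggregated score per member.
        return rdb.ZInterStore("out", &redis.ZStore{
            Keys:      []string{"zset1", "zset2"},
            Weights:   []float64{2, 3},
            Aggregate: "MIN",
        }).Result()
    }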
-
-func (c cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmax",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmin",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
- args := []interface{}{
- "zrange",
- key,
- start,
- stop,
- }
- if withScores {
- args = append(args, "withscores")
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
- return c.zRange(key, start, stop, false)
-}
-
-func (c cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
- _ = c(cmd)
- return cmd
-}
-
-type ZRangeBy struct {
- Min, Max string
- Offset, Count int64
-}
-
-func (c cmdable) zRangeBy(zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Min, opt.Max}
- if withScores {
- args = append(args, "withscores")
- }
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy("zrangebyscore", key, opt, false)
-}
-
-func (c cmdable) ZRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy("zrangebylex", key, opt, false)
-}
-
-func (c cmdable) ZRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRank(key, member string) *IntCmd {
- cmd := NewIntCmd("zrank", key, member)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRem(key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "zrem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
- cmd := NewIntCmd(
- "zremrangebyrank",
- key,
- start,
- stop,
- )
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
- cmd := NewIntCmd("zremrangebyscore", key, min, max)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
- cmd := NewIntCmd("zremrangebylex", key, min, max)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd("zrevrange", key, start, stop)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) zRevRangeBy(zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Max, opt.Min}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeByScore(key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy("zrevrangebyscore", key, opt)
-}
-
-func (c cmdable) ZRevRangeByLex(key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy("zrevrangebylex", key, opt)
-}
-
-func (c cmdable) ZRevRangeByScoreWithScores(key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRank(key, member string) *IntCmd {
- cmd := NewIntCmd("zrevrank", key, member)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZScore(key, member string) *FloatCmd {
- cmd := NewFloatCmd("zscore", key, member)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionStore(dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 3+len(store.Keys))
- args[0] = "zunionstore"
- args[1] = dest
- args[2] = len(store.Keys)
- for i, key := range store.Keys {
- args[3+i] = key
- }
- if len(store.Weights) > 0 {
- args = append(args, "weights")
- for _, weight := range store.Weights {
- args = append(args, weight)
- }
- }
- if store.Aggregate != "" {
- args = append(args, "aggregate", store.Aggregate)
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(els))
- args[0] = "pfadd"
- args[1] = key
- args = appendArgs(args, els)
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PFCount(keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "pfcount"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "pfmerge"
- args[1] = dest
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BgRewriteAOF() *StatusCmd {
- cmd := NewStatusCmd("bgrewriteaof")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) BgSave() *StatusCmd {
- cmd := NewStatusCmd("bgsave")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientKill(ipPort string) *StatusCmd {
- cmd := NewStatusCmd("client", "kill", ipPort)
- _ = c(cmd)
- return cmd
-}
-
-// ClientKillByFilter is the new-style syntax, while ClientKill is the old one:
-// CLIENT KILL <option> [value] ... <option> [value]
-func (c cmdable) ClientKillByFilter(keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "client"
- args[1] = "kill"
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientList() *StringCmd {
- cmd := NewStringCmd("client", "list")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientPause(dur time.Duration) *BoolCmd {
- cmd := NewBoolCmd("client", "pause", formatMs(dur))
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientID() *IntCmd {
- cmd := NewIntCmd("client", "id")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblock(id int64) *IntCmd {
- cmd := NewIntCmd("client", "unblock", id)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblockWithError(id int64) *IntCmd {
- cmd := NewIntCmd("client", "unblock", id, "error")
- _ = c(cmd)
- return cmd
-}
-
-// ClientSetName assigns a name to the connection.
-func (c statefulCmdable) ClientSetName(name string) *BoolCmd {
- cmd := NewBoolCmd("client", "setname", name)
- _ = c(cmd)
- return cmd
-}
-
-// ClientGetName returns the name of the connection.
-func (c cmdable) ClientGetName() *StringCmd {
- cmd := NewStringCmd("client", "getname")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ConfigGet(parameter string) *SliceCmd {
- cmd := NewSliceCmd("config", "get", parameter)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ConfigResetStat() *StatusCmd {
- cmd := NewStatusCmd("config", "resetstat")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ConfigSet(parameter, value string) *StatusCmd {
- cmd := NewStatusCmd("config", "set", parameter, value)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ConfigRewrite() *StatusCmd {
- cmd := NewStatusCmd("config", "rewrite")
- _ = c(cmd)
- return cmd
-}
-
-// Deprecated: Use DBSize instead.
-func (c cmdable) DbSize() *IntCmd {
- return c.DBSize()
-}
-
-func (c cmdable) DBSize() *IntCmd {
- cmd := NewIntCmd("dbsize")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) FlushAll() *StatusCmd {
- cmd := NewStatusCmd("flushall")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) FlushAllAsync() *StatusCmd {
- cmd := NewStatusCmd("flushall", "async")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) FlushDB() *StatusCmd {
- cmd := NewStatusCmd("flushdb")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) FlushDBAsync() *StatusCmd {
- cmd := NewStatusCmd("flushdb", "async")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Info(section ...string) *StringCmd {
- args := []interface{}{"info"}
- if len(section) > 0 {
- args = append(args, section[0])
- }
- cmd := NewStringCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) LastSave() *IntCmd {
- cmd := NewIntCmd("lastsave")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) Save() *StatusCmd {
- cmd := NewStatusCmd("save")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) shutdown(modifier string) *StatusCmd {
- var args []interface{}
- if modifier == "" {
- args = []interface{}{"shutdown"}
- } else {
- args = []interface{}{"shutdown", modifier}
- }
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- if err := cmd.Err(); err != nil {
- if err == io.EOF {
- // Server quit as expected.
- cmd.err = nil
- }
- } else {
- // Server did not quit. String reply contains the reason.
- cmd.err = errors.New(cmd.val)
- cmd.val = ""
- }
- return cmd
-}
-
-func (c cmdable) Shutdown() *StatusCmd {
- return c.shutdown("")
-}
-
-func (c cmdable) ShutdownSave() *StatusCmd {
- return c.shutdown("save")
-}
-
-func (c cmdable) ShutdownNoSave() *StatusCmd {
- return c.shutdown("nosave")
-}
-
-func (c cmdable) SlaveOf(host, port string) *StatusCmd {
- cmd := NewStatusCmd("slaveof", host, port)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) SlowLog() {
- panic("not implemented")
-}
-
-func (c cmdable) Sync() {
- panic("not implemented")
-}
-
-func (c cmdable) Time() *TimeCmd {
- cmd := NewTimeCmd("time")
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Eval(script string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "eval"
- cmdArgs[1] = script
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(cmdArgs...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "evalsha"
- cmdArgs[1] = sha1
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(cmdArgs...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ScriptExists(hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ScriptFlush() *StatusCmd {
- cmd := NewStatusCmd("script", "flush")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ScriptKill() *StatusCmd {
- cmd := NewStatusCmd("script", "kill")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ScriptLoad(script string) *StringCmd {
- cmd := NewStringCmd("script", "load", script)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) DebugObject(key string) *StringCmd {
- cmd := NewStringCmd("debug", "object", key)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Publish posts the message to the channel.
-func (c cmdable) Publish(channel string, message interface{}) *IntCmd {
- cmd := NewIntCmd("publish", channel, message)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PubSubChannels(pattern string) *StringSliceCmd {
- args := []interface{}{"pubsub", "channels"}
- if pattern != "*" {
- args = append(args, pattern)
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumSub(channels ...string) *StringIntMapCmd {
- args := make([]interface{}, 2+len(channels))
- args[0] = "pubsub"
- args[1] = "numsub"
- for i, channel := range channels {
- args[2+i] = channel
- }
- cmd := NewStringIntMapCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumPat() *IntCmd {
- cmd := NewIntCmd("pubsub", "numpat")
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) ClusterSlots() *ClusterSlotsCmd {
- cmd := NewClusterSlotsCmd("cluster", "slots")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterNodes() *StringCmd {
- cmd := NewStringCmd("cluster", "nodes")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterMeet(host, port string) *StatusCmd {
- cmd := NewStatusCmd("cluster", "meet", host, port)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterForget(nodeID string) *StatusCmd {
- cmd := NewStatusCmd("cluster", "forget", nodeID)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterReplicate(nodeID string) *StatusCmd {
- cmd := NewStatusCmd("cluster", "replicate", nodeID)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetSoft() *StatusCmd {
- cmd := NewStatusCmd("cluster", "reset", "soft")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetHard() *StatusCmd {
- cmd := NewStatusCmd("cluster", "reset", "hard")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterInfo() *StringCmd {
- cmd := NewStringCmd("cluster", "info")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterKeySlot(key string) *IntCmd {
- cmd := NewIntCmd("cluster", "keyslot", key)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd("cluster", "getkeysinslot", slot, count)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountFailureReports(nodeID string) *IntCmd {
- cmd := NewIntCmd("cluster", "count-failure-reports", nodeID)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountKeysInSlot(slot int) *IntCmd {
- cmd := NewIntCmd("cluster", "countkeysinslot", slot)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlots(slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "delslots"
- for i, slot := range slots {
- args[2+i] = slot
- }
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlotsRange(min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterDelSlots(slots...)
-}
-
-func (c cmdable) ClusterSaveConfig() *StatusCmd {
- cmd := NewStatusCmd("cluster", "saveconfig")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterSlaves(nodeID string) *StringSliceCmd {
- cmd := NewStringSliceCmd("cluster", "slaves", nodeID)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ReadOnly() *StatusCmd {
- cmd := NewStatusCmd("readonly")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ReadWrite() *StatusCmd {
- cmd := NewStatusCmd("readwrite")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterFailover() *StatusCmd {
- cmd := NewStatusCmd("cluster", "failover")
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlots(slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "addslots"
- for i, num := range slots {
- args[2+i] = num
- }
- cmd := NewStatusCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlotsRange(min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterAddSlots(slots...)
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd {
- args := make([]interface{}, 2+3*len(geoLocation))
- args[0] = "geoadd"
- args[1] = key
- for i, eachLoc := range geoLocation {
- args[2+3*i] = eachLoc.Longitude
- args[2+3*i+1] = eachLoc.Latitude
- args[2+3*i+2] = eachLoc.Name
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-// GeoRadius is a read-only GEORADIUS_RO command.
-func (c cmdable) GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(query, "georadius_ro", key, longitude, latitude)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
- return cmd
- }
- _ = c(cmd)
- return cmd
-}
-
-// GeoRadiusStore is a writing GEORADIUS command.
-func (c cmdable) GeoRadiusStore(key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd {
- args := geoLocationArgs(query, "georadius", key, longitude, latitude)
- cmd := NewIntCmd(args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(cmd)
- return cmd
-}
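
A caller-side sketch of the split enforced above: GeoRadius rejects Store/StoreDist and issues the read-only GEORADIUS_RO, while GeoRadiusStore requires one of them. The key and coordinates below are illustrative assumptions:

    func nearbyDrivers(rdb *redis.Client) ([]redis.GeoLocation, error) {
        // Read-only query; to persist results, set Store or StoreDist
        // and call GeoRadiusStore instead.
        return rdb.GeoRadius("drivers", 13.361389, 38.115556, &redis.GeoRadiusQuery{
            Radius: 200,
            Unit:   "km",
        }).Result()
    }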
-
-// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
-func (c cmdable) GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(query, "georadiusbymember_ro", key, member)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
- return cmd
- }
- _ = c(cmd)
- return cmd
-}
-
-// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
-func (c cmdable) GeoRadiusByMemberStore(key, member string, query *GeoRadiusQuery) *IntCmd {
- args := geoLocationArgs(query, "georadiusbymember", key, member)
- cmd := NewIntCmd(args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GeoDist(key string, member1, member2, unit string) *FloatCmd {
- if unit == "" {
- unit = "km"
- }
- cmd := NewFloatCmd("geodist", key, member1, member2, unit)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GeoHash(key string, members ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geohash"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewStringSliceCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-func (c cmdable) GeoPos(key string, members ...string) *GeoPosCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geopos"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewGeoPosCmd(args...)
- _ = c(cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) MemoryUsage(key string, samples ...int) *IntCmd {
- args := []interface{}{"memory", "usage", key}
- if len(samples) > 0 {
- if len(samples) != 1 {
- panic("MemoryUsage expects single sample count")
- }
- args = append(args, "SAMPLES", samples[0])
- }
- cmd := NewIntCmd(args...)
- _ = c(cmd)
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v7/go.mod b/vendor/github.com/go-redis/redis/v7/go.mod
deleted file mode 100644
index e3a4dec54a..0000000000
--- a/vendor/github.com/go-redis/redis/v7/go.mod
+++ /dev/null
@@ -1,15 +0,0 @@
-module github.com/go-redis/redis/v7
-
-require (
- github.com/golang/protobuf v1.3.2 // indirect
- github.com/kr/pretty v0.1.0 // indirect
- github.com/onsi/ginkgo v1.10.1
- github.com/onsi/gomega v1.7.0
- golang.org/x/net v0.0.0-20190923162816-aa69164e4478 // indirect
- golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect
- golang.org/x/text v0.3.2 // indirect
- gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
- gopkg.in/yaml.v2 v2.2.4 // indirect
-)
-
-go 1.11
diff --git a/vendor/github.com/go-redis/redis/v7/go.sum b/vendor/github.com/go-redis/redis/v7/go.sum
deleted file mode 100644
index 6a04dbb63a..0000000000
--- a/vendor/github.com/go-redis/redis/v7/go.sum
+++ /dev/null
@@ -1,47 +0,0 @@
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go b/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go
deleted file mode 100644
index a9c56f0762..0000000000
--- a/vendor/github.com/go-redis/redis/v7/internal/consistenthash/consistenthash.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package consistenthash provides an implementation of a ring hash.
-package consistenthash
-
-import (
- "hash/crc32"
- "sort"
- "strconv"
-)
-
-type Hash func(data []byte) uint32
-
-type Map struct {
- hash Hash
- replicas int
- keys []int // Sorted
- hashMap map[int]string
-}
-
-func New(replicas int, fn Hash) *Map {
- m := &Map{
- replicas: replicas,
- hash: fn,
- hashMap: make(map[int]string),
- }
- if m.hash == nil {
- m.hash = crc32.ChecksumIEEE
- }
- return m
-}
-
-// Returns true if there are no items available.
-func (m *Map) IsEmpty() bool {
- return len(m.keys) == 0
-}
-
-// Adds some keys to the hash.
-func (m *Map) Add(keys ...string) {
- for _, key := range keys {
- for i := 0; i < m.replicas; i++ {
- hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
- m.keys = append(m.keys, hash)
- m.hashMap[hash] = key
- }
- }
- sort.Ints(m.keys)
-}
-
-// Gets the closest item in the hash to the provided key.
-func (m *Map) Get(key string) string {
- if m.IsEmpty() {
- return ""
- }
-
- hash := int(m.hash([]byte(key)))
-
- // Binary search for appropriate replica.
- idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
-
- // Means we have cycled back to the first replica.
- if idx == len(m.keys) {
- idx = 0
- }
-
- return m.hashMap[m.keys[idx]]
-}
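
For context, a hypothetical sketch of how the ring hash above is used; it is an internal go-redis package, and the shard names here are invented for the example:

    func pickShard() string {
        ring := consistenthash.New(100, nil) // nil lets the hash default to crc32.ChecksumIEEE
        ring.Add("shard-a", "shard-b", "shard-c")
        // The same key keeps mapping to the same shard while membership is unchanged.
        return ring.Get("user:42")
    }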
diff --git a/vendor/github.com/go-redis/redis/v7/internal/internal.go b/vendor/github.com/go-redis/redis/v7/internal/internal.go
deleted file mode 100644
index ad3fc3c9ff..0000000000
--- a/vendor/github.com/go-redis/redis/v7/internal/internal.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package internal
-
-import (
- "math/rand"
- "time"
-)
-
-// RetryBackoff returns an exponential retry backoff with jitter, to avoid overloading the server with synchronized retries.
-// https://www.awsarchitectureblog.com/2015/03/backoff.html
-func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
- if retry < 0 {
- retry = 0
- }
-
- backoff := minBackoff << uint(retry)
- if backoff > maxBackoff || backoff < minBackoff {
- backoff = maxBackoff
- }
-
- if backoff == 0 {
- return 0
- }
- return time.Duration(rand.Int63n(int64(backoff)))
-}
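
As a worked example of the backoff above: with minBackoff = 8ms, maxBackoff = 512ms and retry = 3, the shifted value is 8ms << 3 = 64ms, which is below the cap, so the caller sleeps for a uniformly random duration in [0, 64ms); once the shifted value reaches maxBackoff (retry >= 6 with these inputs) the jitter window stays at [0, 512ms).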
diff --git a/vendor/github.com/go-redis/redis/v7/internal/log.go b/vendor/github.com/go-redis/redis/v7/internal/log.go
deleted file mode 100644
index 405a2728d6..0000000000
--- a/vendor/github.com/go-redis/redis/v7/internal/log.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package internal
-
-import (
- "log"
- "os"
-)
-
-var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go
deleted file mode 100644
index d4a355a44f..0000000000
--- a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_sticky.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package pool
-
-import (
- "context"
- "sync"
-)
-
-type StickyConnPool struct {
- pool *ConnPool
- reusable bool
-
- cn *Conn
- closed bool
- mu sync.Mutex
-}
-
-var _ Pooler = (*StickyConnPool)(nil)
-
-func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
- return &StickyConnPool{
- pool: pool,
- reusable: reusable,
- }
-}
-
-func (p *StickyConnPool) NewConn(context.Context) (*Conn, error) {
- panic("not implemented")
-}
-
-func (p *StickyConnPool) CloseConn(*Conn) error {
- panic("not implemented")
-}
-
-func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.closed {
- return nil, ErrClosed
- }
- if p.cn != nil {
- return p.cn, nil
- }
-
- cn, err := p.pool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- p.cn = cn
- return cn, nil
-}
-
-func (p *StickyConnPool) putUpstream() {
- p.pool.Put(p.cn)
- p.cn = nil
-}
-
-func (p *StickyConnPool) Put(cn *Conn) {}
-
-func (p *StickyConnPool) removeUpstream(reason error) {
- p.pool.Remove(p.cn, reason)
- p.cn = nil
-}
-
-func (p *StickyConnPool) Remove(cn *Conn, reason error) {
- p.removeUpstream(reason)
-}
-
-func (p *StickyConnPool) Len() int {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.cn == nil {
- return 0
- }
- return 1
-}
-
-func (p *StickyConnPool) IdleLen() int {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.cn == nil {
- return 1
- }
- return 0
-}
-
-func (p *StickyConnPool) Stats() *Stats {
- return nil
-}
-
-func (p *StickyConnPool) Close() error {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.closed {
- return ErrClosed
- }
- p.closed = true
-
- if p.cn != nil {
- if p.reusable {
- p.putUpstream()
- } else {
- p.removeUpstream(ErrClosed)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util.go b/vendor/github.com/go-redis/redis/v7/internal/util.go
deleted file mode 100644
index 844f34bad8..0000000000
--- a/vendor/github.com/go-redis/redis/v7/internal/util.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package internal
-
-import (
- "context"
- "time"
-
- "github.com/go-redis/redis/v7/internal/util"
-)
-
-func Sleep(ctx context.Context, dur time.Duration) error {
- t := time.NewTimer(dur)
- defer t.Stop()
-
- select {
- case <-t.C:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func ToLower(s string) string {
- if isLower(s) {
- return s
- }
-
- b := make([]byte, len(s))
- for i := range b {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- c += 'a' - 'A'
- }
- b[i] = c
- }
- return util.BytesToString(b)
-}
-
-func isLower(s string) bool {
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- return false
- }
- }
- return true
-}
-
-func Unwrap(err error) error {
- u, ok := err.(interface {
- Unwrap() error
- })
- if !ok {
- return nil
- }
- return u.Unwrap()
-}
diff --git a/vendor/github.com/go-redis/redis/v7/script.go b/vendor/github.com/go-redis/redis/v7/script.go
deleted file mode 100644
index 88b7d0a2ea..0000000000
--- a/vendor/github.com/go-redis/redis/v7/script.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package redis
-
-import (
- "crypto/sha1"
- "encoding/hex"
- "io"
- "strings"
-)
-
-type scripter interface {
- Eval(script string, keys []string, args ...interface{}) *Cmd
- EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(hashes ...string) *BoolSliceCmd
- ScriptLoad(script string) *StringCmd
-}
-
-var _ scripter = (*Client)(nil)
-var _ scripter = (*Ring)(nil)
-var _ scripter = (*ClusterClient)(nil)
-
-type Script struct {
- src, hash string
-}
-
-func NewScript(src string) *Script {
- h := sha1.New()
- _, _ = io.WriteString(h, src)
- return &Script{
- src: src,
- hash: hex.EncodeToString(h.Sum(nil)),
- }
-}
-
-func (s *Script) Hash() string {
- return s.hash
-}
-
-func (s *Script) Load(c scripter) *StringCmd {
- return c.ScriptLoad(s.src)
-}
-
-func (s *Script) Exists(c scripter) *BoolSliceCmd {
- return c.ScriptExists(s.hash)
-}
-
-func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
- return c.Eval(s.src, keys, args...)
-}
-
-func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
- return c.EvalSha(s.hash, keys, args...)
-}
-
-// Run optimistically uses EVALSHA to run the script. If the script does not exist
-// it is retried using EVAL.
-func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
- r := s.EvalSha(c, keys, args...)
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
- return s.Eval(c, keys, args...)
- }
- return r
-}
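
To ground the Run doc above, a minimal sketch of Script usage against the v7 client; the Lua body and key name are assumptions for the example:

    var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

    func bumpCounter(rdb *redis.Client) (interface{}, error) {
        // Run tries EVALSHA first and transparently falls back to EVAL on a NOSCRIPT error.
        return incrBy.Run(rdb, []string{"counter"}, 5).Result()
    }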
diff --git a/vendor/github.com/go-redis/redis/v7/sentinel.go b/vendor/github.com/go-redis/redis/v7/sentinel.go
deleted file mode 100644
index 8aa40ef799..0000000000
--- a/vendor/github.com/go-redis/redis/v7/sentinel.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "strings"
- "sync"
- "time"
-
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/pool"
-)
-
-//------------------------------------------------------------------------------
-
-// FailoverOptions are used to configure a failover client and should
-// be passed to NewFailoverClient.
-type FailoverOptions struct {
- // The master name.
- MasterName string
- // A seed list of host:port addresses of sentinel nodes.
- SentinelAddrs []string
- SentinelUsername string
- SentinelPassword string
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(*Conn) error
-
- Username string
- Password string
- DB int
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-}
-
-func (opt *FailoverOptions) options() *Options {
- return &Options{
- Addr: "FailoverClient",
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- DB: opt.DB,
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-// NewFailoverClient returns a Redis client that uses Redis Sentinel
-// for automatic failover. It's safe for concurrent use by multiple
-// goroutines.
-func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
- opt := failoverOpt.options()
- opt.init()
-
- failover := &sentinelFailover{
- masterName: failoverOpt.MasterName,
- sentinelAddrs: failoverOpt.SentinelAddrs,
- username: failoverOpt.SentinelUsername,
- password: failoverOpt.SentinelPassword,
-
- opt: opt,
- }
-
- c := Client{
- baseClient: newBaseClient(opt, failover.Pool()),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
- c.onClose = failover.Close
-
- return &c
-}
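
For reference, a hedged sketch of constructing the failover client defined above; the master name and Sentinel addresses are placeholders, not values from this repository:

    func newSentinelBackedClient() *redis.Client {
        // The client asks the listed Sentinels for the current master address
        // and follows +switch-master notifications on failover.
        return redis.NewFailoverClient(&redis.FailoverOptions{
            MasterName:    "mymaster",
            SentinelAddrs: []string{"127.0.0.1:26379", "127.0.0.1:26380"},
        })
    }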
-
-//------------------------------------------------------------------------------
-
-type SentinelClient struct {
- *baseClient
- ctx context.Context
-}
-
-func NewSentinelClient(opt *Options) *SentinelClient {
- opt.init()
- c := &SentinelClient{
- baseClient: &baseClient{
- opt: opt,
- connPool: newConnPool(opt),
- },
- ctx: context.Background(),
- }
- return c
-}
-
-func (c *SentinelClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.ctx = ctx
- return &clone
-}
-
-func (c *SentinelClient) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *SentinelClient) ProcessContext(ctx context.Context, cmd Cmder) error {
- return c.baseClient.process(ctx, cmd)
-}
-
-func (c *SentinelClient) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(channels []string) (*pool.Conn, error) {
- return c.newConn(context.TODO())
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Ping is used to test if a connection is still alive, or to
-// measure latency.
-func (c *SentinelClient) Ping() *StringCmd {
- cmd := NewStringCmd("ping")
- _ = c.Process(cmd)
- return cmd
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create an empty subscription.
-func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create an empty subscription.
-func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(channels...)
- }
- return pubsub
-}
-
-func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
- cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-func (c *SentinelClient) Sentinels(name string) *SliceCmd {
- cmd := NewSliceCmd("sentinel", "sentinels", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Failover forces a failover as if the master were not reachable, and without
-// asking other Sentinels for agreement.
-func (c *SentinelClient) Failover(name string) *StatusCmd {
- cmd := NewStatusCmd("sentinel", "failover", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Reset resets all the masters with matching name. The pattern argument is a
-// glob-style pattern. The reset process clears any previous state in a master
-// (including a failover in progress), and removes every slave and sentinel
-// already discovered and associated with the master.
-func (c *SentinelClient) Reset(pattern string) *IntCmd {
- cmd := NewIntCmd("sentinel", "reset", pattern)
- _ = c.Process(cmd)
- return cmd
-}
-
-// FlushConfig forces Sentinel to rewrite its configuration on disk, including
-// the current Sentinel state.
-func (c *SentinelClient) FlushConfig() *StatusCmd {
- cmd := NewStatusCmd("sentinel", "flushconfig")
- _ = c.Process(cmd)
- return cmd
-}
-
-// Master shows the state and info of the specified master.
-func (c *SentinelClient) Master(name string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd("sentinel", "master", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Masters shows a list of monitored masters and their state.
-func (c *SentinelClient) Masters() *SliceCmd {
- cmd := NewSliceCmd("sentinel", "masters")
- _ = c.Process(cmd)
- return cmd
-}
-
-// Slaves shows a list of slaves for the specified master and their state.
-func (c *SentinelClient) Slaves(name string) *SliceCmd {
- cmd := NewSliceCmd("sentinel", "slaves", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-// CkQuorum checks if the current Sentinel configuration is able to reach the
-// quorum needed to failover a master, and the majority needed to authorize the
-// failover. This command should be used in monitoring systems to check if a
-// Sentinel deployment is ok.
-func (c *SentinelClient) CkQuorum(name string) *StringCmd {
- cmd := NewStringCmd("sentinel", "ckquorum", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Monitor tells the Sentinel to start monitoring a new master with the specified
-// name, ip, port, and quorum.
-func (c *SentinelClient) Monitor(name, ip, port, quorum string) *StringCmd {
- cmd := NewStringCmd("sentinel", "monitor", name, ip, port, quorum)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Set is used in order to change configuration parameters of a specific master.
-func (c *SentinelClient) Set(name, option, value string) *StringCmd {
- cmd := NewStringCmd("sentinel", "set", name, option, value)
- _ = c.Process(cmd)
- return cmd
-}
-
-// Remove is used in order to remove the specified master: the master will no
-// longer be monitored, and will totally be removed from the internal state of
-// the Sentinel.
-func (c *SentinelClient) Remove(name string) *StringCmd {
- cmd := NewStringCmd("sentinel", "remove", name)
- _ = c.Process(cmd)
- return cmd
-}
-
-type sentinelFailover struct {
- sentinelAddrs []string
-
- opt *Options
- username string
- password string
-
- pool *pool.ConnPool
- poolOnce sync.Once
-
- mu sync.RWMutex
- masterName string
- _masterAddr string
- sentinel *SentinelClient
- pubsub *PubSub
-}
-
-func (c *sentinelFailover) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.sentinel != nil {
- return c.closeSentinel()
- }
- return nil
-}
-
-func (c *sentinelFailover) closeSentinel() error {
- firstErr := c.pubsub.Close()
- c.pubsub = nil
-
- err := c.sentinel.Close()
- if err != nil && firstErr == nil {
- firstErr = err
- }
- c.sentinel = nil
-
- return firstErr
-}
-
-func (c *sentinelFailover) Pool() *pool.ConnPool {
- c.poolOnce.Do(func() {
- opt := *c.opt
- opt.Dialer = c.dial
- c.pool = newConnPool(&opt)
- })
- return c.pool
-}
-
-func (c *sentinelFailover) dial(ctx context.Context, network, _ string) (net.Conn, error) {
- addr, err := c.MasterAddr()
- if err != nil {
- return nil, err
- }
- if c.opt.Dialer != nil {
- return c.opt.Dialer(ctx, network, addr)
- }
- return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
-}
-
-func (c *sentinelFailover) MasterAddr() (string, error) {
- addr, err := c.masterAddr()
- if err != nil {
- return "", err
- }
- c.switchMaster(addr)
- return addr, nil
-}
-
-func (c *sentinelFailover) masterAddr() (string, error) {
- c.mu.RLock()
- sentinel := c.sentinel
- c.mu.RUnlock()
-
- if sentinel != nil {
- addr := c.getMasterAddr(sentinel)
- if addr != "" {
- return addr, nil
- }
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.sentinel != nil {
- addr := c.getMasterAddr(c.sentinel)
- if addr != "" {
- return addr, nil
- }
- _ = c.closeSentinel()
- }
-
- for i, sentinelAddr := range c.sentinelAddrs {
- sentinel := NewSentinelClient(&Options{
- Addr: sentinelAddr,
- Dialer: c.opt.Dialer,
-
- Username: c.username,
- Password: c.password,
-
- MaxRetries: c.opt.MaxRetries,
-
- DialTimeout: c.opt.DialTimeout,
- ReadTimeout: c.opt.ReadTimeout,
- WriteTimeout: c.opt.WriteTimeout,
-
- PoolSize: c.opt.PoolSize,
- PoolTimeout: c.opt.PoolTimeout,
- IdleTimeout: c.opt.IdleTimeout,
- IdleCheckFrequency: c.opt.IdleCheckFrequency,
-
- TLSConfig: c.opt.TLSConfig,
- })
-
- masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
- if err != nil {
- internal.Logger.Printf("sentinel: GetMasterAddrByName master=%q failed: %s",
- c.masterName, err)
- _ = sentinel.Close()
- continue
- }
-
- // Push working sentinel to the top.
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
- c.setSentinel(sentinel)
-
- addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
- return addr, nil
- }
-
- return "", errors.New("redis: all sentinels are unreachable")
-}
-
-func (c *sentinelFailover) getMasterAddr(sentinel *SentinelClient) string {
- addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
- if err != nil {
- internal.Logger.Printf("sentinel: GetMasterAddrByName name=%q failed: %s",
- c.masterName, err)
- return ""
- }
- return net.JoinHostPort(addr[0], addr[1])
-}
-
-func (c *sentinelFailover) switchMaster(addr string) {
- c.mu.RLock()
- masterAddr := c._masterAddr
- c.mu.RUnlock()
- if masterAddr == addr {
- return
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c._masterAddr == addr {
- return
- }
-
- internal.Logger.Printf("sentinel: new master=%q addr=%q",
- c.masterName, addr)
- _ = c.Pool().Filter(func(cn *pool.Conn) bool {
- return cn.RemoteAddr().String() != addr
- })
- c._masterAddr = addr
-}
-
-func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
- if c.sentinel != nil {
- panic("not reached")
- }
- c.sentinel = sentinel
- c.discoverSentinels()
-
- c.pubsub = sentinel.Subscribe("+switch-master")
- go c.listen(c.pubsub)
-}
-
-func (c *sentinelFailover) discoverSentinels() {
- sentinels, err := c.sentinel.Sentinels(c.masterName).Result()
- if err != nil {
- internal.Logger.Printf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
- return
- }
- for _, sentinel := range sentinels {
- vals := sentinel.([]interface{})
- for i := 0; i < len(vals); i += 2 {
- key := vals[i].(string)
- if key == "name" {
- sentinelAddr := vals[i+1].(string)
- if !contains(c.sentinelAddrs, sentinelAddr) {
- internal.Logger.Printf("sentinel: discovered new sentinel=%q for master=%q",
- sentinelAddr, c.masterName)
- c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
- }
- }
- }
- }
-}
-
-func (c *sentinelFailover) listen(pubsub *PubSub) {
- ch := pubsub.Channel()
- for {
- msg, ok := <-ch
- if !ok {
- break
- }
-
- if msg.Channel == "+switch-master" {
- parts := strings.Split(msg.Payload, " ")
- if parts[0] != c.masterName {
- internal.Logger.Printf("sentinel: ignore addr for master=%q", parts[0])
- continue
- }
- addr := net.JoinHostPort(parts[3], parts[4])
- c.switchMaster(addr)
- }
- }
-}
-
-func contains(slice []string, str string) bool {
- for _, s := range slice {
- if s == str {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/go-redis/redis/v7/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
index ebfe903bcd..b975a7b4c3 100644
--- a/vendor/github.com/go-redis/redis/v7/.gitignore
+++ b/vendor/github.com/go-redis/redis/v8/.gitignore
@@ -1,2 +1,3 @@
*.rdb
testdata/*/
+.idea/
diff --git a/vendor/github.com/go-redis/redis/v7/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
index 912dab1ef3..b88b2b9b57 100644
--- a/vendor/github.com/go-redis/redis/v7/.golangci.yml
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -7,9 +7,18 @@ linters:
disable:
- funlen
- gochecknoglobals
+ - gochecknoinits
- gocognit
- goconst
- godox
- gosec
- maligned
- wsl
+ - gomnd
+ - goerr113
+ - exhaustive
+ - nestif
+ - nlreturn
+ - exhaustivestruct
+ - wrapcheck
+ - errorlint
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc
new file mode 100644
index 0000000000..8b7f044ad1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.prettierrc
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
new file mode 100644
index 0000000000..a221d84dba
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.travis.yml
@@ -0,0 +1,20 @@
+dist: xenial
+language: go
+
+services:
+ - redis-server
+
+go:
+ - 1.14.x
+ - 1.15.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+go_import_path: github.com/go-redis/redis
+
+before_install:
+ - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
+ -b $(go env GOPATH)/bin v1.32.2
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
new file mode 100644
index 0000000000..8392d54859
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Changelog
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+See https://redis.uptrace.dev/changelog/
diff --git a/vendor/github.com/go-redis/redis/v7/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
index 298bed9bea..298bed9bea 100644
--- a/vendor/github.com/go-redis/redis/v7/LICENSE
+++ b/vendor/github.com/go-redis/redis/v8/LICENSE
diff --git a/vendor/github.com/go-redis/redis/v7/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
index 86609c6e07..d5d5d1900f 100644
--- a/vendor/github.com/go-redis/redis/v7/Makefile
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -3,6 +3,7 @@ all: testdeps
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
+ go vet
golangci-lint run
testdeps: testdata/redis/src/redis-server
@@ -18,3 +19,9 @@ testdata/redis:
testdata/redis/src/redis-server: testdata/redis
cd $< && make all
+
+tag:
+ git tag $(VERSION)
+ git tag extra/rediscmd/$(VERSION)
+ git tag extra/redisotel/$(VERSION)
+ git tag extra/rediscensus/$(VERSION)
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
new file mode 100644
index 0000000000..e086653665
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -0,0 +1,159 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock).
+- [Distributed Locks](https://github.com/bsm/redislock).
+- [Redis Cache](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+ [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
+ [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+ without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis/v8 (note the _v8_ in the import path; omitting it is a common mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+	Weights: []float64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+## Run the tests
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the redis config file are defined in `main_test.go`:
+```go
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change these variables to point to your local files, or create a soft
+link to your local redis-server under `testdata/redis/src` and copy the config file into
+`testdata/redis/`:
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+```shell
+go test
+```
+
+## See also
+
+- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/vmihailenco/taskq)
diff --git a/vendor/github.com/go-redis/redis/v7/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
index 1907de6c41..2ce475c414 100644
--- a/vendor/github.com/go-redis/redis/v7/cluster.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -5,7 +5,6 @@ import (
"crypto/tls"
"fmt"
"math"
- "math/rand"
"net"
"runtime"
"sort"
@@ -13,10 +12,11 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/hashtag"
- "github.com/go-redis/redis/v7/internal/pool"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/rand"
)
var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
@@ -27,9 +27,12 @@ type ClusterOptions struct {
// A seed list of host:port addresses of cluster nodes.
Addrs []string
+	// NewClient creates a cluster node client with the provided options.
+ NewClient func(opt *Options) *Client
+
// The maximum number of retries before giving up. Command is retried
// on network errors and MOVED/ASK redirects.
- // Default is 8 retries.
+ // Default is 3 retries.
MaxRedirects int
// Enables read-only commands on slave nodes.
@@ -46,16 +49,13 @@ type ClusterOptions struct {
// and load-balance read/write operations between master and slaves.
// It can use service like ZooKeeper to maintain configuration information
// and Cluster.ReloadState to manually trigger state reloading.
- ClusterSlots func() ([]ClusterSlot, error)
-
- // Optional hook that is called when a new node is created.
- OnNewNode func(*Client)
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(*Conn) error
+ OnConnect func(ctx context.Context, cn *Conn) error
Username string
Password string
@@ -68,9 +68,6 @@ type ClusterOptions struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
- // NewClient creates a cluster node client with provided name and options.
- NewClient func(opt *Options) *Client
-
// PoolSize applies per cluster node and not for the whole cluster.
PoolSize int
MinIdleConns int
@@ -86,10 +83,10 @@ func (opt *ClusterOptions) init() {
if opt.MaxRedirects == -1 {
opt.MaxRedirects = 0
} else if opt.MaxRedirects == 0 {
- opt.MaxRedirects = 8
+ opt.MaxRedirects = 3
}
- if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ if opt.RouteByLatency || opt.RouteRandomly {
opt.ReadOnly = true
}
@@ -110,6 +107,9 @@ func (opt *ClusterOptions) init() {
opt.WriteTimeout = opt.ReadTimeout
}
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
@@ -135,12 +135,12 @@ func (opt *ClusterOptions) clientOptions() *Options {
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
+ Username: opt.Username,
+ Password: opt.Password,
+
MaxRetries: opt.MaxRetries,
MinRetryBackoff: opt.MinRetryBackoff,
MaxRetryBackoff: opt.MaxRetryBackoff,
- Username: opt.Username,
- Password: opt.Password,
- readOnly: opt.ReadOnly,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
@@ -154,6 +154,12 @@ func (opt *ClusterOptions) clientOptions() *Options {
IdleCheckFrequency: disableIdleCheck,
TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
}
}
@@ -179,10 +185,6 @@ func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
go node.updateLatency()
}
- if clOpt.OnNewNode != nil {
- clOpt.OnNewNode(node.Client)
- }
-
return &node
}
@@ -195,16 +197,19 @@ func (n *clusterNode) Close() error {
}
func (n *clusterNode) updateLatency() {
- const probes = 10
+ const numProbe = 10
+ var dur uint64
+
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
- var latency uint32
- for i := 0; i < probes; i++ {
start := time.Now()
- n.Client.Ping()
- probe := uint32(time.Since(start) / time.Microsecond)
- latency = (latency + probe) / 2
+ n.Client.Ping(context.TODO())
+ dur += uint64(time.Since(start) / time.Microsecond)
}
- atomic.StoreUint32(&n.latency, latency)
+
+ latency := float64(dur) / float64(numProbe)
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
}
func (n *clusterNode) Latency() time.Duration {
@@ -248,11 +253,11 @@ func (n *clusterNode) SetGeneration(gen uint32) {
type clusterNodes struct {
opt *ClusterOptions
- mu sync.RWMutex
- allAddrs []string
- allNodes map[string]*clusterNode
- clusterAddrs []string
- closed bool
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
_generation uint32 // atomic
}
@@ -261,8 +266,8 @@ func newClusterNodes(opt *ClusterOptions) *clusterNodes {
return &clusterNodes{
opt: opt,
- allAddrs: opt.Addrs,
- allNodes: make(map[string]*clusterNode),
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
}
}
@@ -276,14 +281,14 @@ func (c *clusterNodes) Close() error {
c.closed = true
var firstErr error
- for _, node := range c.allNodes {
+ for _, node := range c.nodes {
if err := node.Client.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
- c.allNodes = nil
- c.clusterAddrs = nil
+ c.nodes = nil
+ c.activeAddrs = nil
return firstErr
}
@@ -293,10 +298,10 @@ func (c *clusterNodes) Addrs() ([]string, error) {
c.mu.RLock()
closed := c.closed
if !closed {
- if len(c.clusterAddrs) > 0 {
- addrs = c.clusterAddrs
+ if len(c.activeAddrs) > 0 {
+ addrs = c.activeAddrs
} else {
- addrs = c.allAddrs
+ addrs = c.addrs
}
}
c.mu.RUnlock()
@@ -318,16 +323,23 @@ func (c *clusterNodes) NextGeneration() uint32 {
func (c *clusterNodes) GC(generation uint32) {
//nolint:prealloc
var collected []*clusterNode
+
c.mu.Lock()
- for addr, node := range c.allNodes {
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
continue
}
- c.clusterAddrs = remove(c.clusterAddrs, addr)
- delete(c.allNodes, addr)
+ delete(c.nodes, addr)
collected = append(collected, node)
}
+
c.mu.Unlock()
for _, node := range collected {
@@ -351,18 +363,17 @@ func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
return nil, pool.ErrClosed
}
- node, ok := c.allNodes[addr]
+ node, ok := c.nodes[addr]
if ok {
- return node, err
+ return node, nil
}
node = newClusterNode(c.opt, addr)
- c.allAddrs = appendIfNotExists(c.allAddrs, addr)
- c.clusterAddrs = append(c.clusterAddrs, addr)
- c.allNodes[addr] = node
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
- return node, err
+ return node, nil
}
func (c *clusterNodes) get(addr string) (*clusterNode, error) {
@@ -372,7 +383,7 @@ func (c *clusterNodes) get(addr string) (*clusterNode, error) {
if c.closed {
err = pool.ErrClosed
} else {
- node = c.allNodes[addr]
+ node = c.nodes[addr]
}
c.mu.RUnlock()
return node, err
@@ -386,8 +397,8 @@ func (c *clusterNodes) All() ([]*clusterNode, error) {
return nil, pool.ErrClosed
}
- cp := make([]*clusterNode, 0, len(c.allNodes))
- for _, node := range c.allNodes {
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
cp = append(cp, node)
}
return cp, nil
@@ -552,8 +563,6 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
}
func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
- const threshold = time.Millisecond
-
nodes := c.slotNodes(slot)
if len(nodes) == 0 {
return c.nodes.Random()
@@ -564,11 +573,16 @@ func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
if n.Failing() {
continue
}
- if node == nil || node.Latency()-n.Latency() > threshold {
+ if node == nil || n.Latency() < node.Latency() {
node = n
}
}
- return node, nil
+ if node != nil {
+ return node, nil
+ }
+
+ // If all nodes are failing - return random node
+ return c.nodes.Random()
}
func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
@@ -597,20 +611,20 @@ func (c *clusterState) slotNodes(slot int) []*clusterNode {
//------------------------------------------------------------------------------
type clusterStateHolder struct {
- load func() (*clusterState, error)
+ load func(ctx context.Context) (*clusterState, error)
state atomic.Value
reloading uint32 // atomic
}
-func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder {
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
return &clusterStateHolder{
load: fn,
}
}
-func (c *clusterStateHolder) Reload() (*clusterState, error) {
- state, err := c.load()
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
if err != nil {
return nil, err
}
@@ -618,39 +632,39 @@ func (c *clusterStateHolder) Reload() (*clusterState, error) {
return state, nil
}
-func (c *clusterStateHolder) LazyReload() {
+func (c *clusterStateHolder) LazyReload(ctx context.Context) {
if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
return
}
go func() {
defer atomic.StoreUint32(&c.reloading, 0)
- _, err := c.Reload()
+ _, err := c.Reload(ctx)
if err != nil {
return
}
- time.Sleep(100 * time.Millisecond)
+ time.Sleep(200 * time.Millisecond)
}()
}
-func (c *clusterStateHolder) Get() (*clusterState, error) {
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
v := c.state.Load()
if v != nil {
state := v.(*clusterState)
- if time.Since(state.createdAt) > time.Minute {
- c.LazyReload()
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload(ctx)
}
return state, nil
}
- return c.Reload()
+ return c.Reload(ctx)
}
-func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
- state, err := c.Reload()
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
if err == nil {
return state, nil
}
- return c.Get()
+ return c.Get(ctx)
}
//------------------------------------------------------------------------------
@@ -717,9 +731,8 @@ func (c *ClusterClient) Options() *ClusterOptions {
// ReloadState reloads cluster state. If available it calls ClusterSlots func
// to get cluster slots information.
-func (c *ClusterClient) ReloadState() error {
- _, err := c.state.Reload()
- return err
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload(ctx)
}
// Close closes the cluster client, releasing any open resources.
@@ -731,34 +744,17 @@ func (c *ClusterClient) Close() error {
}
// Do creates a Cmd from the args and processes the cmd.
-func (c *ClusterClient) Do(args ...interface{}) *Cmd {
- return c.DoContext(c.ctx, args...)
-}
-
-func (c *ClusterClient) DoContext(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(args...)
- _ = c.ProcessContext(ctx, cmd)
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
-func (c *ClusterClient) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *ClusterClient) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.process)
}
func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- err := c._process(ctx, cmd)
- if err != nil {
- cmd.SetErr(err)
- return err
- }
- return nil
-}
-
-func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
cmdInfo := c.cmdInfo(cmd.Name())
slot := c.cmdSlot(cmd)
@@ -774,7 +770,7 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
if node == nil {
var err error
- node, err = c.cmdNode(cmdInfo, slot)
+ node, err = c.cmdNode(ctx, cmdInfo, slot)
if err != nil {
return err
}
@@ -782,23 +778,23 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
if ask {
pipe := node.Client.Pipeline()
- _ = pipe.Process(NewCmd("asking"))
- _ = pipe.Process(cmd)
- _, lastErr = pipe.ExecContext(ctx)
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
_ = pipe.Close()
ask = false
} else {
- lastErr = node.Client.ProcessContext(ctx, cmd)
+ lastErr = node.Client.Process(ctx, cmd)
}
// If there is no error - we are done.
if lastErr == nil {
return nil
}
- if lastErr != Nil {
- c.state.LazyReload()
- }
- if lastErr == pool.ErrClosed || isReadOnlyError(lastErr) {
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
node = nil
continue
}
@@ -822,7 +818,7 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
continue
}
- if isRetryableError(lastErr, cmd.readTimeout() == nil) {
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
// First retry the same node.
if attempt == 0 {
continue
@@ -841,8 +837,11 @@ func (c *ClusterClient) _process(ctx context.Context, cmd Cmder) error {
// ForEachMaster concurrently calls the fn on each master node in the cluster.
// It returns the first error if any.
-func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
- state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
if err != nil {
return err
}
@@ -854,7 +853,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
wg.Add(1)
go func(node *clusterNode) {
defer wg.Done()
- err := fn(node.Client)
+ err := fn(ctx, node.Client)
if err != nil {
select {
case errCh <- err:
@@ -876,8 +875,11 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
// ForEachSlave concurrently calls the fn on each slave node in the cluster.
// It returns the first error if any.
-func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
- state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
if err != nil {
return err
}
@@ -889,7 +891,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
wg.Add(1)
go func(node *clusterNode) {
defer wg.Done()
- err := fn(node.Client)
+ err := fn(ctx, node.Client)
if err != nil {
select {
case errCh <- err:
@@ -909,10 +911,13 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
}
}
-// ForEachNode concurrently calls the fn on each known node in the cluster.
+// ForEachShard concurrently calls the fn on each known node in the cluster.
// It returns the first error if any.
-func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
- state, err := c.state.ReloadOrGet()
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
if err != nil {
return err
}
@@ -922,7 +927,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
worker := func(node *clusterNode) {
defer wg.Done()
- err := fn(node.Client)
+ err := fn(ctx, node.Client)
if err != nil {
select {
case errCh <- err:
@@ -954,7 +959,7 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
func (c *ClusterClient) PoolStats() *PoolStats {
var acc PoolStats
- state, _ := c.state.Get()
+ state, _ := c.state.Get(context.TODO())
if state == nil {
return &acc
}
@@ -984,9 +989,9 @@ func (c *ClusterClient) PoolStats() *PoolStats {
return &acc
}
-func (c *ClusterClient) loadState() (*clusterState, error) {
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
if c.opt.ClusterSlots != nil {
- slots, err := c.opt.ClusterSlots()
+ slots, err := c.opt.ClusterSlots(ctx)
if err != nil {
return nil, err
}
@@ -999,7 +1004,10 @@ func (c *ClusterClient) loadState() (*clusterState, error) {
}
var firstErr error
- for _, addr := range addrs {
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
node, err := c.nodes.Get(addr)
if err != nil {
if firstErr == nil {
@@ -1008,7 +1016,7 @@ func (c *ClusterClient) loadState() (*clusterState, error) {
continue
}
- slots, err := node.Client.ClusterSlots().Result()
+ slots, err := node.Client.ClusterSlots(ctx).Result()
if err != nil {
if firstErr == nil {
firstErr = err
@@ -1019,6 +1027,16 @@ func (c *ClusterClient) loadState() (*clusterState, error) {
return newClusterState(c.nodes, slots, node.Client.opt.Addr)
}
+	/*
+	 * No node is reachable. It's possible that all nodes' IPs have changed.
+	 * Clear activeAddrs so the client can reconnect using the initial address
+	 * list (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which may
+	 * resolve the domain names again and pick up the updated IP addresses.
+	 */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
return nil, firstErr
}
@@ -1036,7 +1054,7 @@ func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
for _, node := range nodes {
_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
if err != nil {
- internal.Logger.Printf("ReapStaleConns failed: %s", err)
+ internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
}
}
}
@@ -1051,8 +1069,8 @@ func (c *ClusterClient) Pipeline() Pipeliner {
return &pipe
}
-func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
}
func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
@@ -1061,7 +1079,7 @@ func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error
func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(cmdsMap, cmds)
+ err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
if err != nil {
setCmdsErr(cmds, err)
return err
@@ -1088,7 +1106,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
return
}
if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
}
} else {
@@ -1107,8 +1125,8 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro
return cmdsFirstErr(cmds)
}
-func (c *ClusterClient) mapCmdsByNode(cmdsMap *cmdsMap, cmds []Cmder) error {
- state, err := c.state.Get()
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
if err != nil {
return err
}
@@ -1159,21 +1177,28 @@ func (c *ClusterClient) _processPipelineNode(
}
return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
})
})
})
}
func (c *ClusterClient) pipelineReadCmds(
- node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
) error {
for _, cmd := range cmds {
err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
if err == nil {
continue
}
- if c.checkMovedErr(cmd, err, failedCmds) {
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
continue
}
@@ -1190,7 +1215,7 @@ func (c *ClusterClient) pipelineReadCmds(
}
func (c *ClusterClient) checkMovedErr(
- cmd Cmder, err error, failedCmds *cmdsMap,
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
) bool {
moved, ask, addr := isMovedError(err)
if !moved && !ask {
@@ -1203,13 +1228,13 @@ func (c *ClusterClient) checkMovedErr(
}
if moved {
- c.state.LazyReload()
+ c.state.LazyReload(ctx)
failedCmds.Add(node, cmd)
return true
}
if ask {
- failedCmds.Add(node, NewCmd("asking"), cmd)
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
return true
}
@@ -1226,16 +1251,19 @@ func (c *ClusterClient) TxPipeline() Pipeliner {
return &pipe
}
-func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+ return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
}
func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
- state, err := c.state.Get()
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ state, err := c.state.Get(ctx)
if err != nil {
setCmdsErr(cmds, err)
return err
@@ -1270,8 +1298,9 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
if err == nil {
return
}
+
if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(failedCmds, cmds); err != nil {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
}
} else {
@@ -1317,11 +1346,11 @@ func (c *ClusterClient) _processTxPipelineNode(
// Trim multi and exec.
cmds = cmds[1 : len(cmds)-1]
- err := c.txPipelineReadQueued(rd, statusCmd, cmds, failedCmds)
+ err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
if err != nil {
moved, ask, addr := isMovedError(err)
if moved || ask {
- return c.cmdsMoved(cmds, moved, ask, addr, failedCmds)
+ return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
}
return err
}
@@ -1333,7 +1362,11 @@ func (c *ClusterClient) _processTxPipelineNode(
}
func (c *ClusterClient) txPipelineReadQueued(
- rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder, failedCmds *cmdsMap,
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
) error {
// Parse queued replies.
if err := statusCmd.readReply(rd); err != nil {
@@ -1342,7 +1375,7 @@ func (c *ClusterClient) txPipelineReadQueued(
for _, cmd := range cmds {
err := statusCmd.readReply(rd)
- if err == nil || c.checkMovedErr(cmd, err, failedCmds) || isRedisError(err) {
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
continue
}
return err
@@ -1370,7 +1403,10 @@ func (c *ClusterClient) txPipelineReadQueued(
}
func (c *ClusterClient) cmdsMoved(
- cmds []Cmder, moved, ask bool, addr string, failedCmds *cmdsMap,
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
) error {
node, err := c.nodes.Get(addr)
if err != nil {
@@ -1378,7 +1414,7 @@ func (c *ClusterClient) cmdsMoved(
}
if moved {
- c.state.LazyReload()
+ c.state.LazyReload(ctx)
for _, cmd := range cmds {
failedCmds.Add(node, cmd)
}
@@ -1387,7 +1423,7 @@ func (c *ClusterClient) cmdsMoved(
if ask {
for _, cmd := range cmds {
- failedCmds.Add(node, NewCmd("asking"), cmd)
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
}
return nil
}
@@ -1395,11 +1431,7 @@ func (c *ClusterClient) cmdsMoved(
return nil
}
-func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
- return c.WatchContext(c.ctx, fn, keys...)
-}
-
-func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
@@ -1412,7 +1444,7 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
}
}
- node, err := c.slotMasterNode(slot)
+ node, err := c.slotMasterNode(ctx, slot)
if err != nil {
return err
}
@@ -1424,13 +1456,10 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
}
}
- err = node.Client.WatchContext(ctx, fn, keys...)
+ err = node.Client.Watch(ctx, fn, keys...)
if err == nil {
break
}
- if err != Nil {
- c.state.LazyReload()
- }
moved, ask, addr := isMovedError(err)
if moved || ask {
@@ -1441,15 +1470,18 @@ func (c *ClusterClient) WatchContext(ctx context.Context, fn func(*Tx) error, ke
continue
}
- if err == pool.ErrClosed || isReadOnlyError(err) {
- node, err = c.slotMasterNode(slot)
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
+ node, err = c.slotMasterNode(ctx, slot)
if err != nil {
return err
}
continue
}
- if isRetryableError(err, true) {
+ if shouldRetry(err, true) {
continue
}
@@ -1464,7 +1496,7 @@ func (c *ClusterClient) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt.clientOptions(),
- newConn: func(channels []string) (*pool.Conn, error) {
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
if node != nil {
panic("node != nil")
}
@@ -1472,7 +1504,7 @@ func (c *ClusterClient) pubSub() *PubSub {
var err error
if len(channels) > 0 {
slot := hashtag.Slot(channels[0])
- node, err = c.slotMasterNode(slot)
+ node, err = c.slotMasterNode(ctx, slot)
} else {
node, err = c.nodes.Random()
}
@@ -1502,20 +1534,20 @@ func (c *ClusterClient) pubSub() *PubSub {
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
-func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
- _ = pubsub.Subscribe(channels...)
+ _ = pubsub.Subscribe(ctx, channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
-func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
- _ = pubsub.PSubscribe(channels...)
+ _ = pubsub.PSubscribe(ctx, channels...)
}
return pubsub
}
@@ -1524,23 +1556,34 @@ func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
-func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
addrs, err := c.nodes.Addrs()
if err != nil {
return nil, err
}
var firstErr error
- for _, addr := range addrs {
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
node, err := c.nodes.Get(addr)
if err != nil {
- return nil, err
- }
- if node == nil {
+ if firstErr == nil {
+ firstErr = err
+ }
continue
}
- info, err := node.Client.Command().Result()
+ info, err := node.Client.Command(ctx).Result()
if err == nil {
return info, nil
}
@@ -1548,18 +1591,22 @@ func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
firstErr = err
}
}
+
+ if firstErr == nil {
+ panic("not reached")
+ }
return nil, firstErr
}
func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get()
+ cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
if err != nil {
return nil
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf("info for cmd=%s not found", name)
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
}
return info
}
@@ -1582,8 +1629,12 @@ func cmdSlot(cmd Cmder, pos int) int {
return hashtag.Slot(firstKey)
}
-func (c *ClusterClient) cmdNode(cmdInfo *CommandInfo, slot int) (*clusterNode, error) {
- state, err := c.state.Get()
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdInfo *CommandInfo,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
if err != nil {
return nil, err
}
@@ -1604,14 +1655,43 @@ func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*cluste
return state.slotSlaveNode(slot)
}
-func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
- state, err := c.state.Get()
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
if err != nil {
return nil, err
}
return state.slotMasterNode(slot)
}
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful if we want to run a particular Lua script, which
+// has only read-only commands, on a replica.
+// This is needed because regular Redis commands carry a flag marking them as
+// read-only and are automatically routed to replica nodes when the
+// ClusterOptions.ReadOnly flag is set to true, whereas scripts are not.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
@@ -1634,21 +1714,6 @@ loop:
return ss
}
-func remove(ss []string, es ...string) []string {
- if len(es) == 0 {
- return ss[:0]
- }
- for _, e := range es {
- for i, s := range ss {
- if s == e {
- ss = append(ss[:i], ss[i+1:]...)
- break
- }
- }
- }
- return ss
-}
-
//------------------------------------------------------------------------------
type cmdsMap struct {
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
new file mode 100644
index 0000000000..1f0bae067a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -0,0 +1,25 @@
+package redis
+
+import (
+ "context"
+ "sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v7/command.go b/vendor/github.com/go-redis/redis/v8/command.go
index dd7fe4a91e..2932035e72 100644
--- a/vendor/github.com/go-redis/redis/v7/command.go
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -1,22 +1,26 @@
package redis
import (
+ "context"
"fmt"
"net"
"strconv"
- "strings"
"time"
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/proto"
- "github.com/go-redis/redis/v7/internal/util"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hscan"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
)
type Cmder interface {
Name() string
+ FullName() string
Args() []interface{}
String() string
stringArg(int) string
+ firstKeyPos() int8
+ setFirstKeyPos(int8)
readTimeout() *time.Duration
readReply(rd *proto.Reader) error
@@ -55,27 +59,11 @@ func writeCmd(wr *proto.Writer, cmd Cmder) error {
return wr.WriteArgs(cmd.Args())
}
-func cmdString(cmd Cmder, val interface{}) string {
- ss := make([]string, 0, len(cmd.Args()))
- for _, arg := range cmd.Args() {
- ss = append(ss, fmt.Sprint(arg))
- }
- s := strings.Join(ss, " ")
- if err := cmd.Err(); err != nil {
- return s + ": " + err.Error()
- }
- if val != nil {
- switch vv := val.(type) {
- case []byte:
- return s + ": " + string(vv)
- default:
- return s + ": " + fmt.Sprint(val)
- }
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
}
- return s
-}
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
switch cmd.Name() {
case "eval", "evalsha":
if cmd.stringArg(2) != "0" {
@@ -85,18 +73,47 @@ func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
return 0
case "publish":
return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
}
- if info == nil {
- return 0
+
+ if info != nil {
+ return int(info.FirstKeyPos)
+ }
+ return 0
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
}
- return int(info.FirstKeyPos)
+
+ return internal.String(b)
}
//------------------------------------------------------------------------------
type baseCmd struct {
- args []interface{}
- err error
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
_readTimeout *time.Duration
}
@@ -111,6 +128,21 @@ func (cmd *baseCmd) Name() string {
return internal.ToLower(cmd.stringArg(0))
}
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
func (cmd *baseCmd) Args() []interface{} {
return cmd.args
}
@@ -123,6 +155,14 @@ func (cmd *baseCmd) stringArg(pos int) string {
return s
}
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
func (cmd *baseCmd) SetErr(e error) {
cmd.err = e
}
@@ -147,9 +187,12 @@ type Cmd struct {
val interface{}
}
-func NewCmd(args ...interface{}) *Cmd {
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
return &Cmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -272,12 +315,12 @@ func (cmd *Cmd) Bool() (bool, error) {
}
}
-func (cmd *Cmd) readReply(rd *proto.Reader) error {
- cmd.val, cmd.err = rd.ReadReply(sliceParser)
- return cmd.err
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply(sliceParser)
+ return err
}
-// Implements proto.MultiBulkParse
+// sliceParser implements proto.MultiBulkParse.
func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
vals := make([]interface{}, n)
for i := 0; i < len(vals); i++ {
@@ -308,9 +351,12 @@ type SliceCmd struct {
var _ Cmder = (*SliceCmd)(nil)
-func NewSliceCmd(args ...interface{}) *SliceCmd {
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
return &SliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -326,12 +372,31 @@ func (cmd *SliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- var v interface{}
- v, cmd.err = rd.ReadArrayReply(sliceParser)
+// Scan scans the results from the command into a destination struct. The keys
+// are matched to the destination struct fields via the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
if cmd.err != nil {
return cmd.err
}
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(sliceParser)
+ if err != nil {
+ return err
+ }
cmd.val = v.([]interface{})
return nil
}
@@ -346,9 +411,12 @@ type StatusCmd struct {
var _ Cmder = (*StatusCmd)(nil)
-func NewStatusCmd(args ...interface{}) *StatusCmd {
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
return &StatusCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -364,9 +432,9 @@ func (cmd *StatusCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StatusCmd) readReply(rd *proto.Reader) error {
- cmd.val, cmd.err = rd.ReadString()
- return cmd.err
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
}
//------------------------------------------------------------------------------
@@ -379,9 +447,12 @@ type IntCmd struct {
var _ Cmder = (*IntCmd)(nil)
-func NewIntCmd(args ...interface{}) *IntCmd {
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
return &IntCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -401,9 +472,9 @@ func (cmd *IntCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *IntCmd) readReply(rd *proto.Reader) error {
- cmd.val, cmd.err = rd.ReadIntReply()
- return cmd.err
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadIntReply()
+ return err
}
//------------------------------------------------------------------------------
@@ -416,9 +487,12 @@ type IntSliceCmd struct {
var _ Cmder = (*IntSliceCmd)(nil)
-func NewIntSliceCmd(args ...interface{}) *IntSliceCmd {
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
return &IntSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -435,7 +509,7 @@ func (cmd *IntSliceCmd) String() string {
}
func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]int64, n)
for i := 0; i < len(cmd.val); i++ {
num, err := rd.ReadIntReply()
@@ -446,7 +520,7 @@ func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -460,9 +534,12 @@ type DurationCmd struct {
var _ Cmder = (*DurationCmd)(nil)
-func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
return &DurationCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
precision: precision,
}
}
@@ -480,10 +557,9 @@ func (cmd *DurationCmd) String() string {
}
func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- var n int64
- n, cmd.err = rd.ReadIntReply()
- if cmd.err != nil {
- return cmd.err
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return err
}
switch n {
// -2 if the key does not exist
@@ -506,9 +582,12 @@ type TimeCmd struct {
var _ Cmder = (*TimeCmd)(nil)
-func NewTimeCmd(args ...interface{}) *TimeCmd {
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
return &TimeCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -525,7 +604,7 @@ func (cmd *TimeCmd) String() string {
}
func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
if n != 2 {
return nil, fmt.Errorf("got %d elements, expected 2", n)
}
@@ -543,7 +622,7 @@ func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
cmd.val = time.Unix(sec, microsec*1000)
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -556,9 +635,12 @@ type BoolCmd struct {
var _ Cmder = (*BoolCmd)(nil)
-func NewBoolCmd(args ...interface{}) *BoolCmd {
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
return &BoolCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -575,17 +657,15 @@ func (cmd *BoolCmd) String() string {
}
func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- var v interface{}
- v, cmd.err = rd.ReadReply(nil)
+ v, err := rd.ReadReply(nil)
// `SET key value NX` returns nil when key already exists. But
// `SETNX key value` returns bool (0/1). So convert nil to bool.
- if cmd.err == Nil {
+ if err == Nil {
cmd.val = false
- cmd.err = nil
return nil
}
- if cmd.err != nil {
- return cmd.err
+ if err != nil {
+ return err
}
switch v := v.(type) {
case int64:
@@ -595,8 +675,7 @@ func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
cmd.val = v == "OK"
return nil
default:
- cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
- return cmd.err
+ return fmt.Errorf("got %T, wanted int64 or string", v)
}
}
@@ -610,9 +689,12 @@ type StringCmd struct {
var _ Cmder = (*StringCmd)(nil)
-func NewStringCmd(args ...interface{}) *StringCmd {
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
return &StringCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -685,9 +767,9 @@ func (cmd *StringCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *StringCmd) readReply(rd *proto.Reader) error {
- cmd.val, cmd.err = rd.ReadString()
- return cmd.err
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
}
//------------------------------------------------------------------------------
@@ -700,9 +782,12 @@ type FloatCmd struct {
var _ Cmder = (*FloatCmd)(nil)
-func NewFloatCmd(args ...interface{}) *FloatCmd {
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
return &FloatCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -718,9 +803,9 @@ func (cmd *FloatCmd) String() string {
return cmdString(cmd, cmd.val)
}
-func (cmd *FloatCmd) readReply(rd *proto.Reader) error {
- cmd.val, cmd.err = rd.ReadFloatReply()
- return cmd.err
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloatReply()
+ return err
}
//------------------------------------------------------------------------------
@@ -733,9 +818,12 @@ type StringSliceCmd struct {
var _ Cmder = (*StringSliceCmd)(nil)
-func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
return &StringSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -756,7 +844,7 @@ func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
}
func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]string, n)
for i := 0; i < len(cmd.val); i++ {
switch s, err := rd.ReadString(); {
@@ -770,7 +858,7 @@ func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -783,9 +871,12 @@ type BoolSliceCmd struct {
var _ Cmder = (*BoolSliceCmd)(nil)
-func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
return &BoolSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -802,7 +893,7 @@ func (cmd *BoolSliceCmd) String() string {
}
func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]bool, n)
for i := 0; i < len(cmd.val); i++ {
n, err := rd.ReadIntReply()
@@ -813,7 +904,7 @@ func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -826,9 +917,12 @@ type StringStringMapCmd struct {
var _ Cmder = (*StringStringMapCmd)(nil)
-func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
return &StringStringMapCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -844,8 +938,29 @@ func (cmd *StringStringMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the destination struct fields via the `redis:"field"` tag.
+func (cmd *StringStringMapCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dst)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make(map[string]string, n/2)
for i := int64(0); i < n; i += 2 {
key, err := rd.ReadString()
@@ -862,7 +977,7 @@ func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -875,9 +990,12 @@ type StringIntMapCmd struct {
var _ Cmder = (*StringIntMapCmd)(nil)
-func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
return &StringIntMapCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -894,7 +1012,7 @@ func (cmd *StringIntMapCmd) String() string {
}
func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make(map[string]int64, n/2)
for i := int64(0); i < n; i += 2 {
key, err := rd.ReadString()
@@ -911,7 +1029,7 @@ func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -924,9 +1042,12 @@ type StringStructMapCmd struct {
var _ Cmder = (*StringStructMapCmd)(nil)
-func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
return &StringStructMapCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -943,7 +1064,7 @@ func (cmd *StringStructMapCmd) String() string {
}
func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make(map[string]struct{}, n)
for i := int64(0); i < n; i++ {
key, err := rd.ReadString()
@@ -954,7 +1075,7 @@ func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -972,9 +1093,12 @@ type XMessageSliceCmd struct {
var _ Cmder = (*XMessageSliceCmd)(nil)
-func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd {
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
return &XMessageSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -991,43 +1115,21 @@ func (cmd *XMessageSliceCmd) String() string {
}
func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var v interface{}
- v, cmd.err = rd.ReadArrayReply(xMessageSliceParser)
- if cmd.err != nil {
- return cmd.err
- }
- cmd.val = v.([]XMessage)
- return nil
+ var err error
+ cmd.val, err = readXMessageSlice(rd)
+ return err
}
-// Implements proto.MultiBulkParse
-func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- msgs := make([]XMessage, n)
- for i := 0; i < len(msgs); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return nil, err
- }
- } else {
- values = v.(map[string]interface{})
- }
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
- msgs[i] = XMessage{
- ID: id,
- Values: values,
- }
- return nil, nil
- })
+ msgs := make([]XMessage, n)
+ for i := 0; i < n; i++ {
+ var err error
+ msgs[i], err = readXMessage(rd)
if err != nil {
return nil, err
}
@@ -1035,7 +1137,38 @@ func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
return msgs, nil
}
-// Implements proto.MultiBulkParse
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return XMessage{}, err
+ }
+ if n != 2 {
+ return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ var values map[string]interface{}
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ } else {
+ values = v.(map[string]interface{})
+ }
+
+ return XMessage{
+ ID: id,
+ Values: values,
+ }, nil
+}
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
m := make(map[string]interface{}, n/2)
for i := int64(0); i < n; i += 2 {
@@ -1069,9 +1202,12 @@ type XStreamSliceCmd struct {
var _ Cmder = (*XStreamSliceCmd)(nil)
-func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd {
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
return &XStreamSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1088,7 +1224,7 @@ func (cmd *XStreamSliceCmd) String() string {
}
func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]XStream, n)
for i := 0; i < len(cmd.val); i++ {
i := i
@@ -1102,14 +1238,14 @@ func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
return nil, err
}
- v, err := rd.ReadArrayReply(xMessageSliceParser)
+ msgs, err := readXMessageSlice(rd)
if err != nil {
return nil, err
}
cmd.val[i] = XStream{
Stream: stream,
- Messages: v.([]XMessage),
+ Messages: msgs,
}
return nil, nil
})
@@ -1119,7 +1255,7 @@ func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1138,9 +1274,12 @@ type XPendingCmd struct {
var _ Cmder = (*XPendingCmd)(nil)
-func NewXPendingCmd(args ...interface{}) *XPendingCmd {
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
return &XPendingCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1157,7 +1296,7 @@ func (cmd *XPendingCmd) String() string {
}
func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
if n != 4 {
return nil, fmt.Errorf("got %d, wanted 4", n)
}
@@ -1218,7 +1357,7 @@ func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1237,9 +1376,12 @@ type XPendingExtCmd struct {
var _ Cmder = (*XPendingExtCmd)(nil)
-func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd {
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
return &XPendingExtCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1256,7 +1398,7 @@ func (cmd *XPendingExtCmd) String() string {
}
func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]XPendingExt, 0, n)
for i := int64(0); i < n; i++ {
_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
@@ -1298,17 +1440,17 @@ func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
type XInfoGroupsCmd struct {
baseCmd
- val []XInfoGroups
+ val []XInfoGroup
}
-type XInfoGroups struct {
+type XInfoGroup struct {
Name string
Consumers int64
Pending int64
@@ -1317,17 +1459,20 @@ type XInfoGroups struct {
var _ Cmder = (*XInfoGroupsCmd)(nil)
-func NewXInfoGroupsCmd(stream string) *XInfoGroupsCmd {
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
return &XInfoGroupsCmd{
- baseCmd: baseCmd{args: []interface{}{"xinfo", "groups", stream}},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
}
}
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroups {
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
return cmd.val
}
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroups, error) {
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
return cmd.val, cmd.err
}
@@ -1336,59 +1481,152 @@ func (cmd *XInfoGroupsCmd) String() string {
}
func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(
- func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(xGroupInfoParser)
- if err != nil {
- return nil, err
- }
- cmd.val = append(cmd.val, v.(XInfoGroups))
- }
- return nil, nil
- })
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXGroupInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
return nil
}
-func xGroupInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+ var group XInfoGroup
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return group, err
+ }
if n != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply,"+
- "wanted 8", n)
+ return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
}
- var (
- err error
- grp XInfoGroups
- key string
- val string
- )
for i := 0; i < 4; i++ {
- key, err = rd.ReadString()
+ key, err := rd.ReadString()
if err != nil {
- return nil, err
+ return group, err
}
- val, err = rd.ReadString()
+
+ val, err := rd.ReadString()
if err != nil {
- return nil, err
+ return group, err
}
+
switch key {
case "name":
- grp.Name = val
+ group.Name = val
case "consumers":
- grp.Consumers, err = strconv.ParseInt(val, 0, 64)
+ group.Consumers, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
case "pending":
- grp.Pending, err = strconv.ParseInt(val, 0, 64)
+ group.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
case "last-delivered-id":
- grp.LastDeliveredID = val
+ group.LastDeliveredID = val
+ default:
+ return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+ }
+ }
+
+ return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ FirstEntry XMessage
+ LastEntry XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(xStreamInfoParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.(*XInfoStream)
+ return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 14 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
+ "wanted 14", n)
+ }
+ var info XInfoStream
+ for i := 0; i < 7; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "length":
+ info.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ info.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ info.RadixTreeNodes, err = rd.ReadIntReply()
+ case "groups":
+ info.Groups, err = rd.ReadIntReply()
+ case "last-generated-id":
+ info.LastGeneratedID, err = rd.ReadString()
+ case "first-entry":
+ info.FirstEntry, err = readXMessage(rd)
+ case "last-entry":
+ info.LastEntry, err = readXMessage(rd)
default:
return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO GROUPS reply", key)
+ "in XINFO STREAM reply", key)
}
if err != nil {
return nil, err
}
}
- return grp, err
+ return &info, nil
}
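A short, hedged example of the new XINFO STREAM plumbing (illustrative only; assumes ctx, an rdb client, and a stream name "mystream" that I made up):

	// Illustrative sketch, not part of the vendored code.
	info, err := rdb.XInfoStream(ctx, "mystream").Result()
	if err != nil {
		// handle error
	}
	// info.Length, info.Groups, info.LastGeneratedID, info.FirstEntry and
	// info.LastEntry are filled from the 14-element reply parsed by xStreamInfoParser.
	_ = info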
//------------------------------------------------------------------------------
@@ -1401,9 +1639,12 @@ type ZSliceCmd struct {
var _ Cmder = (*ZSliceCmd)(nil)
-func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
return &ZSliceCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1420,7 +1661,7 @@ func (cmd *ZSliceCmd) String() string {
}
func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]Z, n/2)
for i := 0; i < len(cmd.val); i++ {
member, err := rd.ReadString()
@@ -1440,7 +1681,7 @@ func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1453,9 +1694,12 @@ type ZWithKeyCmd struct {
var _ Cmder = (*ZWithKeyCmd)(nil)
-func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd {
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
return &ZWithKeyCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1472,7 +1716,7 @@ func (cmd *ZWithKeyCmd) String() string {
}
func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
if n != 3 {
return nil, fmt.Errorf("got %d elements, expected 3", n)
}
@@ -1497,7 +1741,7 @@ func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1508,14 +1752,17 @@ type ScanCmd struct {
page []string
cursor uint64
- process func(cmd Cmder) error
+ process cmdable
}
var _ Cmder = (*ScanCmd)(nil)
-func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
return &ScanCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
process: process,
}
}
@@ -1532,9 +1779,9 @@ func (cmd *ScanCmd) String() string {
return cmdString(cmd, cmd.page)
}
-func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
- cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply()
- return cmd.err
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.page, cmd.cursor, err = rd.ReadScanReply()
+ return err
}
// Iterator creates a new ScanIterator.
@@ -1565,9 +1812,12 @@ type ClusterSlotsCmd struct {
var _ Cmder = (*ClusterSlotsCmd)(nil)
-func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
return &ClusterSlotsCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1584,7 +1834,7 @@ func (cmd *ClusterSlotsCmd) String() string {
}
func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]ClusterSlot, n)
for i := 0; i < len(cmd.val); i++ {
n, err := rd.ReadArrayLen()
@@ -1646,7 +1896,7 @@ func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1682,10 +1932,13 @@ type GeoLocationCmd struct {
var _ Cmder = (*GeoLocationCmd)(nil)
-func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
return &GeoLocationCmd{
- baseCmd: baseCmd{args: geoLocationArgs(q, args...)},
- q: q,
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
}
}
@@ -1735,10 +1988,9 @@ func (cmd *GeoLocationCmd) String() string {
}
func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- var v interface{}
- v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if cmd.err != nil {
- return cmd.err
+ v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if err != nil {
+ return err
}
cmd.locations = v.([]GeoLocation)
return nil
@@ -1758,7 +2010,7 @@ func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
Name: vv,
})
case *GeoLocation:
- //TODO: avoid copying
+ // TODO: avoid copying
locs = append(locs, *vv)
default:
return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
@@ -1826,9 +2078,12 @@ type GeoPosCmd struct {
var _ Cmder = (*GeoPosCmd)(nil)
-func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
return &GeoPosCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1845,7 +2100,7 @@ func (cmd *GeoPosCmd) String() string {
}
func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make([]*GeoPos, n)
for i := 0; i < len(cmd.val); i++ {
i := i
@@ -1876,7 +2131,7 @@ func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
//------------------------------------------------------------------------------
@@ -1900,9 +2155,12 @@ type CommandsInfoCmd struct {
var _ Cmder = (*CommandsInfoCmd)(nil)
-func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
return &CommandsInfoCmd{
- baseCmd: baseCmd{args: args},
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
}
}
@@ -1919,7 +2177,7 @@ func (cmd *CommandsInfoCmd) String() string {
}
func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make(map[string]*CommandInfo, n)
for i := int64(0); i < n; i++ {
v, err := rd.ReadReply(commandInfoParser)
@@ -1931,7 +2189,7 @@ func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
}
return nil, nil
})
- return cmd.err
+ return err
}
func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
@@ -2030,21 +2288,21 @@ func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
//------------------------------------------------------------------------------
type cmdsInfoCache struct {
- fn func() (map[string]*CommandInfo, error)
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
once internal.Once
cmds map[string]*CommandInfo
}
-func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache {
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
return &cmdsInfoCache{
fn: fn,
}
}
-func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
err := c.once.Do(func() error {
- cmds, err := c.fn()
+ cmds, err := c.fn(ctx)
if err != nil {
return err
}
@@ -2062,3 +2320,119 @@ func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
})
return c.cmds, err
}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // The following fields are optional and are emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]SlowLog, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 4 {
+ err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+ return nil, err
+ }
+
+ id, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ createdAt, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ createdAtTime := time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ costsDuration := time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if cmdLen < 1 {
+ err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ return nil, err
+ }
+
+ cmdString := make([]string, cmdLen)
+ for i := 0; i < cmdLen; i++ {
+ cmdString[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var address, name string
+ for i := 4; i < n; i++ {
+ str, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if i == 4 {
+ address = str
+ } else if i == 5 {
+ name = str
+ }
+ }
+
+ cmd.val[i] = SlowLog{
+ ID: id,
+ Time: createdAtTime,
+ Duration: costsDuration,
+ Args: cmdString,
+ ClientAddr: address,
+ ClientName: name,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
new file mode 100644
index 0000000000..422a7c5be2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -0,0 +1,2790 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is an option for the Set command to keep the key's existing TTL.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ default:
+ return append(dst, arg)
+ }
+}
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *StringStringMapCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *SliceCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or greater instance that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// Passing KeepTTL (-1) as the expiration uses the Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
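To make the expiration handling above concrete, a hedged sketch of the wire command each call produces (key and value names are made up; assumes ctx and rdb):

	// Illustrative only — the trailing comments show the resulting Redis command.
	rdb.Set(ctx, "k", "v", 0)                     // SET k v           (no expiration)
	rdb.Set(ctx, "k", "v", 15*time.Second)        // SET k v ex 15     (whole seconds -> EX)
	rdb.Set(ctx, "k", "v", 1500*time.Millisecond) // SET k v px 1500   (usePrecise -> PX)
	rdb.Set(ctx, "k", "v", redis.KeepTTL)         // SET k v keepttl   (Redis >= 6.0)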
+
+// Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// Passing KeepTTL (-1) as the expiration uses the Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// Passing KeepTTL (-1) as the expiration uses the Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+ a := make([]interface{}, 0, 2+len(args))
+ a = append(a, "bitfield")
+ a = append(a, key)
+ a = append(a, args...)
+ cmd := NewIntSliceCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
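For the cursor-based commands above, a hedged sketch of the ScanIterator usage referenced in command.go (pattern and count are made up; assumes ctx and rdb):

	// Illustrative only. Iterator re-issues SCAN with each returned cursor
	// until the cursor reaches 0, yielding one key per Next call.
	iter := rdb.Scan(ctx, 0, "user:*", 100).Iterator()
	for iter.Next(ctx) {
		_ = iter.Val() // current key
	}
	if err := iter.Err(); err != nil {
		// handle error
	}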
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} for each field so that an empty string can be
+// distinguished from a nil value for a missing field.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
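+
+// Usage sketch (illustrative only; rdb, ctx and the field names are
+// assumptions of this sketch, not values defined in this file). It shows how
+// the per-field interface{} result separates a missing field from an empty
+// string:
+//
+//	vals, err := rdb.HMGet(ctx, "myhash", "present", "missing").Result()
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = vals[0]        // the stored value, as a string
+//	_ = vals[1] == nil // true when the field does not exist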
+
+// HSet accepts values in the following formats:
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that Redis v4 or newer is required to set multiple field/value pairs.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
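+
+// Usage sketch (illustrative only; rdb, ctx and the key names are assumptions
+// of this sketch). All three accepted Values layouts produce the same HSET
+// command:
+//
+//	rdb.HSet(ctx, "myhash", "key1", "value1", "key2", "value2")
+//	rdb.HSet(ctx, "myhash", []string{"key1", "value1", "key2", "value2"})
+//	rdb.HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})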
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of the key-value pairs.
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
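+
+// Usage sketch (illustrative only; rdb, ctx and the stream name are
+// assumptions of this sketch). It adds an entry with a server-generated ID
+// and keeps the stream trimmed to roughly 1000 entries:
+//
+//	id, err := rdb.XAdd(ctx, &XAddArgs{
+//		Stream:       "events",
+//		MaxLenApprox: 1000,
+//		Values:       map[string]interface{}{"type": "click", "user": "42"},
+//	}).Result()
+//	if err == nil {
+//		_ = id // the entry ID assigned by the server
+//	}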
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
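+
+// Usage sketch (illustrative only; rdb, ctx and the stream names are
+// assumptions of this sketch). Streams interleaves names and start IDs, so
+// reading two streams takes four entries:
+//
+//	streams, err := rdb.XRead(ctx, &XReadArgs{
+//		Streams: []string{"stream1", "stream2", "0", "0"},
+//		Count:   10,
+//		Block:   time.Second,
+//	}).Result()
+//	if err == nil {
+//		_ = streams // one XStream per stream, each carrying its messages
+//	}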
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
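+
+// Usage sketch (illustrative only; rdb, ctx, and the stream, group and
+// consumer names are assumptions of this sketch). A minimal consumer-group
+// flow built from the commands above:
+//
+//	_ = rdb.XGroupCreateMkStream(ctx, "events", "workers", "$").Err()
+//	streams, err := rdb.XReadGroup(ctx, &XReadGroupArgs{
+//		Group:    "workers",
+//		Consumer: "worker-1",
+//		Streams:  []string{"events", ">"},
+//		Block:    time.Second,
+//	}).Result()
+//	if err == nil {
+//		// iterate streams[i].Messages, then acknowledge each one:
+//		// rdb.XAck(ctx, "events", "workers", msg.ID)
+//	}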
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 4+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key it was popped from.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
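+
+// Usage sketch (illustrative only; rdb, ctx and the key names are assumptions
+// of this sketch). Both blocking pops wait up to the given timeout across the
+// listed keys; a zero timeout blocks indefinitely:
+//
+//	zk, err := rdb.BZPopMin(ctx, 5*time.Second, "queue:high", "queue:low").Result()
+//	if err == nil {
+//		_ = zk // the popped member plus the key it was popped from
+//	}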
+
+func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(ctx, a, n, members...)
+}
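+
+// Usage sketch (illustrative only; rdb, ctx and the member names are
+// assumptions of this sketch). Members are passed as *Z values; the NX/XX/CH
+// variants below only change the ZADD flags:
+//
+//	added, err := rdb.ZAdd(ctx, "leaderboard",
+//		&Z{Score: 10, Member: "alice"},
+//		&Z{Score: 7, Member: "bob"},
+//	).Result()
+//	if err == nil {
+//		_ = added // the number of newly added members
+//	}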
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
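+
+// Usage sketch (illustrative only; rdb, ctx and the key names are assumptions
+// of this sketch). ZStore drives both ZInterStore and ZUnionStore; Weights
+// and Aggregate are optional and omitted from the command when left empty:
+//
+//	n, err := rdb.ZInterStore(ctx, "dest", &ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 1},
+//		Aggregate: "MAX",
+//	}).Result()
+//	if err == nil {
+//		_ = n // the cardinality of the resulting destination set
+//	}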
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(ctx, key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter is the new-style syntax, while ClientKill is the old one:
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
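+
+// Usage sketch (illustrative only; rdb, ctx and the client ID are assumptions
+// of this sketch). Filters are passed as alternating option/value strings:
+//
+//	killed, err := rdb.ClientKillByFilter(ctx, "ID", "12345").Result()
+//	if err == nil {
+//		_ = killed // how many client connections were closed
+//	}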
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(ctx context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
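+
+// Usage sketch (illustrative only; rdb, ctx, the key names and coordinates
+// are assumptions of this sketch). The plain variants run the read-only *_RO
+// commands and reject Store/StoreDist, while the *Store variants require one
+// of them:
+//
+//	locs, err := rdb.GeoRadius(ctx, "places", 13.361389, 38.115556, &GeoRadiusQuery{
+//		Radius: 200, Unit: "km", WithCoord: true,
+//	}).Result()
+//	if err == nil {
+//		_ = locs // matching members with their coordinates
+//	}
+//
+//	_ = rdb.GeoRadiusStore(ctx, "places", 13.361389, 38.115556, &GeoRadiusQuery{
+//		Radius: 200, Unit: "km", Store: "places:nearby",
+//	}).Err()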
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v7/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
index 55262533a6..55262533a6 100644
--- a/vendor/github.com/go-redis/redis/v7/doc.go
+++ b/vendor/github.com/go-redis/redis/v8/doc.go
diff --git a/vendor/github.com/go-redis/redis/v7/error.go b/vendor/github.com/go-redis/redis/v8/error.go
index 0ffaca9f8f..2ed5dc6284 100644
--- a/vendor/github.com/go-redis/redis/v7/error.go
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -6,8 +6,8 @@ import (
"net"
"strings"
- "github.com/go-redis/redis/v7/internal/pool"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
)
var ErrClosed = pool.ErrClosed
@@ -24,15 +24,16 @@ type Error interface {
var _ Error = proto.RedisError("")
-func isRetryableError(err error, retryTimeout bool) bool {
+func shouldRetry(err error, retryTimeout bool) bool {
switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return true
case nil, context.Canceled, context.DeadlineExceeded:
return false
- case io.EOF:
- return true
}
- if netErr, ok := err.(net.Error); ok {
- if netErr.Timeout() {
+
+ if v, ok := err.(timeoutError); ok {
+ if v.Timeout() {
return retryTimeout
}
return true
@@ -51,6 +52,10 @@ func isRetryableError(err error, retryTimeout bool) bool {
if strings.HasPrefix(s, "CLUSTERDOWN ") {
return true
}
+ if strings.HasPrefix(s, "TRYAGAIN ") {
+ return true
+ }
+
return false
}
@@ -60,19 +65,25 @@ func isRedisError(err error) bool {
}
func isBadConn(err error, allowTimeout bool) bool {
- if err == nil {
+ switch err {
+ case nil:
return false
+ case context.Canceled, context.DeadlineExceeded:
+ return true
}
+
if isRedisError(err) {
// Close connections in read only state in case domain addr is used
// and domain resolves to a different Redis Server. See #790.
return isReadOnlyError(err)
}
+
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return false
+ return !netErr.Temporary()
}
}
+
return true
}
@@ -106,3 +117,9 @@ func isLoadingError(err error) bool {
func isReadOnlyError(err error) bool {
return strings.HasPrefix(err.Error(), "READONLY ")
}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+ Timeout() bool
+}
diff --git a/vendor/github.com/go-redis/redis/v8/go.mod b/vendor/github.com/go-redis/redis/v8/go.mod
new file mode 100644
index 0000000000..4f5952762f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.mod
@@ -0,0 +1,11 @@
+module github.com/go-redis/redis/v8
+
+go 1.13
+
+require (
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+ github.com/onsi/ginkgo v1.15.0
+ github.com/onsi/gomega v1.10.5
+ go.opentelemetry.io/otel v0.16.0
+)
diff --git a/vendor/github.com/go-redis/redis/v8/go.sum b/vendor/github.com/go-redis/redis/v8/go.sum
new file mode 100644
index 0000000000..b46929e693
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.sum
@@ -0,0 +1,97 @@
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
+github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opentelemetry.io/otel v0.16.0 h1:uIWEbdeb4vpKPGITLsRVUS44L5oDbDUCZxn8lkxhmgw=
+go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
new file mode 100644
index 0000000000..b97fa0d685
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/arg.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+ switch v := v.(type) {
+ case nil:
+ return append(b, "<nil>"...)
+ case string:
+ return appendUTF8String(b, Bytes(v))
+ case []byte:
+ return appendUTF8String(b, v)
+ case int:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int8:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int16:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int32:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int64:
+ return strconv.AppendInt(b, v, 10)
+ case uint:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint8:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint16:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint32:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint64:
+ return strconv.AppendUint(b, v, 10)
+ case float32:
+ return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+ case float64:
+ return strconv.AppendFloat(b, v, 'f', -1, 64)
+ case bool:
+ if v {
+ return append(b, "true"...)
+ }
+ return append(b, "false"...)
+ case time.Time:
+ return v.AppendFormat(b, time.RFC3339Nano)
+ default:
+ return append(b, fmt.Sprint(v)...)
+ }
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+ dst = append(dst, src...)
+ return dst
+}
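
AppendArg renders one command argument into a byte slice and appears to exist for building human-readable command strings in errors and logs. A standalone sketch of the same idea for a few common types (not part of the diff):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// appendArg mirrors the internal AppendArg switch above for a few common types.
func appendArg(b []byte, v interface{}) []byte {
	switch v := v.(type) {
	case nil:
		return append(b, "<nil>"...)
	case string:
		return append(b, v...)
	case int:
		return strconv.AppendInt(b, int64(v), 10)
	case time.Time:
		return v.AppendFormat(b, time.RFC3339Nano)
	default:
		return append(b, fmt.Sprint(v)...)
	}
}

func main() {
	// Render "set key 42" the way a command might be rendered for logging.
	args := []interface{}{"set", "key", 42}
	var b []byte
	for i, arg := range args {
		if i > 0 {
			b = append(b, ' ')
		}
		b = appendArg(b, arg)
	}
	fmt.Println(string(b)) // set key 42
}
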
diff --git a/vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
index 22f5b3981b..2fc74ad1cd 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -1,8 +1,9 @@
package hashtag
import (
- "math/rand"
"strings"
+
+ "github.com/go-redis/redis/v8/internal/rand"
)
const slotNumber = 16384
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
new file mode 100644
index 0000000000..181260b802
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
@@ -0,0 +1,151 @@
+package hscan
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+ // List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+ decoders = []decoderFunc{
+ reflect.Bool: decodeBool,
+ reflect.Int: decodeInt,
+ reflect.Int8: decodeInt,
+ reflect.Int16: decodeInt,
+ reflect.Int32: decodeInt,
+ reflect.Int64: decodeInt,
+ reflect.Uint: decodeUint,
+ reflect.Uint8: decodeUint,
+ reflect.Uint16: decodeUint,
+ reflect.Uint32: decodeUint,
+ reflect.Uint64: decodeUint,
+ reflect.Float32: decodeFloat,
+ reflect.Float64: decodeFloat,
+ reflect.Complex64: decodeUnsupported,
+ reflect.Complex128: decodeUnsupported,
+ reflect.Array: decodeUnsupported,
+ reflect.Chan: decodeUnsupported,
+ reflect.Func: decodeUnsupported,
+ reflect.Interface: decodeUnsupported,
+ reflect.Map: decodeUnsupported,
+ reflect.Ptr: decodeUnsupported,
+ reflect.Slice: decodeSlice,
+ reflect.String: decodeString,
+ reflect.Struct: decodeUnsupported,
+ reflect.UnsafePointer: decodeUnsupported,
+ }
+
+ // Global map of struct field specs that is populated once for every new
+ // struct type that is scanned. This caches the field types and the corresponding
+ // decoder functions to avoid iterating through struct fields on subsequent scans.
+ globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+ v := reflect.ValueOf(dst)
+
+ // The destination to scan into should be a struct pointer.
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+ }
+
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+ }
+
+ return StructValue{
+ spec: globalStructMap.get(v.Type()),
+ value: v,
+ }, nil
+}
+
+// Scan scans the results from a key-value Redis map result set to a destination struct.
+// The Redis keys are matched to the struct's fields with the `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+ if len(keys) != len(vals) {
+ return errors.New("args should have the same number of keys and vals")
+ }
+
+ strct, err := Struct(dst)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the (key, value) sequence.
+ for i := 0; i < len(vals); i++ {
+ key, ok := keys[i].(string)
+ if !ok {
+ continue
+ }
+
+ val, ok := vals[i].(string)
+ if !ok {
+ continue
+ }
+
+ if err := strct.Scan(key, val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func decodeBool(f reflect.Value, s string) error {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return err
+ }
+ f.SetBool(b)
+ return nil
+}
+
+func decodeInt(f reflect.Value, s string) error {
+ v, err := strconv.ParseInt(s, 10, 0)
+ if err != nil {
+ return err
+ }
+ f.SetInt(v)
+ return nil
+}
+
+func decodeUint(f reflect.Value, s string) error {
+ v, err := strconv.ParseUint(s, 10, 0)
+ if err != nil {
+ return err
+ }
+ f.SetUint(v)
+ return nil
+}
+
+func decodeFloat(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 0)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+ f.SetString(s)
+ return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+ // []byte slice ([]uint8).
+ if f.Type().Elem().Kind() == reflect.Uint8 {
+ f.SetBytes([]byte(s))
+ }
+ return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+ return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
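
hscan maps a flat key/value reply onto a struct via `redis` field tags, caching one decoder per field kind. A sketch using the Struct/Scan signatures above; hscan is an internal package, so this is illustrative only, and the field names and values are made up:

package example

// hscan is internal to the go-redis module; this import is illustrative only.
import "github.com/go-redis/redis/v8/internal/hscan"

type User struct {
	Name  string `redis:"name"`
	Age   int    `redis:"age"`
	Admin bool   `redis:"admin"`
}

func scanUser() (User, error) {
	var u User
	// Keys and values as they come back from a flat Redis map reply.
	keys := []interface{}{"name", "age", "admin"}
	vals := []interface{}{"alice", "42", "1"}
	// Each key is matched to the field carrying the same `redis` tag and the
	// string value is decoded by the per-kind decoder table above.
	err := hscan.Scan(&u, keys, vals)
	return u, err // User{Name: "alice", Age: 42, Admin: true}
}
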
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
new file mode 100644
index 0000000000..37d86ba551
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
@@ -0,0 +1,87 @@
+package hscan
+
+import (
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+ m sync.Map
+}
+
+func newStructMap() *structMap {
+ return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+ if v, ok := s.m.Load(t); ok {
+ return v.(*structSpec)
+ }
+
+ spec := newStructSpec(t, "redis")
+ s.m.Store(t, spec)
+ return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+ m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+ s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+ out := &structSpec{
+ m: make(map[string]*structField),
+ }
+
+ num := t.NumField()
+ for i := 0; i < num; i++ {
+ f := t.Field(i)
+
+ tag := f.Tag.Get(fieldTag)
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ tag = strings.Split(tag, ",")[0]
+ if tag == "" {
+ continue
+ }
+
+ // Use the built-in decoder.
+ out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+ }
+
+ return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+ index int
+ fn decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+ spec *structSpec
+ value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+ field, ok := s.spec.m[key]
+ if !ok {
+ return nil
+ }
+ return field.fn(s.value.Field(field.index), value)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
new file mode 100644
index 0000000000..2d7dacce7a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
@@ -0,0 +1,33 @@
+package internal
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/metric"
+)
+
+var (
+ // WritesCounter is a count of write commands performed.
+ WritesCounter metric.Int64Counter
+ // NewConnectionsCounter is a count of new connections.
+ NewConnectionsCounter metric.Int64Counter
+)
+
+func init() {
+ defer func() {
+ if r := recover(); r != nil {
+ Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments: %v", r)
+ }
+ }()
+
+ meter := metric.Must(otel.Meter("github.com/go-redis/redis"))
+
+ WritesCounter = meter.NewInt64Counter("redis.writes",
+ metric.WithDescription("the number of writes initiated"),
+ )
+
+ NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
+ metric.WithDescription("the number of connections created"),
+ )
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
new file mode 100644
index 0000000000..4a59c599be
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ panic("not reached")
+ }
+ if minBackoff == 0 {
+ return 0
+ }
+
+ d := minBackoff << uint(retry)
+ if d < minBackoff {
+ return maxBackoff
+ }
+
+ d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+ if d > maxBackoff || d < minBackoff {
+ d = maxBackoff
+ }
+
+ return d
+}
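
RetryBackoff doubles the minimum backoff per retry, adds random jitter, and clamps the result to maxBackoff. A standalone sketch of the same formula, using the documented 8 millisecond minimum and an assumed 512 millisecond maximum:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff mirrors internal.RetryBackoff above: exponential growth with
// jitter, clamped to [minBackoff, maxBackoff].
func retryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
	if minBackoff == 0 {
		return 0
	}
	d := minBackoff << uint(retry)
	if d < minBackoff { // overflow guard
		return maxBackoff
	}
	d = minBackoff + time.Duration(rand.Int63n(int64(d)))
	if d > maxBackoff || d < minBackoff {
		d = maxBackoff
	}
	return d
}

func main() {
	for retry := 0; retry < 5; retry++ {
		// With 8ms/512ms the exclusive upper bound grows 16ms, 24ms, 40ms, 72ms, 136ms.
		fmt.Println(retry, retryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond))
	}
}
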
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
new file mode 100644
index 0000000000..3810f9e4e7
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+)
+
+type Logging interface {
+ Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+ log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+ _ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+var Logger Logging = &logger{
+ log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
index 64f46272ae..64f46272ae 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/once.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/once.go
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
index e9a2585463..b784530438 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/pool/conn.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -1,26 +1,30 @@
package pool
import (
+ "bufio"
"context"
"net"
"sync/atomic"
"time"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/trace"
)
var noDeadline = time.Time{}
type Conn struct {
+ usedAt int64 // atomic
netConn net.Conn
rd *proto.Reader
+ bw *bufio.Writer
wr *proto.Writer
Inited bool
pooled bool
createdAt time.Time
- usedAt int64 // atomic
}
func NewConn(netConn net.Conn) *Conn {
@@ -29,7 +33,8 @@ func NewConn(netConn net.Conn) *Conn {
createdAt: time.Now(),
}
cn.rd = proto.NewReader(netConn)
- cn.wr = proto.NewWriter(netConn)
+ cn.bw = bufio.NewWriter(netConn)
+ cn.wr = proto.NewWriter(cn.bw)
cn.SetUsedAt(time.Now())
return cn
}
@@ -46,7 +51,7 @@ func (cn *Conn) SetUsedAt(tm time.Time) {
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
cn.rd.Reset(netConn)
- cn.wr.Reset(netConn)
+ cn.bw.Reset(netConn)
}
func (cn *Conn) Write(b []byte) (int, error) {
@@ -54,35 +59,48 @@ func (cn *Conn) Write(b []byte) (int, error) {
}
func (cn *Conn) RemoteAddr() net.Addr {
- return cn.netConn.RemoteAddr()
+ if cn.netConn != nil {
+ return cn.netConn.RemoteAddr()
+ }
+ return nil
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout))
- if err != nil {
- return err
- }
- return fn(cn.rd)
+ return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, span, err)
+ }
+ if err := fn(cn.rd); err != nil {
+ return internal.RecordError(ctx, span, err)
+ }
+ return nil
+ })
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
- err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout))
- if err != nil {
- return err
- }
+ return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, span, err)
+ }
- if cn.wr.Buffered() > 0 {
- cn.wr.Reset(cn.netConn)
- }
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
- err = fn(cn.wr)
- if err != nil {
- return err
- }
+ if err := fn(cn.wr); err != nil {
+ return internal.RecordError(ctx, span, err)
+ }
+
+ if err := cn.bw.Flush(); err != nil {
+ return internal.RecordError(ctx, span, err)
+ }
+
+ internal.WritesCounter.Add(ctx, 1)
- return cn.wr.Flush()
+ return nil
+ })
}
func (cn *Conn) Close() error {
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
index a8d8276a9f..254a18de50 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -8,11 +8,13 @@ import (
"sync/atomic"
"time"
- "github.com/go-redis/redis/v7/internal"
+ "github.com/go-redis/redis/v8/internal"
)
-var ErrClosed = errors.New("redis: client is closed")
-var ErrPoolTimeout = errors.New("redis: connection pool timeout")
+var (
+ ErrClosed = errors.New("redis: client is closed")
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
var timers = sync.Pool{
New: func() interface{} {
@@ -38,8 +40,8 @@ type Pooler interface {
CloseConn(*Conn) error
Get(context.Context) (*Conn, error)
- Put(*Conn)
- Remove(*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
Len() int
IdleLen() int
@@ -60,13 +62,16 @@ type Options struct {
IdleCheckFrequency time.Duration
}
+type lastDialErrorWrap struct {
+ err error
+}
+
type ConnPool struct {
opt *Options
dialErrorsNum uint32 // atomic
- lastDialErrorMu sync.RWMutex
- lastDialError error
+ lastDialError atomic.Value
queue chan struct{}
@@ -158,6 +163,7 @@ func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
}
}
p.connsMu.Unlock()
+
return cn, nil
}
@@ -179,6 +185,7 @@ func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
return nil, err
}
+ internal.NewConnectionsCounter.Add(ctx, 1)
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
@@ -204,16 +211,15 @@ func (p *ConnPool) tryDial() {
}
func (p *ConnPool) setLastDialError(err error) {
- p.lastDialErrorMu.Lock()
- p.lastDialError = err
- p.lastDialErrorMu.Unlock()
+ p.lastDialError.Store(&lastDialErrorWrap{err: err})
}
func (p *ConnPool) getLastDialError() error {
- p.lastDialErrorMu.RLock()
- err := p.lastDialError
- p.lastDialErrorMu.RUnlock()
- return err
+ err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+ if err != nil {
+ return err.err
+ }
+ return nil
}
// Get returns an existing connection from the pool or creates a new one.
@@ -313,15 +319,15 @@ func (p *ConnPool) popIdle() *Conn {
return cn
}
-func (p *ConnPool) Put(cn *Conn) {
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
if cn.rd.Buffered() > 0 {
- internal.Logger.Printf("Conn has unread data")
- p.Remove(cn, BadConnError{})
+ internal.Logger.Printf(ctx, "Conn has unread data")
+ p.Remove(ctx, cn, BadConnError{})
return
}
if !cn.pooled {
- p.Remove(cn, nil)
+ p.Remove(ctx, cn, nil)
return
}
@@ -332,7 +338,7 @@ func (p *ConnPool) Put(cn *Conn) {
p.freeTurn()
}
-func (p *ConnPool) Remove(cn *Conn, reason error) {
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
@@ -403,8 +409,10 @@ func (p *ConnPool) closed() bool {
}
func (p *ConnPool) Filter(fn func(*Conn) bool) error {
- var firstErr error
p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ var firstErr error
for _, cn := range p.conns {
if fn(cn) {
if err := p.closeConn(cn); err != nil && firstErr == nil {
@@ -412,7 +420,6 @@ func (p *ConnPool) Filter(fn func(*Conn) bool) error {
}
}
}
- p.connsMu.Unlock()
return firstErr
}
@@ -453,7 +460,7 @@ func (p *ConnPool) reaper(frequency time.Duration) {
}
_, err := p.ReapStaleConns()
if err != nil {
- internal.Logger.Printf("ReapStaleConns failed: %s", err)
+ internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
continue
}
case <-p.closedCh:
@@ -470,6 +477,7 @@ func (p *ConnPool) ReapStaleConns() (int, error) {
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
+
p.freeTurn()
if cn != nil {
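
lastDialError moves from a mutex-guarded field to an atomic.Value. The error is wrapped in lastDialErrorWrap, most likely because atomic.Value.Store panics if successive stores use different concrete types, and dial errors can carry different dynamic types. A minimal standalone sketch of the same pattern (names are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// lastError wraps the error so that atomic.Value always stores the same
// concrete type; storing raw errors with differing dynamic types would
// make Value.Store panic.
type lastError struct{ err error }

type dialer struct{ last atomic.Value }

func (d *dialer) setLastDialError(err error) {
	d.last.Store(&lastError{err: err})
}

func (d *dialer) lastDialError() error {
	if w, _ := d.last.Load().(*lastError); w != nil {
		return w.err
	}
	return nil
}

func main() {
	var d dialer
	fmt.Println(d.lastDialError()) // <nil>
	d.setLastDialError(errors.New("dial tcp 127.0.0.1:6379: connection refused"))
	fmt.Println(d.lastDialError()) // dial tcp 127.0.0.1:6379: connection refused
}
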
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
new file mode 100644
index 0000000000..5a3fde191b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+ pool Pooler
+ cn *Conn
+ stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ pool: pool,
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.stickyErr != nil {
+ return nil, p.stickyErr
+ }
+ return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.cn = nil
+ p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+ p.cn = nil
+ p.stickyErr = ErrClosed
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
index 04758a00d4..c3e7e7c045 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/pool/pool_single.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -2,6 +2,7 @@ package pool
import (
"context"
+ "errors"
"fmt"
"sync/atomic"
)
@@ -30,9 +31,11 @@ func (e BadConnError) Unwrap() error {
return e.wrapped
}
-type SingleConnPool struct {
- pool Pooler
- level int32 // atomic
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+ pool Pooler
+ shared int32 // atomic
state uint32 // atomic
ch chan *Conn
@@ -40,37 +43,29 @@ type SingleConnPool struct {
_badConnError atomic.Value
}
-var _ Pooler = (*SingleConnPool)(nil)
+var _ Pooler = (*StickyConnPool)(nil)
-func NewSingleConnPool(pool Pooler) *SingleConnPool {
- p, ok := pool.(*SingleConnPool)
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+ p, ok := pool.(*StickyConnPool)
if !ok {
- p = &SingleConnPool{
+ p = &StickyConnPool{
pool: pool,
ch: make(chan *Conn, 1),
}
}
- atomic.AddInt32(&p.level, 1)
+ atomic.AddInt32(&p.shared, 1)
return p
}
-func (p *SingleConnPool) SetConn(cn *Conn) {
- if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
- p.ch <- cn
- } else {
- panic("not reached")
- }
-}
-
-func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
return p.pool.NewConn(ctx)
}
-func (p *SingleConnPool) CloseConn(cn *Conn) error {
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
return p.pool.CloseConn(cn)
}
-func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
// In worst case this races with Close which is not a very common operation.
for i := 0; i < 1000; i++ {
switch atomic.LoadUint32(&p.state) {
@@ -82,7 +77,7 @@ func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
return cn, nil
}
- p.pool.Remove(cn, ErrClosed)
+ p.pool.Remove(ctx, cn, ErrClosed)
case stateInited:
if err := p.badConnError(); err != nil {
return nil, err
@@ -98,60 +93,38 @@ func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
panic("not reached")
}
}
- return nil, fmt.Errorf("redis: SingleConnPool.Get: infinite loop")
+ return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
}
-func (p *SingleConnPool) Put(cn *Conn) {
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
defer func() {
if recover() != nil {
- p.freeConn(cn)
+ p.freeConn(ctx, cn)
}
}()
p.ch <- cn
}
-func (p *SingleConnPool) freeConn(cn *Conn) {
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
if err := p.badConnError(); err != nil {
- p.pool.Remove(cn, err)
+ p.pool.Remove(ctx, cn, err)
} else {
- p.pool.Put(cn)
+ p.pool.Put(ctx, cn)
}
}
-func (p *SingleConnPool) Remove(cn *Conn, reason error) {
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
defer func() {
if recover() != nil {
- p.pool.Remove(cn, ErrClosed)
+ p.pool.Remove(ctx, cn, ErrClosed)
}
}()
p._badConnError.Store(BadConnError{wrapped: reason})
p.ch <- cn
}
-func (p *SingleConnPool) Len() int {
- switch atomic.LoadUint32(&p.state) {
- case stateDefault:
- return 0
- case stateInited:
- return 1
- case stateClosed:
- return 0
- default:
- panic("not reached")
- }
-}
-
-func (p *SingleConnPool) IdleLen() int {
- return len(p.ch)
-}
-
-func (p *SingleConnPool) Stats() *Stats {
- return &Stats{}
-}
-
-func (p *SingleConnPool) Close() error {
- level := atomic.AddInt32(&p.level, -1)
- if level > 0 {
+func (p *StickyConnPool) Close() error {
+ if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
return nil
}
@@ -164,16 +137,16 @@ func (p *SingleConnPool) Close() error {
close(p.ch)
cn, ok := <-p.ch
if ok {
- p.freeConn(cn)
+ p.freeConn(context.TODO(), cn)
}
return nil
}
}
- return fmt.Errorf("redis: SingleConnPool.Close: infinite loop")
+ return errors.New("redis: StickyConnPool.Close: infinite loop")
}
-func (p *SingleConnPool) Reset() error {
+func (p *StickyConnPool) Reset(ctx context.Context) error {
if p.badConnError() == nil {
return nil
}
@@ -183,21 +156,21 @@ func (p *SingleConnPool) Reset() error {
if !ok {
return ErrClosed
}
- p.pool.Remove(cn, ErrClosed)
+ p.pool.Remove(ctx, cn, ErrClosed)
p._badConnError.Store(BadConnError{wrapped: nil})
default:
- return fmt.Errorf("redis: SingleConnPool does not have a Conn")
+ return errors.New("redis: StickyConnPool does not have a Conn")
}
if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
state := atomic.LoadUint32(&p.state)
- return fmt.Errorf("redis: invalid SingleConnPool state: %d", state)
+ return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
}
return nil
}
-func (p *SingleConnPool) badConnError() error {
+func (p *StickyConnPool) badConnError() error {
if v := p._badConnError.Load(); v != nil {
err := v.(BadConnError)
if err.wrapped != nil {
@@ -206,3 +179,24 @@ func (p *SingleConnPool) badConnError() error {
}
return nil
}
+
+func (p *StickyConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
index d3f646e98f..0fbc51e9ac 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -5,7 +5,7 @@ import (
"fmt"
"io"
- "github.com/go-redis/redis/v7/internal/util"
+ "github.com/go-redis/redis/v8/internal/util"
)
const (
@@ -71,13 +71,25 @@ func (r *Reader) ReadLine() ([]byte, error) {
func (r *Reader) readLine() ([]byte, error) {
b, err := r.rd.ReadSlice('\n')
if err != nil {
- return nil, err
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...)
+ b = full
}
if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
return nil, fmt.Errorf("redis: invalid reply: %q", b)
}
- b = b[:len(b)-2]
- return b, nil
+ return b[:len(b)-2], nil
}
func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
@@ -181,7 +193,7 @@ func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
}
}
-func (r *Reader) ReadArrayLen() (int64, error) {
+func (r *Reader) ReadArrayLen() (int, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
@@ -190,7 +202,11 @@ func (r *Reader) ReadArrayLen() (int64, error) {
case ErrorReply:
return 0, ParseErrorReply(line)
case ArrayReply:
- return parseArrayLen(line)
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return 0, err
+ }
+ return int(n), nil
default:
return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
}
@@ -216,7 +232,8 @@ func (r *Reader) ReadScanReply() ([]string, uint64, error) {
}
keys := make([]string, n)
- for i := int64(0); i < n; i++ {
+
+ for i := 0; i < n; i++ {
key, err := r.ReadString()
if err != nil {
return nil, 0, err
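
readLine now survives replies longer than the bufio buffer: ReadSlice returns bufio.ErrBufferFull together with the partial line, which is copied before the remainder is drained with ReadBytes. The same pattern in isolation, as a standalone sketch:

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// readFullLine reads up to and including '\n', even when the line is longer
// than the bufio.Reader's internal buffer (ReadSlice then returns ErrBufferFull).
func readFullLine(rd *bufio.Reader) ([]byte, error) {
	b, err := rd.ReadSlice('\n')
	if err != nil {
		if err != bufio.ErrBufferFull {
			return nil, err
		}
		full := make([]byte, len(b))
		copy(full, b) // ReadSlice's buffer is invalidated by the next read
		rest, err := rd.ReadBytes('\n')
		if err != nil {
			return nil, err
		}
		b = append(full, rest...)
	}
	return b, nil
}

func main() {
	long := bytes.Repeat([]byte("x"), 100) // longer than the 16-byte buffer below
	rd := bufio.NewReaderSize(bytes.NewReader(append(long, '\n')), 16)
	line, err := readFullLine(rd)
	fmt.Println(len(line), err) // 101 <nil>
}
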
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
index 90c1e4ae6e..c2c3ed17de 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -4,10 +4,13 @@ import (
"encoding"
"fmt"
"reflect"
+ "time"
- "github.com/go-redis/redis/v7/internal/util"
+ "github.com/go-redis/redis/v8/internal/util"
)
+// Scan parses bytes `b` to `v` with appropriate type.
+// nolint: gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
case nil:
@@ -99,6 +102,10 @@ func Scan(b []byte, v interface{}) error {
case *bool:
*v = len(b) == 1 && b[0] == '1'
return nil
+ case *time.Time:
+ var err error
+ *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+ return err
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
default:
@@ -124,7 +131,7 @@ func ScanSlice(data []string, slice interface{}) error {
for i, s := range data {
elem := next()
if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
- err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
return err
}
}
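
Scan now also decodes into *time.Time using RFC3339Nano, matching the writer change below where time.Time arguments are appended with AppendFormat(time.RFC3339Nano). A standalone round-trip sketch of that wire format:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Write side (proto.Writer.WriteArg): time.Time is appended as RFC3339Nano.
	t := time.Date(2021, 2, 10, 21, 28, 32, 123456789, time.UTC)
	wire := t.AppendFormat(nil, time.RFC3339Nano)

	// Read side (proto.Scan into *time.Time): the same layout is parsed back.
	parsed, err := time.Parse(time.RFC3339Nano, string(wire))
	fmt.Println(string(wire), parsed.Equal(t), err)
	// 2021-02-10T21:28:32.123456789Z true <nil>
}
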
diff --git a/vendor/github.com/go-redis/redis/v7/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
index d552f1e84d..81b09b8e4f 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/proto/writer.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -1,26 +1,32 @@
package proto
import (
- "bufio"
"encoding"
"fmt"
"io"
"strconv"
"time"
- "github.com/go-redis/redis/v7/internal/util"
+ "github.com/go-redis/redis/v8/internal/util"
)
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ // io.StringWriter
+ WriteString(s string) (n int, err error)
+}
+
type Writer struct {
- wr *bufio.Writer
+ writer
lenBuf []byte
numBuf []byte
}
-func NewWriter(wr io.Writer) *Writer {
+func NewWriter(wr writer) *Writer {
return &Writer{
- wr: bufio.NewWriter(wr),
+ writer: wr,
lenBuf: make([]byte, 64),
numBuf: make([]byte, 64),
@@ -28,19 +34,16 @@ func NewWriter(wr io.Writer) *Writer {
}
func (w *Writer) WriteArgs(args []interface{}) error {
- err := w.wr.WriteByte(ArrayReply)
- if err != nil {
+ if err := w.WriteByte(ArrayReply); err != nil {
return err
}
- err = w.writeLen(len(args))
- if err != nil {
+ if err := w.writeLen(len(args)); err != nil {
return err
}
for _, arg := range args {
- err := w.writeArg(arg)
- if err != nil {
+ if err := w.WriteArg(arg); err != nil {
return err
}
}
@@ -51,11 +54,11 @@ func (w *Writer) WriteArgs(args []interface{}) error {
func (w *Writer) writeLen(n int) error {
w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
w.lenBuf = append(w.lenBuf, '\r', '\n')
- _, err := w.wr.Write(w.lenBuf)
+ _, err := w.Write(w.lenBuf)
return err
}
-func (w *Writer) writeArg(v interface{}) error {
+func (w *Writer) WriteArg(v interface{}) error {
switch v := v.(type) {
case nil:
return w.string("")
@@ -93,7 +96,8 @@ func (w *Writer) writeArg(v interface{}) error {
}
return w.int(0)
case time.Time:
- return w.string(v.Format(time.RFC3339Nano))
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
case encoding.BinaryMarshaler:
b, err := v.MarshalBinary()
if err != nil {
@@ -107,18 +111,15 @@ func (w *Writer) writeArg(v interface{}) error {
}
func (w *Writer) bytes(b []byte) error {
- err := w.wr.WriteByte(StringReply)
- if err != nil {
+ if err := w.WriteByte(StringReply); err != nil {
return err
}
- err = w.writeLen(len(b))
- if err != nil {
+ if err := w.writeLen(len(b)); err != nil {
return err
}
- _, err = w.wr.Write(b)
- if err != nil {
+ if _, err := w.Write(b); err != nil {
return err
}
@@ -145,21 +146,8 @@ func (w *Writer) float(f float64) error {
}
func (w *Writer) crlf() error {
- err := w.wr.WriteByte('\r')
- if err != nil {
+ if err := w.WriteByte('\r'); err != nil {
return err
}
- return w.wr.WriteByte('\n')
-}
-
-func (w *Writer) Buffered() int {
- return w.wr.Buffered()
-}
-
-func (w *Writer) Reset(wr io.Writer) {
- w.wr.Reset(wr)
-}
-
-func (w *Writer) Flush() error {
- return w.wr.Flush()
+ return w.WriteByte('\n')
}
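
proto.Writer no longer wraps its own bufio.Writer; it writes through the small writer interface above and leaves buffering and Flush to pool.Conn (see conn.go earlier in this diff). A minimal standalone sketch of what satisfies that interface:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// writer mirrors the interface proto.NewWriter now expects from its caller.
type writer interface {
	io.Writer
	io.ByteWriter
	WriteString(s string) (n int, err error)
}

func main() {
	// *bufio.Writer (what pool.Conn passes in) satisfies the interface...
	var _ writer = bufio.NewWriter(&bytes.Buffer{})

	// ...and so does *bytes.Buffer, which makes it easy to capture raw RESP
	// bytes in tests without a network connection.
	var buf bytes.Buffer
	var w writer = &buf
	w.WriteByte('*')
	w.WriteString("1\r\n")
	fmt.Printf("%q\n", buf.String()) // "*1\r\n"
}
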
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
new file mode 100644
index 0000000000..40676f3cb6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -0,0 +1,45 @@
+package rand
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+ src rand.Source
+ mu sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+ s.mu.Lock()
+ n := s.src.Int63()
+ s.mu.Unlock()
+ return n
+}
+
+func (s *source) Seed(seed int64) {
+ s.mu.Lock()
+ s.src.Seed(seed)
+ s.mu.Unlock()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
new file mode 100644
index 0000000000..862ff0eb3a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+ return string(b)
+}
+
+func Bytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
new file mode 100644
index 0000000000..4bc79701f4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
new file mode 100644
index 0000000000..4d7921bf32
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -0,0 +1,73 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ })
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+//------------------------------------------------------------------------------
+
+var tracer = otel.Tracer("github.com/go-redis/redis")
+
+func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
+ if span := trace.SpanFromContext(ctx); !span.IsRecording() {
+ return fn(ctx, span)
+ }
+
+ ctx, span := tracer.Start(ctx, name)
+ defer span.End()
+
+ return fn(ctx, span)
+}
+
+func RecordError(ctx context.Context, span trace.Span, err error) error {
+ if err != proto.Nil {
+ span.RecordError(err)
+ }
+ return err
+}
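
WithSpan starts a child span only when the incoming context already carries a recording span, so tracing stays cheap when OpenTelemetry is not wired up, and RecordError deliberately skips proto.Nil. A usage sketch in the style of conn.go and options.go above; the internal packages are not importable outside the module, and doPing is a hypothetical stand-in for the traced work:

// Illustrative only: internal, pool, trace and label refer to the packages
// used elsewhere in this diff; doPing is a hypothetical helper.
func ping(ctx context.Context, cn *pool.Conn) error {
	return internal.WithSpan(ctx, "redis.ping", func(ctx context.Context, span trace.Span) error {
		span.SetAttributes(label.String("db.system", "redis"))
		if err := doPing(ctx, cn); err != nil {
			return internal.RecordError(ctx, span, err)
		}
		return nil
	})
}
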
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
index 1b3060ebc2..1b3060ebc2 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/util/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
index db5033802a..db5033802a 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/util/strconv.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
diff --git a/vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
index c9868aac2b..c9868aac2b 100644
--- a/vendor/github.com/go-redis/redis/v7/internal/util/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
diff --git a/vendor/github.com/go-redis/redis/v7/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
index f9d3aab6d2..2f8bc2beda 100644
--- a/vendor/github.com/go-redis/redis/v7/iterator.go
+++ b/vendor/github.com/go-redis/redis/v8/iterator.go
@@ -1,6 +1,7 @@
package redis
import (
+ "context"
"sync"
)
@@ -21,7 +22,7 @@ func (it *ScanIterator) Err() error {
}
// Next advances the cursor and returns true if more values can be read.
-func (it *ScanIterator) Next() bool {
+func (it *ScanIterator) Next(ctx context.Context) bool {
it.mu.Lock()
defer it.mu.Unlock()
@@ -43,13 +44,14 @@ func (it *ScanIterator) Next() bool {
}
// Fetch next page.
- if it.cmd.args[0] == "scan" {
+ switch it.cmd.args[0] {
+ case "scan", "qscan":
it.cmd.args[1] = it.cmd.cursor
- } else {
+ default:
it.cmd.args[2] = it.cmd.cursor
}
- err := it.cmd.process(it.cmd)
+ err := it.cmd.process(ctx, it.cmd)
if err != nil {
return false
}
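
ScanIterator.Next now takes the caller's context and the cursor fetch also recognizes the qscan command. A sketch of the v8 iteration pattern, assuming the usual Scan(...).Iterator() helpers on the client; the address and key pattern are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Iterate all keys matching a pattern; Next transparently fetches the
	// next SCAN page using the same context.
	iter := rdb.Scan(ctx, 0, "session:*", 0).Iterator()
	for iter.Next(ctx) {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}
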
diff --git a/vendor/github.com/go-redis/redis/v7/options.go b/vendor/github.com/go-redis/redis/v8/options.go
index 47dcc29bf3..7a02002401 100644
--- a/vendor/github.com/go-redis/redis/v7/options.go
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -12,7 +12,10 @@ import (
"strings"
"time"
- "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "go.opentelemetry.io/otel/label"
+ "go.opentelemetry.io/otel/trace"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@@ -26,6 +29,7 @@ type Limiter interface {
ReportResult(result error)
}
+// Options keeps the settings to set up a Redis connection.
type Options struct {
// The network type, either tcp or unix.
// Default is tcp.
@@ -38,21 +42,23 @@ type Options struct {
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
// Hook that is called when new connection is established.
- OnConnect func(*Conn) error
+ OnConnect func(ctx context.Context, cn *Conn) error
- // Use the specified Username to authenticate the current connection with one of the connections defined in the ACL
- // list when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ // Use the specified Username to authenticate the current connection
+ // with one of the connections defined in the ACL list when connecting
+ // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
Username string
-
// Optional password. Must match the password specified in the
// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
- // or the User Password when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ // or the User Password when connecting to a Redis 6.0 instance, or greater,
+ // that is using the Redis ACL system.
Password string
+
// Database to be selected after connecting to the server.
DB int
// Maximum number of retries before giving up.
- // Default is to not retry failed commands.
+ // Default is 3 retries; -1 (not 0) disables retries.
MaxRetries int
// Minimum backoff between each retry.
// Default is 8 milliseconds; -1 disables backoff.
@@ -117,6 +123,9 @@ func (opt *Options) init() {
opt.Network = "tcp"
}
}
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
if opt.Dialer == nil {
opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
netDialer := &net.Dialer{
@@ -132,9 +141,6 @@ func (opt *Options) init() {
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.NumCPU()
}
- if opt.DialTimeout == 0 {
- opt.DialTimeout = 5 * time.Second
- }
switch opt.ReadTimeout {
case -1:
opt.ReadTimeout = 0
@@ -159,6 +165,8 @@ func (opt *Options) init() {
if opt.MaxRetries == -1 {
opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
}
switch opt.MinRetryBackoff {
case -1:
@@ -180,26 +188,35 @@ func (opt *Options) clone() *Options {
}
// ParseURL parses an URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by TCP socket and by Unix socket.
+// TCP connection:
+// redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
func ParseURL(redisURL string) (*Options, error) {
- o := &Options{Network: "tcp"}
u, err := url.Parse(redisURL)
if err != nil {
return nil, err
}
- if u.Scheme != "redis" && u.Scheme != "rediss" {
- return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
}
+}
- if u.User != nil {
- o.Username = u.User.Username()
- if p, ok := u.User.Password(); ok {
- o.Password = p
- }
- }
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
if len(u.Query()) > 0 {
- return nil, errors.New("no options supported")
+ return nil, errors.New("redis: no options supported")
}
h, p, err := net.SplitHostPort(u.Host)
@@ -222,22 +239,73 @@ func ParseURL(redisURL string) (*Options, error) {
o.DB = 0
case 1:
if o.DB, err = strconv.Atoi(f[0]); err != nil {
- return nil, fmt.Errorf("invalid redis database number: %q", f[0])
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
}
default:
- return nil, errors.New("invalid redis URL path: " + u.Path)
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
}
if u.Scheme == "rediss" {
o.TLSConfig = &tls.Config{ServerName: h}
}
+
+ return o, nil
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+
+ o.Username, o.Password = getUserPassword(u)
+
+ dbStr := u.Query().Get("db")
+ if dbStr == "" {
+ return o, nil // if database is not set, connect to 0 db.
+ }
+
+ db, err := strconv.Atoi(dbStr)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+
return o, nil
}
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
- return opt.Dialer(ctx, opt.Network, opt.Addr)
+ var conn net.Conn
+ err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
+ span.SetAttributes(
+ label.String("db.connection_string", opt.Addr),
+ )
+
+ var err error
+ conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
+ if err != nil {
+ _ = internal.RecordError(ctx, span, err)
+ }
+ return err
+ })
+ return conn, err
},
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
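
ParseURL now accepts unix:// URLs in addition to redis:// and rediss://, per the doc comment above, and the dialer is wrapped in a "redis.dial" span. A short sketch of both URL forms against the public v8 API; the address, socket path and password are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// TCP: redis://<user>:<password>@<host>:<port>/<db_number>
	opt, err := redis.ParseURL("redis://:secret@localhost:6379/2")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt.Network, opt.Addr, opt.DB) // tcp localhost:6379 2

	// Unix socket: unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
	opt2, err := redis.ParseURL("unix://:secret@/tmp/redis.sock?db=3")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt2.Network, opt2.Addr, opt2.DB) // unix /tmp/redis.sock 3

	rdb := redis.NewClient(opt)
	_ = rdb.Ping(context.Background()).Err()
}
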
diff --git a/vendor/github.com/go-redis/redis/v7/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
index d48566a787..c6ec340998 100644
--- a/vendor/github.com/go-redis/redis/v7/pipeline.go
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -4,7 +4,7 @@ import (
"context"
"sync"
- "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/go-redis/redis/v8/internal/pool"
)
type pipelineExecer func(context.Context, []Cmder) error
@@ -24,12 +24,11 @@ type pipelineExecer func(context.Context, []Cmder) error
// depends on your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
- Do(args ...interface{}) *Cmd
- Process(cmd Cmder) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
Close() error
Discard() error
- Exec() ([]Cmder, error)
- ExecContext(ctx context.Context) ([]Cmder, error)
+ Exec(ctx context.Context) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil)
@@ -54,14 +53,14 @@ func (c *Pipeline) init() {
c.statefulCmdable = c.Process
}
-func (c *Pipeline) Do(args ...interface{}) *Cmd {
- cmd := NewCmd(args...)
- _ = c.Process(cmd)
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
// Process queues the cmd for later execution.
-func (c *Pipeline) Process(cmd Cmder) error {
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
c.mu.Lock()
c.cmds = append(c.cmds, cmd)
c.mu.Unlock()
@@ -98,11 +97,7 @@ func (c *Pipeline) discard() error {
//
// Exec always returns list of commands and error of the first failed
// command if any.
-func (c *Pipeline) Exec() ([]Cmder, error) {
- return c.ExecContext(c.ctx)
-}
-
-func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -120,11 +115,11 @@ func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
return cmds, c.exec(ctx, cmds)
}
-func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
if err := fn(c); err != nil {
return nil, err
}
- cmds, err := c.Exec()
+ cmds, err := c.Exec(ctx)
_ = c.Close()
return cmds, err
}
@@ -133,8 +128,8 @@ func (c *Pipeline) Pipeline() Pipeliner {
return c
}
-func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipelined(fn)
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
}
func (c *Pipeline) TxPipeline() Pipeliner {
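
Exec and ExecContext are merged into a single Exec(ctx), and every Pipeliner method is now context-first. A hedged sketch of the resulting call pattern; the key names and address are placeholders:

package main

import (
	"context"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Queue commands and send them in one round trip; Exec now takes the context.
	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "pipeline_counter")
		pipe.Expire(ctx, "pipeline_counter", time.Hour)
		return nil
	})
	if err != nil {
		panic(err)
	}
	_ = cmds // each Cmder carries its own result and error
}
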
diff --git a/vendor/github.com/go-redis/redis/v7/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
index 26cde242be..c56270b443 100644
--- a/vendor/github.com/go-redis/redis/v7/pubsub.go
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -8,12 +8,15 @@ import (
"sync"
"time"
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/pool"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
)
-const pingTimeout = 30 * time.Second
+const (
+ pingTimeout = time.Second
+ chanSendTimeout = time.Minute
+)
var errPingTimeout = errors.New("redis: ping timeout")
@@ -26,7 +29,7 @@ var errPingTimeout = errors.New("redis: ping timeout")
type PubSub struct {
opt *Options
- newConn func([]string) (*pool.Conn, error)
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
mu sync.Mutex
@@ -55,14 +58,14 @@ func (c *PubSub) init() {
c.exit = make(chan struct{})
}
-func (c *PubSub) connWithLock() (*pool.Conn, error) {
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
c.mu.Lock()
- cn, err := c.conn(nil)
+ cn, err := c.conn(ctx, nil)
c.mu.Unlock()
return cn, err
}
-func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
if c.closed {
return nil, pool.ErrClosed
}
@@ -73,12 +76,12 @@ func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
channels := mapKeys(c.channels)
channels = append(channels, newChannels...)
- cn, err := c.newConn(channels)
+ cn, err := c.newConn(ctx, channels)
if err != nil {
return nil, err
}
- if err := c.resubscribe(cn); err != nil {
+ if err := c.resubscribe(ctx, cn); err != nil {
_ = c.closeConn(cn)
return nil, err
}
@@ -93,15 +96,15 @@ func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
})
}
-func (c *PubSub) resubscribe(cn *pool.Conn) error {
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
var firstErr error
if len(c.channels) > 0 {
- firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
}
if len(c.patterns) > 0 {
- err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
if err != nil && firstErr == nil {
firstErr = err
}
@@ -121,35 +124,40 @@ func mapKeys(m map[string]struct{}) []string {
}
func (c *PubSub) _subscribe(
- cn *pool.Conn, redisCmd string, channels []string,
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
) error {
args := make([]interface{}, 0, 1+len(channels))
args = append(args, redisCmd)
for _, channel := range channels {
args = append(args, channel)
}
- cmd := NewSliceCmd(args...)
- return c.writeCmd(context.TODO(), cn, cmd)
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
}
-func (c *PubSub) releaseConnWithLock(cn *pool.Conn, err error, allowTimeout bool) {
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
c.mu.Lock()
- c.releaseConn(cn, err, allowTimeout)
+ c.releaseConn(ctx, cn, err, allowTimeout)
c.mu.Unlock()
}
-func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
if c.cn != cn {
return
}
if isBadConn(err, allowTimeout) {
- c.reconnect(err)
+ c.reconnect(ctx, err)
}
}
-func (c *PubSub) reconnect(reason error) {
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
_ = c.closeTheCn(reason)
- _, _ = c.conn(nil)
+ _, _ = c.conn(ctx, nil)
}
func (c *PubSub) closeTheCn(reason error) error {
@@ -157,7 +165,7 @@ func (c *PubSub) closeTheCn(reason error) error {
return nil
}
if !c.closed {
- internal.Logger.Printf("redis: discarding bad PubSub connection: %s", reason)
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
}
err := c.closeConn(c.cn)
c.cn = nil
@@ -179,11 +187,11 @@ func (c *PubSub) Close() error {
// Subscribe the client to the specified channels. It returns
// empty subscription if there are no channels.
-func (c *PubSub) Subscribe(channels ...string) error {
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- err := c.subscribe("subscribe", channels...)
+ err := c.subscribe(ctx, "subscribe", channels...)
if c.channels == nil {
c.channels = make(map[string]struct{})
}
@@ -195,11 +203,11 @@ func (c *PubSub) Subscribe(channels ...string) error {
// PSubscribe the client to the given patterns. It returns
// empty subscription if there are no patterns.
-func (c *PubSub) PSubscribe(patterns ...string) error {
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
- err := c.subscribe("psubscribe", patterns...)
+ err := c.subscribe(ctx, "psubscribe", patterns...)
if c.patterns == nil {
c.patterns = make(map[string]struct{})
}
@@ -211,55 +219,55 @@ func (c *PubSub) PSubscribe(patterns ...string) error {
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
-func (c *PubSub) Unsubscribe(channels ...string) error {
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, channel := range channels {
delete(c.channels, channel)
}
- err := c.subscribe("unsubscribe", channels...)
+ err := c.subscribe(ctx, "unsubscribe", channels...)
return err
}
// PUnsubscribe the client from the given patterns, or from all of
// them if none is given.
-func (c *PubSub) PUnsubscribe(patterns ...string) error {
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, pattern := range patterns {
delete(c.patterns, pattern)
}
- err := c.subscribe("punsubscribe", patterns...)
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
return err
}
-func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
- cn, err := c.conn(channels)
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
if err != nil {
return err
}
- err = c._subscribe(cn, redisCmd, channels)
- c.releaseConn(cn, err, false)
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
return err
}
-func (c *PubSub) Ping(payload ...string) error {
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
args := []interface{}{"ping"}
if len(payload) == 1 {
args = append(args, payload[0])
}
- cmd := NewCmd(args...)
+ cmd := NewCmd(ctx, args...)
- cn, err := c.connWithLock()
+ cn, err := c.connWithLock(ctx)
if err != nil {
return err
}
- err = c.writeCmd(context.TODO(), cn, cmd)
- c.releaseConnWithLock(cn, err, false)
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConnWithLock(ctx, cn, err, false)
return err
}
@@ -279,9 +287,10 @@ func (m *Subscription) String() string {
// Message received as result of a PUBLISH command issued by another client.
type Message struct {
- Channel string
- Pattern string
- Payload string
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
}
func (m *Message) String() string {
@@ -317,10 +326,24 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
Count: int(reply[2].(int64)),
}, nil
case "message":
- return &Message{
- Channel: reply[1].(string),
- Payload: reply[2].(string),
- }, nil
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
case "pmessage":
return &Message{
Pattern: reply[1].(string),
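
The new PayloadSlice field carries replies whose payload arrives as an array rather than a single string; in that case Payload is left empty. A small consumer sketch, assuming only the Message fields shown above:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

// handleMessage branches on the new PayloadSlice field; only the Message
// fields shown in the diff above (Channel, Payload, PayloadSlice) are assumed.
func handleMessage(msg *redis.Message) {
	if len(msg.PayloadSlice) > 0 {
		for _, p := range msg.PayloadSlice { // array-style payload
			fmt.Println(msg.Channel, "element:", p)
		}
		return
	}
	fmt.Println(msg.Channel, msg.Payload) // ordinary single-string payload
}

func main() {
	handleMessage(&redis.Message{Channel: "news", Payload: "hello"})
	handleMessage(&redis.Message{Channel: "news", PayloadSlice: []string{"a", "b"}})
}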
@@ -342,21 +365,21 @@ func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
// ReceiveTimeout acts like Receive but returns an error if a message
// is not received in time. This is a low-level API and in most cases
// Channel should be used instead.
-func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
if c.cmd == nil {
- c.cmd = NewCmd()
+ c.cmd = NewCmd(ctx)
}
- cn, err := c.connWithLock()
+ cn, err := c.connWithLock(ctx)
if err != nil {
return nil, err
}
- err = cn.WithReader(context.TODO(), timeout, func(rd *proto.Reader) error {
+ err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
- c.releaseConnWithLock(cn, err, timeout > 0)
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
if err != nil {
return nil, err
}
@@ -367,16 +390,16 @@ func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
// Receive returns a message as a Subscription, Message, Pong or error.
// See PubSub example for details. This is a low-level API and in most cases
// Channel should be used instead.
-func (c *PubSub) Receive() (interface{}, error) {
- return c.ReceiveTimeout(0)
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
}
// ReceiveMessage returns a Message or an error, ignoring Subscription and Pong
// messages. This is a low-level API and in most cases Channel should be used
// instead.
-func (c *PubSub) ReceiveMessage() (*Message, error) {
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
for {
- msg, err := c.Receive()
+ msg, err := c.Receive(ctx)
if err != nil {
return nil, err
}
@@ -429,7 +452,7 @@ func (c *PubSub) ChannelSize(size int) <-chan *Message {
// reconnections.
//
// ChannelWithSubscriptions cannot be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
+func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
c.chOnce.Do(func() {
c.initPing()
c.initAllChan(size)
@@ -445,10 +468,18 @@ func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
return c.allCh
}
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
func (c *PubSub) initPing() {
+ ctx := context.TODO()
c.ping = make(chan struct{}, 1)
go func() {
- timer := time.NewTimer(pingTimeout)
+ timer := time.NewTimer(time.Minute)
timer.Stop()
healthy := true
@@ -461,7 +492,7 @@ func (c *PubSub) initPing() {
<-timer.C
}
case <-timer.C:
- pingErr := c.Ping()
+ pingErr := c.Ping(ctx)
if healthy {
healthy = false
} else {
@@ -469,7 +500,7 @@ func (c *PubSub) initPing() {
pingErr = errPingTimeout
}
c.mu.Lock()
- c.reconnect(pingErr)
+ c.reconnect(ctx, pingErr)
healthy = true
c.mu.Unlock()
}
@@ -482,21 +513,22 @@ func (c *PubSub) initPing() {
// initMsgChan must be in sync with initAllChan.
func (c *PubSub) initMsgChan(size int) {
+ ctx := context.TODO()
c.msgCh = make(chan *Message, size)
go func() {
- timer := time.NewTimer(pingTimeout)
+ timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
- msg, err := c.Receive()
+ msg, err := c.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.msgCh)
return
}
if errCount > 0 {
- time.Sleep(c.retryBackoff(errCount))
+ time.Sleep(100 * time.Millisecond)
}
errCount++
continue
@@ -516,7 +548,7 @@ func (c *PubSub) initMsgChan(size int) {
case *Pong:
// Ignore.
case *Message:
- timer.Reset(pingTimeout)
+ timer.Reset(chanSendTimeout)
select {
case c.msgCh <- msg:
if !timer.Stop() {
@@ -524,10 +556,14 @@ func (c *PubSub) initMsgChan(size int) {
}
case <-timer.C:
internal.Logger.Printf(
- "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+ c.getContext(),
+ "redis: %s channel is full for %s (message is dropped)",
+ c,
+ chanSendTimeout,
+ )
}
default:
- internal.Logger.Printf("redis: unknown message type: %T", msg)
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
}
}
}()
@@ -535,6 +571,7 @@ func (c *PubSub) initMsgChan(size int) {
// initAllChan must be in sync with initMsgChan.
func (c *PubSub) initAllChan(size int) {
+ ctx := context.TODO()
c.allCh = make(chan interface{}, size)
go func() {
timer := time.NewTimer(pingTimeout)
@@ -542,14 +579,14 @@ func (c *PubSub) initAllChan(size int) {
var errCount int
for {
- msg, err := c.Receive()
+ msg, err := c.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.allCh)
return
}
if errCount > 0 {
- time.Sleep(c.retryBackoff(errCount))
+ time.Sleep(100 * time.Millisecond)
}
errCount++
continue
@@ -571,7 +608,7 @@ func (c *PubSub) initAllChan(size int) {
case *Message:
c.sendMessage(msg, timer)
default:
- internal.Logger.Printf("redis: unknown message type: %T", msg)
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
}
}
}()
@@ -586,10 +623,7 @@ func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
}
case <-timer.C:
internal.Logger.Printf(
+ c.getContext(),
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
}
}
-
-func (c *PubSub) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
diff --git a/vendor/github.com/go-redis/redis/v7/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
index 3d9dfed7db..712579d4ca 100644
--- a/vendor/github.com/go-redis/redis/v7/redis.go
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -2,19 +2,22 @@ package redis
import (
"context"
+ "errors"
"fmt"
- "log"
+ "sync/atomic"
"time"
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/pool"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/label"
+ "go.opentelemetry.io/otel/trace"
)
// Nil reply returned by Redis when key does not exist.
const Nil = proto.Nil
-func SetLogger(logger *log.Logger) {
+func SetLogger(logger internal.Logging) {
internal.Logger = logger
}
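
SetLogger no longer accepts a *log.Logger; it takes any value with the context-aware Printf method that the Logger calls elsewhere in this diff use. A minimal adapter sketch around the standard library logger; the prefix and output destination are arbitrary:

package main

import (
	"context"
	"log"
	"os"

	"github.com/go-redis/redis/v8"
)

// stdLogger adapts a standard library logger to the context-aware
// Printf(ctx, format, args...) method that the v8 logger expects.
type stdLogger struct{ l *log.Logger }

func (s stdLogger) Printf(ctx context.Context, format string, v ...interface{}) {
	s.l.Printf(format, v...)
}

func main() {
	redis.SetLogger(stdLogger{l: log.New(os.Stderr, "redis: ", log.LstdFlags)})
}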
@@ -49,92 +52,88 @@ func (hs *hooks) AddHook(hook Hook) {
func (hs hooks) process(
ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
- ctx, err := hs.beforeProcess(ctx, cmd)
- if err != nil {
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
cmd.SetErr(err)
return err
}
- cmdErr := fn(ctx, cmd)
+ var hookIndex int
+ var retErr error
- if err := hs.afterProcess(ctx, cmd); err != nil {
- cmd.SetErr(err)
- return err
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+ if retErr != nil {
+ cmd.SetErr(retErr)
+ }
}
- return cmdErr
-}
-
-func (hs hooks) beforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
- for _, h := range hs.hooks {
- var err error
- ctx, err = h.BeforeProcess(ctx, cmd)
- if err != nil {
- return nil, err
- }
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
+ cmd.SetErr(retErr)
}
- return ctx, nil
-}
-func (hs hooks) afterProcess(ctx context.Context, cmd Cmder) error {
- var firstErr error
- for _, h := range hs.hooks {
- err := h.AfterProcess(ctx, cmd)
- if err != nil && firstErr == nil {
- firstErr = err
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+ retErr = err
+ cmd.SetErr(retErr)
}
}
- return firstErr
+
+ return retErr
}
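
The rewritten process walks the hook chain forward through BeforeProcess, runs the command once, then unwinds backwards through AfterProcess, so each hook observes both sides of the round trip. A sketch of a simple timing hook built on the four methods invoked above; the context key and log format are illustrative only:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

type ctxKey struct{}

// timingHook measures how long each command or pipeline takes. Its method set
// matches the calls made by hooks.process / hooks.processPipeline above.
type timingHook struct{}

func (timingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	return context.WithValue(ctx, ctxKey{}, time.Now()), nil
}

func (timingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	if start, ok := ctx.Value(ctxKey{}).(time.Time); ok {
		fmt.Printf("%s took %s\n", cmd.Name(), time.Since(start))
	}
	return nil
}

func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	return context.WithValue(ctx, ctxKey{}, time.Now()), nil
}

func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	if start, ok := ctx.Value(ctxKey{}).(time.Time); ok {
		fmt.Printf("pipeline of %d cmds took %s\n", len(cmds), time.Since(start))
	}
	return nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rdb.AddHook(timingHook{})
	_ = rdb.Ping(context.Background()).Err()
}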
func (hs hooks) processPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
- ctx, err := hs.beforeProcessPipeline(ctx, cmds)
- if err != nil {
- setCmdsErr(cmds, err)
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
return err
}
- cmdsErr := fn(ctx, cmds)
+ var hookIndex int
+ var retErr error
- if err := hs.afterProcessPipeline(ctx, cmds); err != nil {
- setCmdsErr(cmds, err)
- return err
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+ if retErr != nil {
+ setCmdsErr(cmds, retErr)
+ }
}
- return cmdsErr
-}
-
-func (hs hooks) beforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
- for _, h := range hs.hooks {
- var err error
- ctx, err = h.BeforeProcessPipeline(ctx, cmds)
- if err != nil {
- return nil, err
- }
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
}
- return ctx, nil
-}
-func (hs hooks) afterProcessPipeline(ctx context.Context, cmds []Cmder) error {
- var firstErr error
- for _, h := range hs.hooks {
- err := h.AfterProcessPipeline(ctx, cmds)
- if err != nil && firstErr == nil {
- firstErr = err
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+ retErr = err
+ setCmdsErr(cmds, retErr)
}
}
- return firstErr
+
+ return retErr
}
func (hs hooks) processTxPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
- cmds = wrapMultiExec(cmds)
+ cmds = wrapMultiExec(ctx, cmds)
return hs.processPipeline(ctx, cmds, fn)
}
+func (hs hooks) withContext(ctx context.Context, fn func() error) error {
+ return fn()
+}
+
//------------------------------------------------------------------------------
type baseClient struct {
@@ -201,6 +200,7 @@ func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
}
return nil, err
}
+
return cn, nil
}
@@ -210,10 +210,16 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
return nil, err
}
- err = c.initConn(ctx, cn)
+ if cn.Inited {
+ return cn, nil
+ }
+
+ err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
+ return c.initConn(ctx, cn)
+ })
if err != nil {
- c.connPool.Remove(cn, err)
- if err := internal.Unwrap(err); err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := errors.Unwrap(err); err != nil {
return nil, err
}
return nil, err
@@ -235,25 +241,24 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
return nil
}
- connPool := pool.NewSingleConnPool(nil)
- connPool.SetConn(cn)
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
conn := newConn(ctx, c.opt, connPool)
- _, err := conn.Pipelined(func(pipe Pipeliner) error {
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
if c.opt.Password != "" {
if c.opt.Username != "" {
- pipe.AuthACL(c.opt.Username, c.opt.Password)
+ pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
} else {
- pipe.Auth(c.opt.Password)
+ pipe.Auth(ctx, c.opt.Password)
}
}
if c.opt.DB > 0 {
- pipe.Select(c.opt.DB)
+ pipe.Select(ctx, c.opt.DB)
}
if c.opt.readOnly {
- pipe.ReadOnly()
+ pipe.ReadOnly(ctx)
}
return nil
@@ -263,76 +268,107 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
}
if c.opt.OnConnect != nil {
- return c.opt.OnConnect(conn)
+ return c.opt.OnConnect(ctx, conn)
}
return nil
}
-func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
if c.opt.Limiter != nil {
c.opt.Limiter.ReportResult(err)
}
if isBadConn(err, false) {
- c.connPool.Remove(cn, err)
+ c.connPool.Remove(ctx, cn, err)
} else {
- c.connPool.Put(cn)
+ c.connPool.Put(ctx, cn)
}
}
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
- defer func() {
- c.releaseConn(cn, err)
- }()
+ return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
- err = fn(ctx, cn)
- return err
-}
+ if span.IsRecording() {
+ if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
+ span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
+ }
+ }
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- err := c._process(ctx, cmd)
- if err != nil {
- cmd.SetErr(err)
- return err
- }
- return nil
+ defer func() {
+ c.releaseConn(ctx, cn, err)
+ }()
+
+ done := ctx.Done()
+ if done == nil {
+ err = fn(ctx, cn)
+ return err
+ }
+
+ errc := make(chan error, 1)
+ go func() { errc <- fn(ctx, cn) }()
+
+ select {
+ case <-done:
+ _ = cn.Close()
+ // Wait for the goroutine to finish and send something.
+ <-errc
+
+ err = ctx.Err()
+ return err
+ case err = <-errc:
+ return err
+ }
+ })
}
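
withConn now races fn against the caller's context: on cancellation it closes the connection, waits for the in-flight goroutine, and returns ctx.Err(). The same pattern in isolation, as a plain-Go sketch:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runCancellable mirrors the select in withConn above: the work runs in a
// goroutine, cancellation wins the race, but the caller still waits for the
// worker to send its result so nothing is leaked.
func runCancellable(ctx context.Context, work func() error) error {
	if ctx.Done() == nil {
		return work() // no deadline or cancellation: call inline, as withConn does
	}
	errc := make(chan error, 1)
	go func() { errc <- work() }()
	select {
	case <-ctx.Done():
		<-errc // wait for the goroutine to finish and send something
		return ctx.Err()
	case err := <-errc:
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := runCancellable(ctx, func() error {
		time.Sleep(time.Second) // hypothetical slow operation
		return nil
	})
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}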
-func (c *baseClient) _process(ctx context.Context, cmd Cmder) error {
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
+ attempt := attempt
+
+ var retry bool
+ err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
}
- }
- retryTimeout := true
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
+ retryTimeout := uint32(1)
+ err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ }
+ return err
+ }
+
+ return nil
})
- if err != nil {
- return err
+ if err == nil {
+ return nil
}
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- retryTimeout = cmd.readTimeout() == nil
- return err
- }
-
- return nil
+ retry = shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return err
})
- if lastErr == nil || !isRetryableError(lastErr, retryTimeout) {
- return lastErr
+ if err == nil || !retry {
+ return err
}
+ lastErr = err
}
return lastErr
}
@@ -411,7 +447,7 @@ func (c *baseClient) _generalProcessPipeline(
canRetry, err = p(ctx, cn, cmds)
return err
})
- if lastErr == nil || !canRetry || !isRetryableError(lastErr, true) {
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
return lastErr
}
}
@@ -437,6 +473,7 @@ func (c *baseClient) pipelineProcessCmds(
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
for _, cmd := range cmds {
err := cmd.readReply(rd)
+ cmd.SetErr(err)
if err != nil && !isRedisError(err) {
return err
}
@@ -469,15 +506,15 @@ func (c *baseClient) txPipelineProcessCmds(
return false, err
}
-func wrapMultiExec(cmds []Cmder) []Cmder {
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
if len(cmds) == 0 {
panic("not reached")
}
- cmds = append(cmds, make([]Cmder, 2)...)
- copy(cmds[1:], cmds[:len(cmds)-2])
- cmds[0] = NewStatusCmd("multi")
- cmds[len(cmds)-1] = NewSliceCmd("exec")
- return cmds
+ cmdCopy := make([]Cmder, len(cmds)+2)
+ cmdCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdCopy[1:], cmds)
+ cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdCopy
}
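
wrapMultiExec now allocates a fresh slice instead of growing and shifting the caller's slice, so the queued commands are never aliased or reordered in place. A standalone sketch of the same prepend/append-by-copy idiom; wrap and the string commands are made up for illustration:

package main

import "fmt"

// wrap copies cmds into a new slice bracketed by head and tail, leaving the
// caller's slice untouched, the same shape as wrapMultiExec above.
func wrap(cmds []string, head, tail string) []string {
	out := make([]string, len(cmds)+2)
	out[0] = head
	copy(out[1:], cmds)
	out[len(out)-1] = tail
	return out
}

func main() {
	queued := []string{"set k 1", "incr k"}
	fmt.Println(wrap(queued, "multi", "exec")) // [multi set k 1 incr k exec]
	fmt.Println(queued)                        // original slice unchanged
}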
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
@@ -565,26 +602,18 @@ func (c *Client) WithContext(ctx context.Context) *Client {
return clone
}
-func (c *Client) Conn() *Conn {
- return newConn(c.ctx, c.opt, pool.NewSingleConnPool(c.connPool))
+func (c *Client) Conn(ctx context.Context) *Conn {
+ return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
}
// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(args ...interface{}) *Cmd {
- return c.DoContext(c.ctx, args...)
-}
-
-func (c *Client) DoContext(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(args...)
- _ = c.ProcessContext(ctx, cmd)
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
-func (c *Client) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *Client) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
}
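
Do and Process now take the context explicitly, and the DoContext/ProcessContext variants are gone. A short usage sketch of the new signatures; the key and value are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Do builds a Cmd from raw args and runs it through Process.
	if err := rdb.Do(ctx, "set", "key", "value").Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := rdb.Do(ctx, "get", "key").Result()
	fmt.Println(val, err)
}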
@@ -609,8 +638,8 @@ func (c *Client) PoolStats() *PoolStats {
return (*PoolStats)(stats)
}
-func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Client) Pipeline() Pipeliner {
@@ -622,8 +651,8 @@ func (c *Client) Pipeline() Pipeliner {
return &pipe
}
-func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
@@ -640,8 +669,8 @@ func (c *Client) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt,
- newConn: func(channels []string) (*pool.Conn, error) {
- return c.newConn(context.TODO())
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
},
closeConn: c.connPool.CloseConn,
}
@@ -675,20 +704,20 @@ func (c *Client) pubSub() *PubSub {
// }
//
// ch := sub.Channel()
-func (c *Client) Subscribe(channels ...string) *PubSub {
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
- _ = pubsub.Subscribe(channels...)
+ _ = pubsub.Subscribe(ctx, channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create an empty subscription.
-func (c *Client) PSubscribe(channels ...string) *PubSub {
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
- _ = pubsub.PSubscribe(channels...)
+ _ = pubsub.PSubscribe(ctx, channels...)
}
return pubsub
}
@@ -699,6 +728,7 @@ type conn struct {
baseClient
cmdable
statefulCmdable
+ hooks // TODO: inherit hooks
}
// Conn is like Client, but its pool contains single connection.
@@ -722,16 +752,20 @@ func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
return &c
}
-func (c *Conn) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
}
-func (c *Conn) ProcessContext(ctx context.Context, cmd Cmder) error {
- return c.baseClient.process(ctx, cmd)
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
-func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Conn) Pipeline() Pipeliner {
@@ -743,8 +777,8 @@ func (c *Conn) Pipeline() Pipeliner {
return &pipe
}
-func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
diff --git a/vendor/github.com/go-redis/redis/v7/result.go b/vendor/github.com/go-redis/redis/v8/result.go
index 5bec26ca95..24cfd49940 100644
--- a/vendor/github.com/go-redis/redis/v7/result.go
+++ b/vendor/github.com/go-redis/redis/v8/result.go
@@ -2,7 +2,7 @@ package redis
import "time"
-// NewCmdResult returns a Cmd initialised with val and err for testing
+// NewCmdResult returns a Cmd initialised with val and err for testing.
func NewCmdResult(val interface{}, err error) *Cmd {
var cmd Cmd
cmd.val = val
@@ -10,7 +10,7 @@ func NewCmdResult(val interface{}, err error) *Cmd {
return &cmd
}
-// NewSliceResult returns a SliceCmd initialised with val and err for testing
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
func NewSliceResult(val []interface{}, err error) *SliceCmd {
var cmd SliceCmd
cmd.val = val
@@ -18,7 +18,7 @@ func NewSliceResult(val []interface{}, err error) *SliceCmd {
return &cmd
}
-// NewStatusResult returns a StatusCmd initialised with val and err for testing
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
func NewStatusResult(val string, err error) *StatusCmd {
var cmd StatusCmd
cmd.val = val
@@ -26,7 +26,7 @@ func NewStatusResult(val string, err error) *StatusCmd {
return &cmd
}
-// NewIntResult returns an IntCmd initialised with val and err for testing
+// NewIntResult returns an IntCmd initialised with val and err for testing.
func NewIntResult(val int64, err error) *IntCmd {
var cmd IntCmd
cmd.val = val
@@ -34,7 +34,7 @@ func NewIntResult(val int64, err error) *IntCmd {
return &cmd
}
-// NewDurationResult returns a DurationCmd initialised with val and err for testing
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
func NewDurationResult(val time.Duration, err error) *DurationCmd {
var cmd DurationCmd
cmd.val = val
@@ -42,7 +42,7 @@ func NewDurationResult(val time.Duration, err error) *DurationCmd {
return &cmd
}
-// NewBoolResult returns a BoolCmd initialised with val and err for testing
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
func NewBoolResult(val bool, err error) *BoolCmd {
var cmd BoolCmd
cmd.val = val
@@ -50,7 +50,7 @@ func NewBoolResult(val bool, err error) *BoolCmd {
return &cmd
}
-// NewStringResult returns a StringCmd initialised with val and err for testing
+// NewStringResult returns a StringCmd initialised with val and err for testing.
func NewStringResult(val string, err error) *StringCmd {
var cmd StringCmd
cmd.val = val
@@ -58,7 +58,7 @@ func NewStringResult(val string, err error) *StringCmd {
return &cmd
}
-// NewFloatResult returns a FloatCmd initialised with val and err for testing
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
func NewFloatResult(val float64, err error) *FloatCmd {
var cmd FloatCmd
cmd.val = val
@@ -66,7 +66,7 @@ func NewFloatResult(val float64, err error) *FloatCmd {
return &cmd
}
-// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
func NewStringSliceResult(val []string, err error) *StringSliceCmd {
var cmd StringSliceCmd
cmd.val = val
@@ -74,7 +74,7 @@ func NewStringSliceResult(val []string, err error) *StringSliceCmd {
return &cmd
}
-// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
var cmd BoolSliceCmd
cmd.val = val
@@ -82,7 +82,7 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
return &cmd
}
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
var cmd StringStringMapCmd
cmd.val = val
@@ -90,7 +90,7 @@ func NewStringStringMapResult(val map[string]string, err error) *StringStringMap
return &cmd
}
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
var cmd StringIntMapCmd
cmd.val = val
@@ -98,7 +98,7 @@ func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd
return &cmd
}
-// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
var cmd TimeCmd
cmd.val = val
@@ -106,7 +106,7 @@ func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
return &cmd
}
-// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
var cmd ZSliceCmd
cmd.val = val
@@ -114,7 +114,7 @@ func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
return &cmd
}
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing
+// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
@@ -122,7 +122,7 @@ func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
return &cmd
}
-// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
var cmd ScanCmd
cmd.page = keys
@@ -131,7 +131,7 @@ func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
return &cmd
}
-// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
var cmd ClusterSlotsCmd
cmd.val = val
@@ -139,7 +139,7 @@ func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
return &cmd
}
-// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
var cmd GeoLocationCmd
cmd.locations = val
@@ -147,7 +147,7 @@ func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
return &cmd
}
-// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
var cmd GeoPosCmd
cmd.val = val
@@ -155,7 +155,7 @@ func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
return &cmd
}
-// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
var cmd CommandsInfoCmd
cmd.val = val
@@ -163,7 +163,7 @@ func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsI
return &cmd
}
-// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
var cmd XMessageSliceCmd
cmd.val = val
@@ -171,7 +171,7 @@ func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
return &cmd
}
-// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
var cmd XStreamSliceCmd
cmd.val = val
diff --git a/vendor/github.com/go-redis/redis/v7/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
index 44fc623f8a..34d05f35ae 100644
--- a/vendor/github.com/go-redis/redis/v7/ring.go
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -2,72 +2,73 @@ package redis
import (
"context"
+ "crypto/tls"
"errors"
"fmt"
- "math/rand"
+ "net"
"strconv"
"sync"
"sync/atomic"
"time"
- "github.com/go-redis/redis/v7/internal"
- "github.com/go-redis/redis/v7/internal/consistenthash"
- "github.com/go-redis/redis/v7/internal/hashtag"
- "github.com/go-redis/redis/v7/internal/pool"
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
)
-// Hash is type of hash function used in consistent hash.
-type Hash consistenthash.Hash
-
var errRingShardsDown = errors.New("redis: all ring shards are down")
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
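
The ring drops its bundled consistent-hash implementation in favour of rendezvous hashing over xxhash, and the NewConsistentHash option further down lets callers plug in anything that satisfies this one-method interface. A hedged sketch of a custom implementation; the modulo scheme is deliberately naive and only meant to show the plumbing:

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/go-redis/redis/v8"
)

// moduloHash satisfies redis.ConsistentHash (Get(string) string) by hashing
// the key with FNV and taking it modulo the shard count. Naive on purpose:
// adding or removing a shard remaps most keys, which rendezvous hashing avoids.
type moduloHash struct{ shards []string }

func (m moduloHash) Get(key string) string {
	if len(m.shards) == 0 {
		return ""
	}
	h := fnv.New32a()
	_, _ = h.Write([]byte(key))
	return m.shards[int(h.Sum32())%len(m.shards)]
}

func main() {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"shard1": ":6379", "shard2": ":6380"},
		NewConsistentHash: func(shards []string) redis.ConsistentHash {
			return moduloHash{shards: shards}
		},
	})
	defer ring.Close()
	fmt.Println("ring shards:", ring.Len())
}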
// RingOptions are used to configure a ring client and should be
// passed to NewRing.
type RingOptions struct {
// Map of name => host:port addresses of ring shards.
Addrs map[string]string
- // Map of name => password of ring shards, to allow different shards to have
- // different passwords. It will be ignored if the Password field is set.
- Passwords map[string]string
+ // NewClient creates a shard client with provided name and options.
+ NewClient func(name string, opt *Options) *Client
// Frequency of PING commands sent to check shard availability.
// Shard is considered down after 3 subsequent failed checks.
HeartbeatFrequency time.Duration
- // Hash function used in consistent hash.
- // Default is crc32.ChecksumIEEE.
- Hash Hash
-
- // Number of replicas in consistent hash.
- // Default is 100 replicas.
- //
- // Higher number of replicas will provide less deviation, that is keys will be
- // distributed to nodes more evenly.
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
//
- // Following is deviation for common nreplicas:
- // --------------------------------------------------------
- // | nreplicas | standard error | 99% confidence interval |
- // | 10 | 0.3152 | (0.37, 1.98) |
- // | 100 | 0.0997 | (0.76, 1.28) |
- // | 1000 | 0.0316 | (0.92, 1.09) |
- // --------------------------------------------------------
- //
- // See https://arxiv.org/abs/1406.2294 for reference
- HashReplicas int
-
- // NewClient creates a shard client with provided name and options.
- NewClient func(name string, opt *Options) *Client
-
- // Optional hook that is called when a new shard is created.
- OnNewShard func(*Client)
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
// Following options are copied from Options struct.
- OnConnect func(*Conn) error
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
- DB int
+ Username string
Password string
+ DB int
MaxRetries int
MinRetryBackoff time.Duration
@@ -83,17 +84,31 @@ type RingOptions struct {
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
}
func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(name string, opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
if opt.HeartbeatFrequency == 0 {
opt.HeartbeatFrequency = 500 * time.Millisecond
}
- if opt.HashReplicas == 0 {
- opt.HashReplicas = 100
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
}
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
@@ -108,12 +123,16 @@ func (opt *RingOptions) init() {
}
}
-func (opt *RingOptions) clientOptions(shard string) *Options {
+func (opt *RingOptions) clientOptions() *Options {
return &Options{
+ Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
+ Username: opt.Username,
+ Password: opt.Password,
DB: opt.DB,
- Password: opt.getPassword(shard),
+
+ MaxRetries: -1,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
@@ -125,14 +144,10 @@ func (opt *RingOptions) clientOptions(shard string) *Options {
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
- }
-}
-func (opt *RingOptions) getPassword(shard string) string {
- if opt.Password == "" {
- return opt.Passwords[shard]
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
}
- return opt.Password
}
//------------------------------------------------------------------------------
@@ -142,6 +157,15 @@ type ringShard struct {
down int32
}
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(name, clopt),
+ }
+}
+
func (shard *ringShard) String() string {
var state string
if shard.IsUp() {
@@ -182,41 +206,59 @@ func (shard *ringShard) Vote(up bool) bool {
type ringShards struct {
opt *RingOptions
- mu sync.RWMutex
- hash *consistenthash.Map
- shards map[string]*ringShard // read only
- list []*ringShard // read only
- len int
- closed bool
+ mu sync.RWMutex
+ hash ConsistentHash
+ shards map[string]*ringShard // read only
+ list []*ringShard // read only
+ numShard int
+ closed bool
}
func newRingShards(opt *RingOptions) *ringShards {
- return &ringShards{
+ shards := make(map[string]*ringShard, len(opt.Addrs))
+ list := make([]*ringShard, 0, len(shards))
+
+ for name, addr := range opt.Addrs {
+ shard := newRingShard(opt, name, addr)
+ shards[name] = shard
+
+ list = append(list, shard)
+ }
+
+ c := &ringShards{
opt: opt,
- hash: newConsistentHash(opt),
- shards: make(map[string]*ringShard),
+ shards: shards,
+ list: list,
}
-}
+ c.rebalance()
-func (c *ringShards) Add(name string, cl *Client) {
- shard := &ringShard{Client: cl}
- c.hash.Add(name)
- c.shards[name] = shard
- c.list = append(c.list, shard)
+ return c
}
func (c *ringShards) List() []*ringShard {
+ var list []*ringShard
+
c.mu.RLock()
- list := c.list
+ if !c.closed {
+ list = c.list
+ }
c.mu.RUnlock()
+
return list
}
func (c *ringShards) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
c.mu.RLock()
- hash := c.hash.Get(key)
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
c.mu.RUnlock()
+
return hash
}
@@ -230,6 +272,11 @@ func (c *ringShards) GetByKey(key string) (*ringShard, error) {
return nil, pool.ErrClosed
}
+ if c.numShard == 0 {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
hash := c.hash.Get(key)
if hash == "" {
c.mu.RUnlock()
@@ -242,13 +289,13 @@ func (c *ringShards) GetByKey(key string) (*ringShard, error) {
return shard, nil
}
-func (c *ringShards) GetByHash(name string) (*ringShard, error) {
- if name == "" {
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
return c.Random()
}
c.mu.RLock()
- shard := c.shards[name]
+ shard := c.shards[shardName]
c.mu.RUnlock()
return shard, nil
}
@@ -261,23 +308,16 @@ func (c *ringShards) Random() (*ringShard, error) {
func (c *ringShards) Heartbeat(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
+
+ ctx := context.Background()
for range ticker.C {
var rebalance bool
- c.mu.RLock()
-
- if c.closed {
- c.mu.RUnlock()
- break
- }
-
- shards := c.list
- c.mu.RUnlock()
-
- for _, shard := range shards {
- err := shard.Client.Ping().Err()
- if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
- internal.Logger.Printf("ring shard state changed: %s", shard)
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
rebalance = true
}
}
@@ -294,24 +334,25 @@ func (c *ringShards) rebalance() {
shards := c.shards
c.mu.RUnlock()
- hash := newConsistentHash(c.opt)
- var shardsNum int
+ liveShards := make([]string, 0, len(shards))
+
for name, shard := range shards {
if shard.IsUp() {
- hash.Add(name)
- shardsNum++
+ liveShards = append(liveShards, name)
}
}
+ hash := c.opt.NewConsistentHash(liveShards)
+
c.mu.Lock()
c.hash = hash
- c.len = shardsNum
+ c.numShard = len(liveShards)
c.mu.Unlock()
}
func (c *ringShards) Len() int {
c.mu.RLock()
- l := c.len
+ l := c.numShard
c.mu.RUnlock()
return l
}
@@ -377,34 +418,15 @@ func NewRing(opt *RingOptions) *Ring {
},
ctx: context.Background(),
}
+
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
ring.cmdable = ring.Process
- for name, addr := range opt.Addrs {
- shard := newRingShard(opt, name, addr)
- ring.shards.Add(name, shard)
- }
-
go ring.shards.Heartbeat(opt.HeartbeatFrequency)
return &ring
}
-func newRingShard(opt *RingOptions, name, addr string) *Client {
- clopt := opt.clientOptions(name)
- clopt.Addr = addr
- var shard *Client
- if opt.NewClient != nil {
- shard = opt.NewClient(name, clopt)
- } else {
- shard = NewClient(clopt)
- }
- if opt.OnNewShard != nil {
- opt.OnNewShard(shard)
- }
- return shard
-}
-
func (c *Ring) Context() context.Context {
return c.ctx
}
@@ -421,21 +443,13 @@ func (c *Ring) WithContext(ctx context.Context) *Ring {
}
// Do creates a Cmd from the args and processes the cmd.
-func (c *Ring) Do(args ...interface{}) *Cmd {
- return c.DoContext(c.ctx, args...)
-}
-
-func (c *Ring) DoContext(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(args...)
- _ = c.ProcessContext(ctx, cmd)
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
-func (c *Ring) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *Ring) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.process)
}
@@ -469,36 +483,39 @@ func (c *Ring) Len() int {
}
// Subscribe subscribes the client to the specified channels.
-func (c *Ring) Subscribe(channels ...string) *PubSub {
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
if len(channels) == 0 {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
if err != nil {
- //TODO: return PubSub with sticky error
+ // TODO: return PubSub with sticky error
panic(err)
}
- return shard.Client.Subscribe(channels...)
+ return shard.Client.Subscribe(ctx, channels...)
}
// PSubscribe subscribes the client to the given patterns.
-func (c *Ring) PSubscribe(channels ...string) *PubSub {
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
if len(channels) == 0 {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
if err != nil {
- //TODO: return PubSub with sticky error
+ // TODO: return PubSub with sticky error
panic(err)
}
- return shard.Client.PSubscribe(channels...)
+ return shard.Client.PSubscribe(ctx, channels...)
}
// ForEachShard concurrently calls fn on each live shard in the ring.
// It returns the first error, if any.
-func (c *Ring) ForEachShard(fn func(client *Client) error) error {
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
shards := c.shards.List()
var wg sync.WaitGroup
errCh := make(chan error, 1)
@@ -510,7 +527,7 @@ func (c *Ring) ForEachShard(fn func(client *Client) error) error {
wg.Add(1)
go func(shard *ringShard) {
defer wg.Done()
- err := fn(shard.Client)
+ err := fn(ctx, shard.Client)
if err != nil {
select {
case errCh <- err:
@@ -529,11 +546,11 @@ func (c *Ring) ForEachShard(fn func(client *Client) error) error {
}
}
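
ForEachShard now threads the caller's context through to the per-shard callback. A usage sketch that pings every live shard; the addresses are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{"shard1": ":6379", "shard2": ":6380"},
	})
	defer ring.Close()

	// The callback now receives the context passed to ForEachShard.
	err := ring.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error {
		return shard.Ping(ctx).Err()
	})
	fmt.Println(err)
}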
-func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
shards := c.shards.List()
- firstErr := errRingShardsDown
+ var firstErr error
for _, shard := range shards {
- cmdsInfo, err := shard.Client.Command().Result()
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
if err == nil {
return cmdsInfo, nil
}
@@ -541,23 +558,26 @@ func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
firstErr = err
}
}
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
return nil, firstErr
}
-func (c *Ring) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get()
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
if err != nil {
return nil
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf("info for cmd=%s not found", name)
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
}
return info
}
-func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
- cmdInfo := c.cmdInfo(cmd.Name())
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
pos := cmdFirstKeyPos(cmd, cmdInfo)
if pos == 0 {
return c.shards.Random()
@@ -567,15 +587,6 @@ func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
}
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
- err := c._process(ctx, cmd)
- if err != nil {
- cmd.SetErr(err)
- return err
- }
- return nil
-}
-
-func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
var lastErr error
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
@@ -584,21 +595,21 @@ func (c *Ring) _process(ctx context.Context, cmd Cmder) error {
}
}
- shard, err := c.cmdShard(cmd)
+ shard, err := c.cmdShard(ctx, cmd)
if err != nil {
return err
}
- lastErr = shard.Client.ProcessContext(ctx, cmd)
- if lastErr == nil || !isRetryableError(lastErr, cmd.readTimeout() == nil) {
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
return lastErr
}
}
return lastErr
}
-func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
}
func (c *Ring) Pipeline() Pipeliner {
@@ -616,8 +627,8 @@ func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
})
}
-func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
}
func (c *Ring) TxPipeline() Pipeliner {
@@ -640,10 +651,10 @@ func (c *Ring) generalProcessPipeline(
) error {
cmdsMap := make(map[string][]Cmder)
for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
if hash != "" {
- hash = c.shards.Hash(hashtag.Key(hash))
+ hash = c.shards.Hash(hash)
}
cmdsMap[hash] = append(cmdsMap[hash], cmd)
}
@@ -665,30 +676,20 @@ func (c *Ring) generalProcessPipeline(
func (c *Ring) processShardPipeline(
ctx context.Context, hash string, cmds []Cmder, tx bool,
) error {
- //TODO: retry?
- shard, err := c.shards.GetByHash(hash)
+ // TODO: retry?
+ shard, err := c.shards.GetByName(hash)
if err != nil {
setCmdsErr(cmds, err)
return err
}
if tx {
- err = shard.Client.processTxPipeline(ctx, cmds)
- } else {
- err = shard.Client.processPipeline(ctx, cmds)
+ return shard.Client.processTxPipeline(ctx, cmds)
}
- return err
-}
-
-// Close closes the ring client, releasing any open resources.
-//
-// It is rare to Close a Ring, as the Ring is meant to be long-lived
-// and shared between many goroutines.
-func (c *Ring) Close() error {
- return c.shards.Close()
+ return shard.Client.processPipeline(ctx, cmds)
}
-func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
@@ -718,9 +719,13 @@ func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
}
}
- return shards[0].Client.Watch(fn, keys...)
+ return shards[0].Client.Watch(ctx, fn, keys...)
}
-func newConsistentHash(opt *RingOptions) *consistenthash.Map {
- return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ return c.shards.Close()
}
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
new file mode 100644
index 0000000000..5cab18d617
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type Scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
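
Script packages the usual EVALSHA-with-EVAL-fallback dance behind the Scripter interface above, so the same helper works against Client, Ring and ClusterClient alike. A usage sketch; the Lua body and key name are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy is a trivial Lua script; Run tries EVALSHA first and falls back to
// EVAL when the script is not yet cached on the server.
var incrBy = redis.NewScript(`
return redis.call("INCRBY", KEYS[1], ARGV[1])
`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int64()
	fmt.Println(n, err)
}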
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
new file mode 100644
index 0000000000..d785168ff1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,738 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to slave read-only nodes.
+ SlaveOnly bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterSlaveDialer(failover)
+ opt.init()
+
+ connPool := newConnPool(opt)
+
+ failover.mu.Lock()
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+ failover.mu.Unlock()
+
+ c := Client{
+ baseClient: newBaseClient(opt, connPool),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+ c.onClose = failover.Close
+
+ return &c
+}
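
NewFailoverClient turns an ordinary Client into a Sentinel-aware one by swapping in a dialer that resolves the current master (or a random slave when SlaveOnly is set). A configuration sketch; the master name and addresses are placeholders for a local Sentinel deployment:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// All fields come from the FailoverOptions struct above; the values are
	// placeholders only.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{":26379", ":26380", ":26381"},
	})
	defer rdb.Close()

	fmt.Println(rdb.Ping(ctx).Err())
}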
+
+func masterSlaveDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.SlaveOnly {
+ addr, err = failover.RandomSlaveAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+ return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooks
+ ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ ctx: context.Background(),
+ }
+ return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking for agreement from other Sentinels.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with a matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set is used in order to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove is used in order to remove the specified master: the master will no
+// longer be monitored, and will be removed entirely from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
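// Usage sketch (illustrative; every name, address and value is a placeholder): the
// administrative commands above combined into a typical monitor/tune/verify/remove flow.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "localhost:26379"})
	defer sentinel.Close()

	// Start monitoring a new master with a quorum of 2.
	fmt.Println(sentinel.Monitor(ctx, "cache", "10.0.0.5", "6379", "2").Err())
	// Tune a per-master configuration parameter.
	fmt.Println(sentinel.Set(ctx, "cache", "down-after-milliseconds", "5000").Err())
	// Check that the deployment can reach the quorum and majority needed for a failover.
	fmt.Println(sentinel.CkQuorum(ctx, "cache").Result())
	// Stop monitoring the master again.
	fmt.Println(sentinel.Remove(ctx, "cache").Err())
}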
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+ addresses, err := c.slaveAddrs(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr := c.getMasterAddr(ctx, sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr := c.getMasterAddr(ctx, c.sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, c.sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addrs := parseSlaveAddrs(slaves)
+ return addrs, nil
+ }
+
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ return ""
+ }
+ return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+ addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ c.opt.MasterName, err)
+ return []string{}
+ }
+ return parseSlaveAddrs(addrs)
+}
+
+func parseSlaveAddrs(addrs []interface{}) []string {
+ nodes := make([]string, 0, len(addrs))
+
+ for _, node := range addrs {
+ ip := ""
+ port := ""
+ flags := []string{}
+ lastkey := ""
+ isDown := false
+
+ for _, key := range node.([]interface{}) {
+ switch lastkey {
+ case "ip":
+ ip = key.(string)
+ case "port":
+ port = key.(string)
+ case "flags":
+ flags = strings.Split(key.(string), ",")
+ }
+ lastkey = key.(string)
+ }
+
+ for _, flag := range flags {
+ switch flag {
+ case "s_down", "o_down", "disconnected":
+ isDown = true
+ }
+ }
+
+ if !isDown {
+ nodes = append(nodes, net.JoinHostPort(ip, port))
+ }
+ }
+
+ return nodes
+}
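// Usage sketch (illustrative; placeholder address and master name): the SENTINEL SLAVES
// reply consumed by parseSlaveAddrs is, per replica, a flat list of alternating field
// names and values, which is why the loop above tracks lastkey. The same reply can be
// read into a map directly.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "localhost:26379"})
	defer sentinel.Close()

	slaves, err := sentinel.Slaves(ctx, "mymaster").Result()
	if err != nil {
		panic(err)
	}
	for _, s := range slaves {
		fields := s.([]interface{})
		info := make(map[string]string, len(fields)/2)
		for i := 0; i+1 < len(fields); i += 2 {
			info[fields[i].(string)] = fields[i+1].(string)
		}
		fmt.Println(info["ip"], info["port"], info["flags"])
	}
}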
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ slaveAddrs, err := failover.slaveAddrs(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, slaveAddr := range slaveAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: slaveAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+
+ failover.mu.Lock()
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+ failover.mu.Unlock()
+
+ return c
+}
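// Usage sketch (illustrative; placeholder addresses and master name): constructing the
// Sentinel-backed "cluster" client defined above. ClusterSlots maps the whole keyspace
// (slots 0-16383) to the master plus its replicas, so read-only commands may be served
// by a replica when the derived cluster options allow replica reads.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewFailoverClusterClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379", "localhost:26380"},
	})
	defer rdb.Close()

	fmt.Println(rdb.Set(ctx, "greeting", "hello", 0).Err()) // writes always hit the master
	fmt.Println(rdb.Get(ctx, "greeting").Result())          // reads may be routed to a replica
}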
diff --git a/vendor/github.com/go-redis/redis/v7/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
index 9ae159015e..08d381a9d3 100644
--- a/vendor/github.com/go-redis/redis/v7/tx.go
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -3,8 +3,8 @@ package redis
import (
"context"
- "github.com/go-redis/redis/v7/internal/pool"
- "github.com/go-redis/redis/v7/internal/proto"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
)
// TxFailedErr is returned when a Redis transaction fails.
@@ -26,7 +26,7 @@ func (c *Client) newTx(ctx context.Context) *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
- connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
+ connPool: pool.NewStickyConnPool(c.connPool),
},
hooks: c.hooks.clone(),
ctx: ctx,
@@ -55,11 +55,7 @@ func (c *Tx) WithContext(ctx context.Context) *Tx {
return &clone
}
-func (c *Tx) Process(cmd Cmder) error {
- return c.ProcessContext(c.ctx, cmd)
-}
-
-func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
}
@@ -67,52 +63,45 @@ func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
-func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
- return c.WatchContext(c.ctx, fn, keys...)
-}
-
-func (c *Client) WatchContext(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
tx := c.newTx(ctx)
+ defer tx.Close(ctx)
if len(keys) > 0 {
- if err := tx.Watch(keys...).Err(); err != nil {
- _ = tx.Close()
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
return err
}
}
-
- err := fn(tx)
- _ = tx.Close()
- return err
+ return fn(tx)
}
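// Usage sketch (illustrative; key name and retry count are placeholders): the classic
// optimistic-locking loop built on the Watch signature above, retrying while the watched
// key is modified concurrently.
package main

import (
	"context"
	"fmt"
	"strconv"

	"github.com/go-redis/redis/v8"
)

func increment(ctx context.Context, rdb *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// Queued commands run atomically; EXEC fails if the watched key changed.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
			return nil
		})
		return err
	}

	for i := 0; i < 10; i++ {
		err := rdb.Watch(ctx, txf, key)
		if err != redis.TxFailedErr {
			return err // nil on success, or a non-retryable error
		}
	}
	return fmt.Errorf("increment %q: too many retries", key)
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	fmt.Println(increment(context.Background(), rdb, "counter"))
}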
// Close closes the transaction, releasing any open resources.
-func (c *Tx) Close() error {
- _ = c.Unwatch().Err()
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
return c.baseClient.Close()
}
// Watch marks the keys to be watched for conditional execution
// of a transaction.
-func (c *Tx) Watch(keys ...string) *StatusCmd {
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
args[0] = "watch"
for i, key := range keys {
args[1+i] = key
}
- cmd := NewStatusCmd(args...)
- _ = c.Process(cmd)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
// Unwatch flushes all the previously watched keys for a transaction.
-func (c *Tx) Unwatch(keys ...string) *StatusCmd {
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
args[0] = "unwatch"
for i, key := range keys {
args[1+i] = key
}
- cmd := NewStatusCmd(args...)
- _ = c.Process(cmd)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
return cmd
}
@@ -130,8 +119,8 @@ func (c *Tx) Pipeline() Pipeliner {
// Pipelined executes commands queued in the fn outside of the transaction.
// Use TxPipelined if you need transactional behavior.
-func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(fn)
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
}
// TxPipelined executes commands queued in the fn in the transaction.
@@ -142,8 +131,8 @@ func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
// Exec always returns a list of commands. If the transaction fails,
// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
-func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(fn)
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
}
// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
diff --git a/vendor/github.com/go-redis/redis/v7/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
index 005ca682c5..5f0e1e33a5 100644
--- a/vendor/github.com/go-redis/redis/v7/universal.go
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -20,23 +20,29 @@ type UniversalOptions struct {
// Common options.
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(*Conn) error
- Username string
- Password string
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
- TLSConfig *tls.Config
+
+ TLSConfig *tls.Config
// Only cluster clients.
@@ -100,9 +106,10 @@ func (o *UniversalOptions) Failover() *FailoverOptions {
Dialer: o.Dialer,
OnConnect: o.OnConnect,
- DB: o.DB,
- Username: o.Username,
- Password: o.Password,
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelPassword: o.SentinelPassword,
MaxRetries: o.MaxRetries,
MinRetryBackoff: o.MinRetryBackoff,
@@ -168,19 +175,20 @@ type UniversalClient interface {
Cmdable
Context() context.Context
AddHook(Hook)
- Watch(fn func(*Tx) error, keys ...string) error
- Do(args ...interface{}) *Cmd
- DoContext(ctx context.Context, args ...interface{}) *Cmd
- Process(cmd Cmder) error
- ProcessContext(ctx context.Context, cmd Cmder) error
- Subscribe(channels ...string) *PubSub
- PSubscribe(channels ...string) *PubSub
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
Close() error
+ PoolStats() *PoolStats
}
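// Usage sketch (illustrative; placeholder address and key): because *Client,
// *ClusterClient and *Ring all satisfy UniversalClient (asserted just below), callers
// can be written against the interface and stay agnostic of the deployment behind it.
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func touch(ctx context.Context, rdb redis.UniversalClient, key string) error {
	return rdb.Set(ctx, key, "1", 0).Err()
}

func main() {
	ctx := context.Background()

	// One Addr yields a *Client, several yield a *ClusterClient, and setting MasterName
	// selects the Sentinel-backed failover client (see the conditions described below).
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer rdb.Close()

	fmt.Println(touch(ctx, rdb, "example-key"))
}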
-var _ UniversalClient = (*Client)(nil)
-var _ UniversalClient = (*ClusterClient)(nil)
-var _ UniversalClient = (*Ring)(nil)
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
// NewUniversalClient returns a new multi client. The type of client returned depends
// on the following three conditions: