author     6543 <6543@obermui.de>          2020-11-03 07:04:09 +0100
committer  GitHub <noreply@github.com>     2020-11-03 08:04:09 +0200
commit     70ea2300ca24311f033d85b41e938e86b1d50acd (patch)
tree       eb25c089d5def4df2c036ab2820df7c895798572 /vendor/github.com
parent     b687707014b31d0f388d1dfb60c09b5dcd48fc4c (diff)
download   gitea-70ea2300ca24311f033d85b41e938e86b1d50acd.tar.gz
           gitea-70ea2300ca24311f033d85b41e938e86b1d50acd.zip
[Vendor] update macaron related (#13409)
* Vendor: update gitea.com/macaron/session to a177a270
* make vendor
* Vendor: update gitea.com/macaron/macaron to 0db5d458
* make vendor
* Vendor: update gitea.com/macaron/cache to 905232fb
* make vendor
* Vendor: update gitea.com/macaron/i18n to 4ca3dd0c
* make vendor
* Vendor: update gitea.com/macaron/gzip to efa5e847
* make vendor
* Vendor: update gitea.com/macaron/captcha to e8597820
* make vendor
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/BurntSushi/toml/.gitignore | 5
-rw-r--r--  vendor/github.com/BurntSushi/toml/.travis.yml | 15
-rw-r--r--  vendor/github.com/BurntSushi/toml/COMPATIBLE | 3
-rw-r--r--  vendor/github.com/BurntSushi/toml/COPYING | 21
-rw-r--r--  vendor/github.com/BurntSushi/toml/Makefile | 19
-rw-r--r--  vendor/github.com/BurntSushi/toml/README.md | 218
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go | 509
-rw-r--r--  vendor/github.com/BurntSushi/toml/decode_meta.go | 121
-rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go | 27
-rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go | 568
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types.go | 19
-rw-r--r--  vendor/github.com/BurntSushi/toml/encoding_types_1.1.go | 18
-rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go | 953
-rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go | 592
-rw-r--r--  vendor/github.com/BurntSushi/toml/session.vim | 1
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_check.go | 91
-rw-r--r--  vendor/github.com/BurntSushi/toml/type_fields.go | 242
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/.gitignore (renamed from vendor/github.com/couchbaselabs/go-couchbase/.gitignore) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/.travis.yml (renamed from vendor/github.com/couchbaselabs/go-couchbase/.travis.yml) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/LICENSE (renamed from vendor/github.com/couchbaselabs/go-couchbase/LICENSE) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/README.markdown (renamed from vendor/github.com/couchbaselabs/go-couchbase/README.markdown) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/audit.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/audit.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/client.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/client.go) | 294
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/conn_pool.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/conn_pool.go) | 37
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/ddocs.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/ddocs.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/go.mod | 3
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/observe.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/observe.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/pools.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/pools.go) | 370
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/port_map.go | 106
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/streaming.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/streaming.go) | 47
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/tap.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/tap.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/upr.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/upr.go) | 1
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/users.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/users.go) | 6
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/util.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/util.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/vbmap.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/vbmap.go) | 0
-rw-r--r--  vendor/github.com/couchbase/go-couchbase/views.go (renamed from vendor/github.com/couchbaselabs/go-couchbase/views.go) | 0
-rw-r--r--  vendor/github.com/couchbase/gomemcached/client/collections_filter.go | 8
-rw-r--r--  vendor/github.com/couchbase/gomemcached/client/mc.go | 413
-rw-r--r--  vendor/github.com/couchbase/gomemcached/client/upr_event.go | 19
-rw-r--r--  vendor/github.com/couchbase/gomemcached/client/upr_feed.go | 67
-rw-r--r--  vendor/github.com/couchbase/gomemcached/mc_constants.go | 20
-rw-r--r--  vendor/github.com/couchbase/gomemcached/mc_req.go | 61
-rw-r--r--  vendor/github.com/couchbaselabs/go-couchbase/port_map.go | 84
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 4
-rw-r--r--  vendor/github.com/lunny/log/.gitignore | 26
-rw-r--r--  vendor/github.com/lunny/log/LICENSE | 27
-rw-r--r--  vendor/github.com/lunny/log/README.md | 49
-rw-r--r--  vendor/github.com/lunny/log/README_CN.md | 52
-rw-r--r--  vendor/github.com/lunny/log/dbwriter.go | 36
-rw-r--r--  vendor/github.com/lunny/log/filewriter.go | 112
-rw-r--r--  vendor/github.com/lunny/log/logext.go | 595
-rw-r--r--  vendor/github.com/lunny/nodb/.gitignore | 7
-rw-r--r--  vendor/github.com/lunny/nodb/LICENSE | 21
-rw-r--r--  vendor/github.com/lunny/nodb/README.md | 84
-rw-r--r--  vendor/github.com/lunny/nodb/README_CN.md | 81
-rw-r--r--  vendor/github.com/lunny/nodb/batch.go | 106
-rw-r--r--  vendor/github.com/lunny/nodb/binlog.go | 391
-rw-r--r--  vendor/github.com/lunny/nodb/binlog_util.go | 215
-rw-r--r--  vendor/github.com/lunny/nodb/config/config.go | 135
-rw-r--r--  vendor/github.com/lunny/nodb/config/config.toml | 45
-rw-r--r--  vendor/github.com/lunny/nodb/const.go | 98
-rw-r--r--  vendor/github.com/lunny/nodb/doc.go | 61
-rw-r--r--  vendor/github.com/lunny/nodb/dump.go | 200
-rw-r--r--  vendor/github.com/lunny/nodb/info.go | 24
-rw-r--r--  vendor/github.com/lunny/nodb/multi.go | 73
-rw-r--r--  vendor/github.com/lunny/nodb/nodb.go | 128
-rw-r--r--  vendor/github.com/lunny/nodb/nodb_db.go | 171
-rw-r--r--  vendor/github.com/lunny/nodb/replication.go | 312
-rw-r--r--  vendor/github.com/lunny/nodb/scan.go | 144
-rw-r--r--  vendor/github.com/lunny/nodb/store/db.go | 61
-rw-r--r--  vendor/github.com/lunny/nodb/store/driver/batch.go | 39
-rw-r--r--  vendor/github.com/lunny/nodb/store/driver/driver.go | 67
-rw-r--r--  vendor/github.com/lunny/nodb/store/driver/store.go | 46
-rw-r--r--  vendor/github.com/lunny/nodb/store/goleveldb/batch.go | 27
-rw-r--r--  vendor/github.com/lunny/nodb/store/goleveldb/const.go | 4
-rw-r--r--  vendor/github.com/lunny/nodb/store/goleveldb/db.go | 187
-rw-r--r--  vendor/github.com/lunny/nodb/store/goleveldb/iterator.go | 49
-rw-r--r--  vendor/github.com/lunny/nodb/store/goleveldb/snapshot.go | 26
-rw-r--r--  vendor/github.com/lunny/nodb/store/iterator.go | 327
-rw-r--r--  vendor/github.com/lunny/nodb/store/snapshot.go | 16
-rw-r--r--  vendor/github.com/lunny/nodb/store/store.go | 51
-rw-r--r--  vendor/github.com/lunny/nodb/store/tx.go | 42
-rw-r--r--  vendor/github.com/lunny/nodb/store/writebatch.go | 9
-rw-r--r--  vendor/github.com/lunny/nodb/t_bit.go | 922
-rw-r--r--  vendor/github.com/lunny/nodb/t_hash.go | 509
-rw-r--r--  vendor/github.com/lunny/nodb/t_kv.go | 387
-rw-r--r--  vendor/github.com/lunny/nodb/t_list.go | 492
-rw-r--r--  vendor/github.com/lunny/nodb/t_set.go | 601
-rw-r--r--  vendor/github.com/lunny/nodb/t_ttl.go | 195
-rw-r--r--  vendor/github.com/lunny/nodb/t_zset.go | 943
-rw-r--r--  vendor/github.com/lunny/nodb/tx.go | 113
-rw-r--r--  vendor/github.com/lunny/nodb/util.go | 113
92 files changed, 1103 insertions, 12278 deletions
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
deleted file mode 100644
index 0cd3800377..0000000000
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0e..0000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 6efcfd0ce5..0000000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 01b5743200..0000000000
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848d33..0000000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 7c1b37ecc7..0000000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
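
[Editor's note] The deleted README shows its `encoding.TextUnmarshaler` duration example only as fragments. A minimal sketch assembling them into one runnable program, for reference while the vendored copy is gone; the `blob` literal is trimmed to a single song:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// duration satisfies encoding.TextUnmarshaler, so TOML strings like
// "4m49s" decode directly into a time.Duration.
type duration struct {
	time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}

type song struct {
	Name     string
	Duration duration
}

type songs struct {
	Song []song
}

func main() {
	blob := `
[[song]]
name = "Thunder Road"
duration = "4m49s"
`
	var favorites songs
	if _, err := toml.Decode(blob, &favorites); err != nil {
		log.Fatal(err)
	}
	for _, s := range favorites.Song {
		fmt.Printf("%s (%s)\n", s.Name, s.Duration)
	}
}
```
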
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b0fd51d5b6..0000000000
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package toml
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
- return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
- undecoded interface{}
- context Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
- }
- if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
- }
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
- }
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
- // Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
- // Save the undecoded data and the key context into the primitive
- // value.
- context := make(Key, len(md.context))
- copy(context, md.context)
- rv.Set(reflect.ValueOf(Primitive{
- undecoded: data,
- context: context,
- }))
- return nil
- }
-
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
- }
- }
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
- return md.unifyText(data, v)
- }
- // BUG(burntsushi)
- // The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
-
- k := rv.Kind()
-
- // laziness
- if k >= reflect.Int && k <= reflect.Uint64 {
- return md.unifyInt(data, rv)
- }
- switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
- case reflect.Struct:
- return md.unifyStruct(data, rv)
- case reflect.Map:
- return md.unifyMap(data, rv)
- case reflect.Array:
- return md.unifyArray(data, rv)
- case reflect.Slice:
- return md.unifySlice(data, rv)
- case reflect.String:
- return md.unifyString(data, rv)
- case reflect.Bool:
- return md.unifyBool(data, rv)
- case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
- }
- return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- return md.unifyFloat64(data, rv)
- }
- return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if mapping == nil {
- return nil
- }
- return e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
- }
-
- for key, datum := range tmap {
- var f *field
- fields := cachedTypeFields(rv.Type())
- for i := range fields {
- ff := &fields[i]
- if ff.name == key {
- f = ff
- break
- }
- if f == nil && strings.EqualFold(ff.name, key) {
- f = ff
- }
- }
- if f != nil {
- subv := rv
- for _, i := range f.index {
- subv = indirect(subv.Field(i))
- }
- if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
- md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
- } else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
- }
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if tmap == nil {
- return nil
- }
- return badtype("map", mapping)
- }
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
- md.context = append(md.context, k)
-
- rvkey := indirect(reflect.New(rv.Type().Key()))
- rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
-
- rvkey.SetString(k)
- rv.SetMapIndex(rvkey, rvval)
- }
- return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- n := datav.Len()
- if rv.IsNil() || rv.Cap() < n {
- rv.Set(reflect.MakeSlice(rv.Type(), n, n))
- }
- rv.SetLen(n)
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
- return nil
- }
- return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
- if s, ok := data.(string); ok {
- rv.SetString(s)
- return nil
- }
- return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
- if num, ok := data.(float64); ok {
- switch rv.Kind() {
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- rv.SetFloat(num)
- default:
- panic("bug")
- }
- return nil
- }
- return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
- }
- return nil
- }
- return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
- if b, ok := data.(bool); ok {
- rv.SetBool(b)
- return nil
- }
- return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
- rv.Set(reflect.ValueOf(data))
- return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
- var s string
- switch sdata := data.(type) {
- case TextMarshaler:
- text, err := sdata.MarshalText()
- if err != nil {
- return err
- }
- s = string(text)
- case fmt.Stringer:
- s = sdata.String()
- case string:
- s = sdata
- case bool:
- s = fmt.Sprintf("%v", sdata)
- case int64:
- s = fmt.Sprintf("%d", sdata)
- case float64:
- s = fmt.Sprintf("%f", sdata)
- default:
- return badtype("primitive (string-like)", data)
- }
- if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
- }
- return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
- return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
- if v.Kind() != reflect.Ptr {
- if v.CanSet() {
- pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
- return pv
- }
- }
- return v
- }
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
- if rv.CanSet() {
- return true
- }
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
- return true
- }
- return false
-}
-
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}
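
[Editor's note] The `Primitive` type documented in the file above enables two-stage decoding when a table's concrete shape is only known at run time. A sketch based on the API as deleted; the `[database]` layout and `dbConfig` struct are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// dbConfig is a hypothetical concrete shape, chosen at run time.
type dbConfig struct {
	Server  string
	Enabled bool
}

func main() {
	data := `
[database]
server = "192.168.1.1"
enabled = true
`
	// Stage 1: defer decoding of [database] by capturing it as a Primitive.
	var raw struct {
		Database toml.Primitive
	}
	md, err := toml.Decode(data, &raw)
	if err != nil {
		log.Fatal(err)
	}

	// Stage 2: decode the deferred table now that its shape is known. Keys
	// hidden behind the Primitive count as undecoded until this call runs.
	var db dbConfig
	if err := md.PrimitiveDecode(raw.Database, &db); err != nil {
		log.Fatal(err)
	}
	fmt.Println(db.Server, db.Enabled)
}
```
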
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index b9914a6798..0000000000
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
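
[Editor's note] A short sketch of the `MetaData` queries defined above (`IsDefined`, `Type`, `Undecoded`); the TOML document and struct layout are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type server struct {
	IP string `toml:"ip"`
	// Note: deliberately no field for the TOML key 'dc'.
}

func main() {
	blob := `
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
`
	var conf struct {
		Servers map[string]server
	}
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}

	// Hierarchical existence and type queries, as in the IsDefined docs.
	fmt.Println(md.IsDefined("servers", "alpha", "ip")) // true
	fmt.Println(md.Type("servers", "alpha", "ip"))      // String
	fmt.Println(md.IsDefined("servers", "beta"))        // false

	// 'servers.alpha.dc' exists in the TOML but has no Go counterpart,
	// so it is reported as undecoded.
	for _, key := range md.Undecoded() {
		fmt.Println("undecoded:", key.String())
	}
}
```
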
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
deleted file mode 100644
index b371f396ed..0000000000
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index d905c21a24..0000000000
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package toml
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
- "\"", "\\\"",
- "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
-
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
- rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
- return err
- }
- return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if terr, ok := r.(tomlEncodeError); ok {
- err = terr.error
- return
- }
- panic(r)
- }
- }()
- enc.encode(key, rv)
- return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
- return
- }
-
- k := rv.Kind()
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64,
- reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
- enc.eArrayOfTables(key, rv)
- } else {
- enc.keyEqElement(key, rv)
- }
- case reflect.Interface:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Map:
- if rv.IsNil() {
- return
- }
- enc.eTable(key, rv)
- case reflect.Ptr:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Struct:
- enc.eTable(key, rv)
- default:
- panic(e("unsupported type for key '%s': %s", key, k))
- }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
- switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
- return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
- encPanic(err)
- } else {
- enc.writeQuoted(string(s))
- }
- return
- }
- switch rv.Kind() {
- case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
- case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
- case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
- case reflect.Array, reflect.Slice:
- enc.eArrayOrSliceElement(rv)
- case reflect.Interface:
- enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
- default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
- }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
- }
- return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
- length := rv.Len()
- enc.wf("[")
- for i := 0; i < length; i++ {
- elem := rv.Index(i)
- enc.eElement(elem)
- if i != length-1 {
- enc.wf(", ")
- }
- }
- enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
- if isNil(trv) {
- continue
- }
- panicIfInvalidKey(key)
- enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- enc.eMapOrStruct(key, trv)
- }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
- if len(key) == 1 {
- // Output an extra newline between top-level tables.
- // (The newline isn't written if nothing else has been written though.)
- enc.newline()
- }
- if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- }
- enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
- case reflect.Map:
- enc.eMap(key, rv)
- case reflect.Struct:
- enc.eStruct(key, rv)
- default:
- panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
- }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
- rt := rv.Type()
- if rt.Key().Kind() != reflect.String {
- encPanic(errNonString)
- }
-
- // Sort keys so that we have deterministic output. And write keys directly
- // underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
- for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
- mapKeysSub = append(mapKeysSub, k)
- } else {
- mapKeysDirect = append(mapKeysDirect, k)
- }
- }
-
- var writeMapKeys = func(mapKeys []string) {
- sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
- continue
- }
- enc.encode(key.add(mapKey), mrv)
- }
- }
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
- // Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
- // table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
- addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
- for i := 0; i < rt.NumField(); i++ {
- f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
- continue
- }
- frv := rv.Field(i)
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
- }
- continue
- }
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
- }
- }
-
- if typeIsHash(tomlTypeOfGo(frv)) {
- fieldsSub = append(fieldsSub, append(start, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
- }
- }
- addFields(rt, rv, nil)
-
- var writeFields = func(fields [][]int) {
- for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
- continue
- }
-
- opts := getOptions(sft.Tag)
- if opts.skip {
- continue
- }
- keyName := sft.Name
- if opts.name != "" {
- keyName = opts.name
- }
- if opts.omitempty && isEmpty(sf) {
- continue
- }
- if opts.omitzero && isZero(sf) {
- continue
- }
-
- enc.encode(key.add(keyName), sf)
- }
- }
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() {
- return nil
- }
- switch rv.Kind() {
- case reflect.Bool:
- return tomlBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64:
- return tomlInteger
- case reflect.Float32, reflect.Float64:
- return tomlFloat
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
- return tomlArrayHash
- }
- return tomlArray
- case reflect.Ptr, reflect.Interface:
- return tomlTypeOfGo(rv.Elem())
- case reflect.String:
- return tomlString
- case reflect.Map:
- return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
- default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
- }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
-
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
- encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
- }
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
- }
- return firstType
-}
-
-type tagOptions struct {
- skip bool // "-"
- name string
- omitempty bool
- omitzero bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
- t := tag.Get("toml")
- if t == "-" {
- return tagOptions{skip: true}
- }
- var opts tagOptions
- parts := strings.Split(t, ",")
- opts.name = parts[0]
- for _, s := range parts[1:] {
- switch s {
- case "omitempty":
- opts.omitempty = true
- case "omitzero":
- opts.omitzero = true
- }
- }
- return opts
-}
-
-func isZero(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return rv.Float() == 0.0
- }
- return false
-}
-
-func isEmpty(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
- return rv.Len() == 0
- case reflect.Bool:
- return !rv.Bool()
- }
- return false
-}
-
-func (enc *Encoder) newline() {
- if enc.hasWritten {
- enc.wf("\n")
- }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key)
- enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
- enc.eElement(val)
- enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
- encPanic(err)
- }
- enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
- return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
- panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
- return v
- }
-}
-
-func isNil(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return rv.IsNil()
- default:
- return false
- }
-}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
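
[Editor's note] A matching sketch for the `Encoder` deleted above, using `NewEncoder`, the `Indent` field, and `Encode` as defined in the file; the `config` struct and values are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Title string
	Ports []int
}

func main() {
	cfg := config{Title: "TOML Example", Ports: []int{8001, 8002}}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf) // default indent is two spaces
	enc.Indent = "    "          // only affects nested (sub-)tables
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Output:
	// Title = "TOML Example"
	// Ports = [8001, 8002]
}
```
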
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd600..0000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d046..0000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index e0a742a887..0000000000
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,953 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-type itemType int
-
-const (
- itemError itemType = iota
- itemNIL // used in the parser to indicate no type
- itemEOF
- itemText
- itemString
- itemRawString
- itemMultilineString
- itemRawMultilineString
- itemBool
- itemInteger
- itemFloat
- itemDatetime
- itemArray // the start of an array
- itemArrayEnd
- itemTableStart
- itemTableEnd
- itemArrayTableStart
- itemArrayTableEnd
- itemKeyStart
- itemCommentStart
- itemInlineTableStart
- itemInlineTableEnd
-)
-
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
-
- // Allow for backing up up to three runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
-
- // A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
- stack []stateFn
-}
-
-type item struct {
- typ itemType
- val string
- line int
-}
-
-func (lx *lexer) nextItem() item {
- for {
- select {
- case item := <-lx.items:
- return item
- default:
- lx.state = lx.state(lx)
- }
- }
-}
-
-func lex(input string) *lexer {
- lx := &lexer{
- input: input,
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- }
- return lx
-}
-
-func (lx *lexer) push(state stateFn) {
- lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
- if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop")
- }
- last := lx.stack[len(lx.stack)-1]
- lx.stack = lx.stack[0 : len(lx.stack)-1]
- return last
-}
-
-func (lx *lexer) current() string {
- return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
- if lx.atEOF {
- panic("next called after EOF")
- }
- if lx.pos >= len(lx.input) {
- lx.atEOF = true
- return eof
- }
-
- if lx.input[lx.pos] == '\n' {
- lx.line++
- }
- lx.prevWidths[2] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
- lx.nprev++
- }
- r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.prevWidths[0] = w
- lx.pos += w
- return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
- lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only twice between calls to next.
-func (lx *lexer) backup() {
- if lx.atEOF {
- lx.atEOF = false
- return
- }
- if lx.nprev < 1 {
- panic("backed up too far")
- }
- w := lx.prevWidths[0]
- lx.prevWidths[0] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[2]
- lx.nprev--
- lx.pos -= w
- if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
- lx.line--
- }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
- if lx.next() == valid {
- return true
- }
- lx.backup()
- return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
- r := lx.next()
- lx.backup()
- return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
- for {
- r := lx.next()
- if pred(r) {
- continue
- }
- lx.backup()
- lx.ignore()
- return
- }
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
- }
- return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
- r := lx.next()
- if isWhitespace(r) || isNL(r) {
- return lexSkip(lx, lexTop)
- }
- switch r {
- case commentStart:
- lx.push(lexTop)
- return lexCommentStart
- case tableStart:
- return lexTableStart
- case eof:
- if lx.pos > lx.start {
- return lx.errorf("unexpected EOF")
- }
- lx.emit(itemEOF)
- return nil
- }
-
- // At this point, the only valid item can be a key, so we back up
- // and let the key lexer do the rest.
- lx.backup()
- lx.push(lexTopEnd)
- return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == commentStart:
- // a comment will read to a newline for us.
- lx.push(lexTop)
- return lexCommentStart
- case isWhitespace(r):
- return lexTopEnd
- case isNL(r):
- lx.ignore()
- return lexTop
- case r == eof:
- lx.emit(itemEOF)
- return nil
- }
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
-}
-
-// lexTableStart lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
- lx.next()
- lx.emit(itemArrayTableStart)
- lx.push(lexArrayTableEnd)
- } else {
- lx.emit(itemTableStart)
- lx.push(lexTableEnd)
- }
- return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
- lx.emit(itemTableEnd)
- return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
- }
- lx.emit(itemArrayTableEnd)
- return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.peek(); {
- case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
- case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
- default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that the first
-// character (which is not whitespace) has not yet been consumed.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
- }
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.next(); {
- case isWhitespace(r):
- return lexTableNameEnd
- case r == tableSep:
- lx.ignore()
- return lexTableNameStart
- case r == tableEnd:
- return lx.pop()
- default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
- }
-}
-
-// lexKeyStart skips leading whitespace and newlines, emits itemKeyStart at
-// the first character of a key, and dispatches to the bare or quoted key lexer.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
- switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
- default:
- lx.ignore()
- lx.emit(itemKeyStart)
- return lexBareKey
- }
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- default:
- return lx.errorf("bare keys cannot contain %q", r)
- }
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
- switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
- case isWhitespace(r):
- return lexSkip(lx, lexKeyEnd)
- default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
- }
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT newlines.
- // In array syntax, the array states are responsible for ignoring newlines.
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexValue)
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- }
- switch r {
- case arrayStart:
- lx.ignore()
- lx.emit(itemArray)
- return lexArrayValue
- case inlineTableStart:
- lx.ignore()
- lx.emit(itemInlineTableStart)
- return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
- lx.ignore() // Ignore """
- return lexMultilineString
- }
- lx.backup()
- }
- lx.ignore() // ignore the '"'
- return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
-				lx.ignore() // Ignore '''
- return lexMultilineRawString
- }
- lx.backup()
- }
- lx.ignore() // ignore the "'"
- return lexRawString
- case '+', '-':
- return lexNumberStart
- case '.': // special error case, be kind to users
- return lx.errorf("floats must start with a digit, not '.'")
- }
- if unicode.IsLetter(r) {
- // Be permissive here; lexBool will give a nice error if the
- // user wrote something like
- // x = foo
- // (i.e. not 'true' or 'false' but is something else word-like.)
- lx.backup()
- return lexBool
- }
- return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValue)
- case r == commentStart:
- lx.push(lexArrayValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
- return lexArrayEnd
- }
-
- lx.backup()
- lx.push(lexArrayValueEnd)
- return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
- lx.push(lexArrayValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexArrayValue // move on to the next value
- case r == arrayEnd:
- return lexArrayEnd
- }
- return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemArrayEnd)
- return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValue)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- lx.backup()
- lx.push(lexInlineTableValueEnd)
- return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValueEnd)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexInlineTableValue
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemInlineTableEnd)
- return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == '\\':
- lx.push(lexString)
- return lexStringEscape
- case r == stringEnd:
- lx.backup()
- lx.emit(itemString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case '\\':
- return lexMultilineStringEscape
- case stringEnd:
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
- lx.backup()
- lx.emit(itemRawString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case rawStringEnd:
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemRawMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
- return lexMultilineString
- }
- lx.backup()
- lx.push(lexMultilineString)
- return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case 'b':
- fallthrough
- case 't':
- fallthrough
- case 'n':
- fallthrough
- case 'f':
- fallthrough
- case 'r':
- fallthrough
- case '"':
- fallthrough
- case '\\':
- return lx.pop()
- case 'u':
- return lexShortUnicodeEscape
- case 'U':
- return lexLongUnicodeEscape
- }
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 4; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 8; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '-':
- return lexDatetime
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexDatetime
- }
- switch r {
- case '-', 'T', ':', '.', 'Z', '+':
- return lexDatetime
- }
-
- lx.backup()
- lx.emit(itemDatetime)
- return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
- }
- return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumber
- }
- switch r {
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexFloat
- }
- switch r {
- case '_', '.', '-', '+', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemFloat)
- return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false'.
-func lexBool(lx *lexer) stateFn {
- var rs []rune
- for {
- r := lx.next()
- if !unicode.IsLetter(r) {
- lx.backup()
- break
- }
- rs = append(rs, r)
- }
- s := string(rs)
- switch s {
- case "true", "false":
- lx.emit(itemBool)
- return lx.pop()
- }
- return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemCommentStart)
- return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
- lx.emit(itemText)
- return lx.pop()
- }
- lx.next()
- return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
-}
-
-func (itype itemType) String() string {
- switch itype {
- case itemError:
- return "Error"
- case itemNIL:
- return "NIL"
- case itemEOF:
- return "EOF"
- case itemText:
- return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
- return "String"
- case itemBool:
- return "Bool"
- case itemInteger:
- return "Integer"
- case itemFloat:
- return "Float"
- case itemDatetime:
- return "DateTime"
- case itemTableStart:
- return "TableStart"
- case itemTableEnd:
- return "TableEnd"
- case itemKeyStart:
- return "KeyStart"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
- case itemCommentStart:
- return "CommentStart"
- }
- panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 50869ef926..0000000000
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,592 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
- defer func() {
- if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
- return
- }
- panic(r)
- }
- }()
-
- p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
- ordered: make([]Key, 0),
- implicits: make(map[string]bool),
- }
- for {
- item := p.next()
- if item.typ == itemEOF {
- break
- }
- p.topLevel(item)
- }
-
- return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
-}
-
-func (p *parser) next() item {
- it := p.lx.nextItem()
- if it.typ == itemError {
- p.panicf("%s", it.val)
- }
- return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
- panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
- it := p.next()
- p.assertEqual(typ, it.typ)
- return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
- if expected != got {
- p.bug("Expected '%s' but got '%s'.", expected, got)
- }
-}
-
-func (p *parser) topLevel(item item) {
- switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
- p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemTableEnd, kg.typ)
-
- p.establishContext(key, false)
- p.setType("", tomlHash)
- p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemArrayTableEnd, kg.typ)
-
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
- p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- p.currentKey = ""
- default:
- p.bug("Unexpected type at top level: %s", item.typ)
- }
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
- switch it.typ {
- case itemText:
- return it.val
- case itemString, itemMultilineString,
- itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
- return s.(string)
- default:
- p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
- }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
- switch it.typ {
- case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
- case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
- case itemRawString:
- return it.val, p.typeOfPrimitive(it)
- case itemRawMultilineString:
- return stripFirstNewline(it.val), p.typeOfPrimitive(it)
- case itemBool:
- switch it.val {
- case "true":
- return true, p.typeOfPrimitive(it)
- case "false":
- return false, p.typeOfPrimitive(it)
- }
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
- }
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
- }
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
- }
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
-
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
- }
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
-
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
-			if it.typ == itemCommentStart {
-				p.expect(itemText)
-				continue
-			}
-			if it.typ != itemKeyStart {
-				p.bug("Expected key start but instead found %q, around line %d",
-					it.val, p.approxLine)
-			}
-
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
-
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
- }
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
- }
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
- accept := false
- for _, r := range s {
- if r == '_' {
- if !accept {
- return false
- }
- accept = false
- continue
- }
- accept = true
- }
- return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
- period := false
- for _, r := range s {
- if period && !isDigit(r) {
- return false
- }
- period = r == '.'
- }
- return !period
-}
-
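
A quick sketch of what the two validators above accept and reject, per the logic shown (illustrative, not an upstream test):

	numUnderscoresOK("1_000") // true: each '_' has a non-underscore neighbor on both sides
	numUnderscoresOK("_1000") // false: leading underscore
	numUnderscoresOK("1000_") // false: trailing underscore
	numPeriodsOK("3.14")      // true: the '.' is followed by a digit
	numPeriodsOK("123.")      // false: trailing '.'
	numPeriodsOK("1.e2")      // false: '.' is followed by 'e', not a digit
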
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
- hashContext := p.mapping
- keyContext := make(Key, 0)
-
-	// We only need implicit hashes for the parent keys, key[0 : len(key)-1].
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
- keyContext = append(keyContext, k)
-
- // No key? Make an implicit hash and move on.
- if !ok {
- p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
- }
-
- // If the hash context is actually an array of tables, then set
- // the hash context to the last element in that array.
- //
- // Otherwise, it better be a table, since this MUST be a key group (by
- // virtue of it not being the last element in a key).
- switch t := hashContext[k].(type) {
- case []map[string]interface{}:
- hashContext = t[len(t)-1]
- case map[string]interface{}:
- hashContext = t
- default:
- p.panicf("Key '%s' was already created as a hash.", keyContext)
- }
- }
-
- p.context = keyContext
- if array {
- // If this is the first element for this array, then allocate a new
- // list of tables for it.
- k := key[len(key)-1]
- if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
- }
-
- // Add a new table. But make sure the key hasn't already been used
- // for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
- } else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
- }
- } else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
- }
- p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, and will
-// account for implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- if tmpHash, ok = hash[k]; !ok {
- p.bug("Context for key '%s' has not been established.", keyContext)
- }
- switch t := tmpHash.(type) {
- case []map[string]interface{}:
- // The context is a table of hashes. Pick the most recent table
- // defined as the current hash.
- hash = t[len(t)-1]
- case map[string]interface{}:
- hash = t
- default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
- }
- }
- keyContext = append(keyContext, key)
-
- if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
- //
- // But we have to make sure to stop marking it as an implicit. (So that
- // another redefinition provokes an error.)
- //
- // Note that since it has already been defined (as a hash), we don't
- // want to overwrite it. So our business is done.
- if p.isImplicit(keyContext) {
- p.removeImplicit(keyContext)
- return
- }
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
- p.panicf("Key '%s' has already been defined.", keyContext)
- }
- hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
- keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
- if len(key) > 0 { // allow type setting for hashes
- keyContext = append(keyContext, key)
- }
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
- if len(p.currentKey) == 0 {
- return p.context.String()
- }
- if len(p.context) == 0 {
- return p.currentKey
- }
- return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
- }
- return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
- }
- }
- return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
- continue
- }
- r += 1
- if r >= len(s) {
- p.bug("Escape sequence at end of string.")
- return ""
- }
- switch s[r] {
- default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
- case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
- case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
- case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
- case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
- case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
- case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
- case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
- case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
- replaced = append(replaced, escaped)
- r += 5
- case 'U':
- // At this point, we know we have a Unicode escape of the form
-			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
- replaced = append(replaced, escaped)
- r += 9
- }
- }
- return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
- s := string(bs)
- hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
- if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
- }
- if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
- }
- return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
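
Since parse recovers panics raised through panicf, lexer and parser failures surface as ordinary error values at the public API. A minimal usage sketch against this vendored revision (assuming the usual imports of log and this toml package):

	var conf struct {
		Title string `toml:"title"`
		Port  int    `toml:"port"`
	}
	if _, err := toml.Decode("title = \"demo\"\nport = 8080\n", &conf); err != nil {
		log.Fatal(err) // e.g. "Near line 1 (last key parsed 'title'): ..."
	}
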
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be06..0000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8afc1a..0000000000
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be moving
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemMultilineString:
- return tomlString
- case itemRawString:
- return tomlString
- case itemRawMultilineString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
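
Because this pre-1.0 revision enforces homogeneity, a mixed array fails at parse time rather than at decode time. A small sketch of the failure mode described above:

	var v struct {
		A []interface{} `toml:"a"`
	}
	_, err := toml.Decode(`a = [1, "two"]`, &v)
	// err is non-nil: Array contains values of type 'Integer' and 'String',
	// but arrays must be homogeneous.
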
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 608997c22f..0000000000
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
- name string // the name of the field (`toml` tag included)
- tag bool // whether field has a `toml` tag
- index []int // represents the depth of an anonymous field
- typ reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" && !sf.Anonymous { // unexported
- continue
- }
- opts := getOptions(sf.Tag)
- if opts.skip {
- continue
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := opts.name != ""
- name := opts.name
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, field{name, tagged, index, ft})
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- f := field{name: ft.Name(), index: index, typ: ft}
- next = append(next, f)
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with TOML tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
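
The net effect of typeFields and dominantField mirrors encoding/json's embedding rules: a shallower field shadows a deeper embedded one, a `toml` tag breaks ties at equal depth, and an unresolvable tie drops the field entirely. A sketch:

	type Base struct {
		Name string // depth 2 below: shadowed
	}
	type Doc struct {
		Base
		Name string `toml:"name"` // depth 1 and tagged: this field wins
	}
	// Two untagged Name fields at the same depth would conflict, and
	// dominantField would report ok == false, dropping both.
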
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/.gitignore b/vendor/github.com/couchbase/go-couchbase/.gitignore
index eda885ce8d..eda885ce8d 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/.gitignore
+++ b/vendor/github.com/couchbase/go-couchbase/.gitignore
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/.travis.yml b/vendor/github.com/couchbase/go-couchbase/.travis.yml
index 4ecafb1894..4ecafb1894 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/.travis.yml
+++ b/vendor/github.com/couchbase/go-couchbase/.travis.yml
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/LICENSE b/vendor/github.com/couchbase/go-couchbase/LICENSE
index 0b23ef358e..0b23ef358e 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/LICENSE
+++ b/vendor/github.com/couchbase/go-couchbase/LICENSE
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/README.markdown b/vendor/github.com/couchbase/go-couchbase/README.markdown
index bf5fe49421..bf5fe49421 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/README.markdown
+++ b/vendor/github.com/couchbase/go-couchbase/README.markdown
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/audit.go b/vendor/github.com/couchbase/go-couchbase/audit.go
index 3db7d9f9ff..3db7d9f9ff 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/audit.go
+++ b/vendor/github.com/couchbase/go-couchbase/audit.go
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/client.go b/vendor/github.com/couchbase/go-couchbase/client.go
index 433b08ff02..63d125dade 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/client.go
+++ b/vendor/github.com/couchbase/go-couchbase/client.go
@@ -98,6 +98,17 @@ func IsRefreshRequired(err error) bool {
return false
}
+// IsUnknownCollection returns true if the error indicates that the collection is not known. Required by cbq-engine.
+func IsUnknownCollection(err error) bool {
+
+ res, ok := err.(*gomemcached.MCResponse)
+ if ok && (res.Status == gomemcached.UNKNOWN_COLLECTION) {
+ return true
+ }
+
+ return false
+}
+
// ClientOpCallback is called for each invocation of Do.
var ClientOpCallback func(opname, k string, start time.Time, err error)
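
A hedged sketch of how a caller such as cbq-engine might branch on the new helper; doQuery and refreshManifest are hypothetical stand-ins for the caller's own operations:

	if err := doQuery(); err != nil && couchbase.IsUnknownCollection(err) {
		refreshManifest() // hypothetical: reload the collection manifest and retry
	}
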
@@ -129,11 +140,10 @@ func (b *Bucket) Do2(k string, f func(mc *memcached.Client, vb uint16) error, de
if deadline && DefaultTimeout > 0 {
conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
- err = f(conn, uint16(vb))
- conn.SetDeadline(noDeadline)
} else {
- err = f(conn, uint16(vb))
+ conn.SetDeadline(noDeadline)
}
+ err = f(conn, uint16(vb))
var retry bool
discard := isOutOfBoundsError(err)
@@ -195,6 +205,7 @@ func getStatsParallel(sn string, b *Bucket, offset int, which string,
if err != nil {
gatheredStats = GatheredStats{Server: sn, Err: err}
} else {
+ conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
sm, err := conn.StatsMap(which)
gatheredStats = GatheredStats{Server: sn, Stats: sm, Err: err}
}
@@ -236,19 +247,48 @@ func (b *Bucket) GatherStats(which string) map[string]GatheredStats {
}
// Get bucket count through the bucket stats
-func (b *Bucket) GetCount(refresh bool) (count int64, err error) {
+func (b *Bucket) GetCount(refresh bool, context ...*memcached.ClientContext) (count int64, err error) {
if refresh {
b.Refresh()
}
var cnt int64
- for _, gs := range b.GatherStats("") {
- if len(gs.Stats) > 0 {
- cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64)
- if err != nil {
- return 0, err
+ if len(context) > 0 {
+ key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
+ resKey := ""
+ for _, gs := range b.GatherStats(key) {
+ if len(gs.Stats) > 0 {
+
+ // the key encodes the scope and collection id
+ // we don't have the scope id, so we have to find it...
+ if resKey == "" {
+					for k := range gs.Stats {
+ resKey = strings.TrimRightFunc(k, func(r rune) bool {
+ return r != ':'
+ }) + "items"
+ break
+ }
+ }
+ cnt, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ count += cnt
+ } else if gs.Err != nil {
+ return 0, gs.Err
+ }
+ }
+ } else {
+ for _, gs := range b.GatherStats("") {
+ if len(gs.Stats) > 0 {
+ cnt, err = strconv.ParseInt(gs.Stats["curr_items"], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ count += cnt
+ } else if gs.Err != nil {
+ return 0, gs.Err
}
- count += cnt
}
}
@@ -256,19 +296,49 @@ func (b *Bucket) GetCount(refresh bool) (count int64, err error) {
}
// Get bucket document size through the bucket stats
-func (b *Bucket) GetSize(refresh bool) (size int64, err error) {
+func (b *Bucket) GetSize(refresh bool, context ...*memcached.ClientContext) (size int64, err error) {
+
if refresh {
b.Refresh()
}
var sz int64
- for _, gs := range b.GatherStats("") {
- if len(gs.Stats) > 0 {
- sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64)
- if err != nil {
- return 0, err
+ if len(context) > 0 {
+ key := fmt.Sprintf("collections-byid 0x%x", context[0].CollId)
+ resKey := ""
+ for _, gs := range b.GatherStats(key) {
+ if len(gs.Stats) > 0 {
+
+ // the key encodes the scope and collection id
+ // we don't have the scope id, so we have to find it...
+ if resKey == "" {
+					for k := range gs.Stats {
+ resKey = strings.TrimRightFunc(k, func(r rune) bool {
+ return r != ':'
+ }) + "disk_size"
+ break
+ }
+ }
+ sz, err = strconv.ParseInt(gs.Stats[resKey], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ size += sz
+ } else if gs.Err != nil {
+ return 0, gs.Err
+ }
+ }
+ } else {
+ for _, gs := range b.GatherStats("") {
+ if len(gs.Stats) > 0 {
+ sz, err = strconv.ParseInt(gs.Stats["ep_value_size"], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ size += sz
+ } else if gs.Err != nil {
+ return 0, gs.Err
}
- size += sz
}
}
@@ -311,8 +381,12 @@ func isOutOfBoundsError(err error) bool {
}
func getDeadline(reqDeadline time.Time, duration time.Duration) time.Time {
- if reqDeadline.IsZero() && duration > 0 {
- return time.Now().Add(duration)
+ if reqDeadline.IsZero() {
+ if duration > 0 {
+ return time.Unix(time.Now().Unix(), 0).Add(duration)
+ } else {
+ return noDeadline
+ }
}
return reqDeadline
}
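
Note the behavioral change in getDeadline above: the base time is now truncated to whole seconds before the timeout is added, so deadlines computed within the same wall-clock second compare equal, and a zero duration yields noDeadline rather than the zero reqDeadline. A sketch of the truncation:

	now := time.Now()                    // e.g. 12:00:00.734
	base := time.Unix(now.Unix(), 0)     // 12:00:00.000, second-aligned
	deadline := base.Add(DefaultTimeout) // what the helper returns for a zero reqDeadline
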
@@ -334,7 +408,7 @@ func backOff(attempt, maxAttempts int, duration time.Duration, exponential bool)
func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
- eStatus *errorStatus) {
+ eStatus *errorStatus, context ...*memcached.ClientContext) {
if SlowServerCallWarningThreshold > 0 {
defer slowLog(time.Now(), "call to doBulkGet(%d, %d keys)", vb, len(keys))
}
@@ -389,8 +463,7 @@ func (b *Bucket) doBulkGet(vb uint16, keys []string, reqDeadline time.Time,
}
conn.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
- err = conn.GetBulk(vb, keys, rv, subPaths)
- conn.SetDeadline(noDeadline)
+ err = conn.GetBulk(vb, keys, rv, subPaths, context...)
discard := false
defer func() {
@@ -474,6 +547,7 @@ type vbBulkGet struct {
wg *sync.WaitGroup
subPaths []string
groupError *errorStatus
+ context []*memcached.ClientContext
}
const _NUM_CHANNELS = 5
@@ -523,14 +597,14 @@ func vbDoBulkGet(vbg *vbBulkGet) {
// Workers cannot panic and die
recover()
}()
- vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError)
+ vbg.b.doBulkGet(vbg.k, vbg.keys, vbg.reqDeadline, vbg.ch, vbg.ech, vbg.subPaths, vbg.groupError, vbg.context...)
}
var _ERR_CHAN_FULL = fmt.Errorf("Data request queue full, aborting query.")
func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
ch chan<- map[string]*gomemcached.MCResponse, ech chan<- error, subPaths []string,
- eStatus *errorStatus) {
+ eStatus *errorStatus, context ...*memcached.ClientContext) {
defer close(ch)
defer close(ech)
@@ -554,6 +628,7 @@ func (b *Bucket) processBulkGet(kdm map[uint16][]string, reqDeadline time.Time,
wg: wg,
subPaths: subPaths,
groupError: eStatus,
+ context: context,
}
wg.Add(1)
@@ -612,9 +687,9 @@ func errorCollector(ech <-chan error, eout chan<- error, eStatus *errorStatus) {
// This is a wrapper around GetBulk which converts all values returned
// by GetBulk from raw memcached responses into []byte slices.
// Returns one document for duplicate keys
-func (b *Bucket) GetBulkRaw(keys []string) (map[string][]byte, error) {
+func (b *Bucket) GetBulkRaw(keys []string, context ...*memcached.ClientContext) (map[string][]byte, error) {
- resp, eout := b.getBulk(keys, noDeadline, nil)
+ resp, eout := b.getBulk(keys, noDeadline, nil, context...)
rv := make(map[string][]byte, len(keys))
for k, av := range resp {
@@ -632,15 +707,15 @@ func (b *Bucket) GetBulkRaw(keys []string) (map[string][]byte, error) {
// map array for each key. Keys that were not found will not be included in
// the map.
-func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) {
- return b.getBulk(keys, reqDeadline, subPaths)
+func (b *Bucket) GetBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
+ return b.getBulk(keys, reqDeadline, subPaths, context...)
}
func (b *Bucket) ReleaseGetBulkPools(rv map[string]*gomemcached.MCResponse) {
_STRING_MCRESPONSE_POOL.Put(rv)
}
-func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string) (map[string]*gomemcached.MCResponse, error) {
+func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (map[string]*gomemcached.MCResponse, error) {
kdm := _VB_STRING_POOL.Get()
defer _VB_STRING_POOL.Put(kdm)
for _, k := range keys {
@@ -663,7 +738,7 @@ func (b *Bucket) getBulk(keys []string, reqDeadline time.Time, subPaths []string
ech := make(chan error)
go errorCollector(ech, eout, groupErrorStatus)
- go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus)
+ go b.processBulkGet(kdm, reqDeadline, ch, ech, subPaths, groupErrorStatus, context...)
var rv map[string]*gomemcached.MCResponse
@@ -739,7 +814,7 @@ var ErrKeyExists = errors.New("key exists")
// before being written. It must be JSON-marshalable and it must not
// be nil.
func (b *Bucket) Write(k string, flags, exp int, v interface{},
- opt WriteOptions) (err error) {
+ opt WriteOptions, context ...*memcached.ClientContext) (err error) {
if ClientOpCallback != nil {
defer func(t time.Time) {
@@ -761,7 +836,7 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
if opt&AddOnly != 0 {
res, err = memcached.UnwrapMemcachedError(
- mc.Add(vb, k, flags, exp, data))
+ mc.Add(vb, k, flags, exp, data, context...))
if err == nil && res.Status != gomemcached.SUCCESS {
if res.Status == gomemcached.KEY_EEXISTS {
err = ErrKeyExists
@@ -770,11 +845,11 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
}
}
} else if opt&Append != 0 {
- res, err = mc.Append(vb, k, data)
+ res, err = mc.Append(vb, k, data, context...)
} else if data == nil {
- res, err = mc.Del(vb, k)
+ res, err = mc.Del(vb, k, context...)
} else {
- res, err = mc.Set(vb, k, flags, exp, data)
+ res, err = mc.Set(vb, k, flags, exp, data, context...)
}
return err
@@ -788,7 +863,7 @@ func (b *Bucket) Write(k string, flags, exp int, v interface{},
}
func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
- opt WriteOptions) (mt *MutationToken, err error) {
+ opt WriteOptions, context ...*memcached.ClientContext) (mt *MutationToken, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) {
@@ -810,7 +885,7 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
if opt&AddOnly != 0 {
res, err = memcached.UnwrapMemcachedError(
- mc.Add(vb, k, flags, exp, data))
+ mc.Add(vb, k, flags, exp, data, context...))
if err == nil && res.Status != gomemcached.SUCCESS {
if res.Status == gomemcached.KEY_EEXISTS {
err = ErrKeyExists
@@ -819,11 +894,11 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
}
}
} else if opt&Append != 0 {
- res, err = mc.Append(vb, k, data)
+ res, err = mc.Append(vb, k, data, context...)
} else if data == nil {
- res, err = mc.Del(vb, k)
+ res, err = mc.Del(vb, k, context...)
} else {
- res, err = mc.Set(vb, k, flags, exp, data)
+ res, err = mc.Set(vb, k, flags, exp, data, context...)
}
if len(res.Extras) >= 16 {
@@ -843,17 +918,17 @@ func (b *Bucket) WriteWithMT(k string, flags, exp int, v interface{},
}
// Set a value in this bucket with Cas and return the new Cas value
-func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}) (uint64, error) {
- return b.WriteCas(k, 0, exp, cas, v, 0)
+func (b *Bucket) Cas(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
+ return b.WriteCas(k, 0, exp, cas, v, 0, context...)
}
// Set a value in this bucket with Cas without json encoding it
-func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}) (uint64, error) {
- return b.WriteCas(k, 0, exp, cas, v, Raw)
+func (b *Bucket) CasRaw(k string, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, error) {
+ return b.WriteCas(k, 0, exp, cas, v, Raw, context...)
}
func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
- opt WriteOptions) (newCas uint64, err error) {
+ opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) {
@@ -873,7 +948,7 @@ func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
var res *gomemcached.MCResponse
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err = mc.SetCas(vb, k, flags, exp, cas, data)
+ res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
return err
})
@@ -885,16 +960,16 @@ func (b *Bucket) WriteCas(k string, flags, exp int, cas uint64, v interface{},
}
// Extended CAS operation. These functions will return the mutation token, i.e. vbuuid & guard
-func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) {
- return b.WriteCasWithMT(k, flags, exp, cas, v, 0)
+func (b *Bucket) CasWithMeta(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
+ return b.WriteCasWithMT(k, flags, exp, cas, v, 0, context...)
}
-func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}) (uint64, *MutationToken, error) {
- return b.WriteCasWithMT(k, flags, exp, cas, v, Raw)
+func (b *Bucket) CasWithMetaRaw(k string, flags int, exp int, cas uint64, v interface{}, context ...*memcached.ClientContext) (uint64, *MutationToken, error) {
+ return b.WriteCasWithMT(k, flags, exp, cas, v, Raw, context...)
}
func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interface{},
- opt WriteOptions) (newCas uint64, mt *MutationToken, err error) {
+ opt WriteOptions, context ...*memcached.ClientContext) (newCas uint64, mt *MutationToken, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) {
@@ -914,7 +989,7 @@ func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interfac
var res *gomemcached.MCResponse
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err = mc.SetCas(vb, k, flags, exp, cas, data)
+ res, err = mc.SetCas(vb, k, flags, exp, cas, data, context...)
return err
})
@@ -939,25 +1014,25 @@ func (b *Bucket) WriteCasWithMT(k string, flags, exp int, cas uint64, v interfac
// Set a value in this bucket.
// The value will be serialized into a JSON document.
-func (b *Bucket) Set(k string, exp int, v interface{}) error {
- return b.Write(k, 0, exp, v, 0)
+func (b *Bucket) Set(k string, exp int, v interface{}, context ...*memcached.ClientContext) error {
+ return b.Write(k, 0, exp, v, 0, context...)
}
// Set a value in this bucket with flags
-func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}) (*MutationToken, error) {
- return b.WriteWithMT(k, flags, exp, v, 0)
+func (b *Bucket) SetWithMeta(k string, flags int, exp int, v interface{}, context ...*memcached.ClientContext) (*MutationToken, error) {
+ return b.WriteWithMT(k, flags, exp, v, 0, context...)
}
// SetRaw sets a value in this bucket without JSON encoding it.
-func (b *Bucket) SetRaw(k string, exp int, v []byte) error {
- return b.Write(k, 0, exp, v, Raw)
+func (b *Bucket) SetRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) error {
+ return b.Write(k, 0, exp, v, Raw, context...)
}
// Add adds a value to this bucket; like Set except that nothing
// happens if the key exists. The value will be serialized into a
// JSON document.
-func (b *Bucket) Add(k string, exp int, v interface{}) (added bool, err error) {
- err = b.Write(k, 0, exp, v, AddOnly)
+func (b *Bucket) Add(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, err error) {
+ err = b.Write(k, 0, exp, v, AddOnly, context...)
if err == ErrKeyExists {
return false, nil
}
@@ -966,8 +1041,8 @@ func (b *Bucket) Add(k string, exp int, v interface{}) (added bool, err error) {
// AddRaw adds a value to this bucket; like SetRaw except that nothing
// happens if the key exists. The value will be stored as raw bytes.
-func (b *Bucket) AddRaw(k string, exp int, v []byte) (added bool, err error) {
- err = b.Write(k, 0, exp, v, AddOnly|Raw)
+func (b *Bucket) AddRaw(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, err error) {
+ err = b.Write(k, 0, exp, v, AddOnly|Raw, context...)
if err == ErrKeyExists {
return false, nil
}
@@ -977,8 +1052,8 @@ func (b *Bucket) AddRaw(k string, exp int, v []byte) (added bool, err error) {
// AddWithMT adds a value to this bucket; like Add, but it also returns
// the mutation token. Nothing happens if the key exists. The value will
// be serialized into a JSON document.
-func (b *Bucket) AddWithMT(k string, exp int, v interface{}) (added bool, mt *MutationToken, err error) {
- mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly)
+func (b *Bucket) AddWithMT(k string, exp int, v interface{}, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
+ mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly, context...)
if err == ErrKeyExists {
return false, mt, nil
}
@@ -987,8 +1062,8 @@ func (b *Bucket) AddWithMT(k string, exp int, v interface{}) (added bool, mt *Mu
// AddRawWithMT adds a value to this bucket; like AddRaw, but it also
// returns the mutation token. The value will be stored as raw bytes.
-func (b *Bucket) AddRawWithMT(k string, exp int, v []byte) (added bool, mt *MutationToken, err error) {
- mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw)
+func (b *Bucket) AddRawWithMT(k string, exp int, v []byte, context ...*memcached.ClientContext) (added bool, mt *MutationToken, err error) {
+ mt, err = b.WriteWithMT(k, 0, exp, v, AddOnly|Raw, context...)
if err == ErrKeyExists {
return false, mt, nil
}
@@ -996,43 +1071,8 @@ func (b *Bucket) AddRawWithMT(k string, exp int, v []byte) (added bool, mt *Muta
}
// Append appends raw data to an existing item.
-func (b *Bucket) Append(k string, data []byte) error {
- return b.Write(k, 0, 0, data, Append|Raw)
-}
-
-func (b *Bucket) GetsMCFromCollection(collUid uint32, key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) {
- var err error
- var response *gomemcached.MCResponse
-
- if key == "" {
- return nil, nil
- }
-
- if ClientOpCallback != nil {
- defer func(t time.Time) { ClientOpCallback("GetsMCFromCollection", key, t, err) }(time.Now())
- }
-
- err = b.Do2(key, func(mc *memcached.Client, vb uint16) error {
- var err1 error
-
- mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
- _, err1 = mc.SelectBucket(b.Name)
- if err1 != nil {
- mc.SetDeadline(noDeadline)
- return err1
- }
-
- mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
- response, err1 = mc.GetFromCollection(vb, collUid, key)
- if err1 != nil {
- mc.SetDeadline(noDeadline)
- return err1
- }
-
- return nil
- }, false)
-
- return response, err
+func (b *Bucket) Append(k string, data []byte, context ...*memcached.ClientContext) error {
+ return b.Write(k, 0, 0, data, Append|Raw, context...)
}
// Returns collectionUid, manifestUid, error.
@@ -1053,13 +1093,11 @@ func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline t
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
_, err1 = mc.SelectBucket(b.Name)
if err1 != nil {
- mc.SetDeadline(noDeadline)
return err1
}
response, err1 = mc.CollectionsGetCID(scope, collection)
if err1 != nil {
- mc.SetDeadline(noDeadline)
return err1
}
@@ -1073,7 +1111,7 @@ func (b *Bucket) GetCollectionCID(scope string, collection string, reqDeadline t
}
// Get a value straight from Memcached
-func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCResponse, error) {
+func (b *Bucket) GetsMC(key string, reqDeadline time.Time, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
var err error
var response *gomemcached.MCResponse
@@ -1089,8 +1127,7 @@ func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCRespo
var err1 error
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
- response, err1 = mc.Get(vb, key)
- mc.SetDeadline(noDeadline)
+ response, err1 = mc.Get(vb, key, context...)
if err1 != nil {
return err1
}
@@ -1100,7 +1137,7 @@ func (b *Bucket) GetsMC(key string, reqDeadline time.Time) (*gomemcached.MCRespo
}
// Get a value through the subdoc API
-func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string) (*gomemcached.MCResponse, error) {
+func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string, context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
var err error
var response *gomemcached.MCResponse
@@ -1116,8 +1153,7 @@ func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string
var err1 error
mc.SetDeadline(getDeadline(reqDeadline, DefaultTimeout))
- response, err1 = mc.GetSubdoc(vb, key, subPaths)
- mc.SetDeadline(noDeadline)
+ response, err1 = mc.GetSubdoc(vb, key, subPaths, context...)
if err1 != nil {
return err1
}
@@ -1128,7 +1164,7 @@ func (b *Bucket) GetsSubDoc(key string, reqDeadline time.Time, subPaths []string
// GetsRaw gets a raw value from this bucket including its CAS
// counter and flags.
-func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
+func (b *Bucket) GetsRaw(k string, context ...*memcached.ClientContext) (data []byte, flags int,
cas uint64, err error) {
if ClientOpCallback != nil {
@@ -1136,7 +1172,7 @@ func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
}
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err := mc.Get(vb, k)
+ res, err := mc.Get(vb, k, context...)
if err != nil {
return err
}
@@ -1153,8 +1189,8 @@ func (b *Bucket) GetsRaw(k string) (data []byte, flags int,
// Gets gets a value from this bucket, including its CAS counter. The
// value is expected to be a JSON stream and will be deserialized into
// rv.
-func (b *Bucket) Gets(k string, rv interface{}, caso *uint64) error {
- data, _, cas, err := b.GetsRaw(k)
+func (b *Bucket) Gets(k string, rv interface{}, caso *uint64, context ...*memcached.ClientContext) error {
+ data, _, cas, err := b.GetsRaw(k, context...)
if err != nil {
return err
}
@@ -1167,19 +1203,19 @@ func (b *Bucket) Gets(k string, rv interface{}, caso *uint64) error {
// Get a value from this bucket.
// The value is expected to be a JSON stream and will be deserialized
// into rv.
-func (b *Bucket) Get(k string, rv interface{}) error {
- return b.Gets(k, rv, nil)
+func (b *Bucket) Get(k string, rv interface{}, context ...*memcached.ClientContext) error {
+ return b.Gets(k, rv, nil, context...)
}
// GetRaw gets a raw value from this bucket. No marshaling is performed.
-func (b *Bucket) GetRaw(k string) ([]byte, error) {
- d, _, _, err := b.GetsRaw(k)
+func (b *Bucket) GetRaw(k string, context ...*memcached.ClientContext) ([]byte, error) {
+ d, _, _, err := b.GetsRaw(k, context...)
return d, err
}
// GetAndTouchRaw gets a raw value from this bucket including its CAS
// counter and flags, and updates the expiry on the doc.
-func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
+func (b *Bucket) GetAndTouchRaw(k string, exp int, context ...*memcached.ClientContext) (data []byte,
cas uint64, err error) {
if ClientOpCallback != nil {
@@ -1187,7 +1223,7 @@ func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
}
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err := mc.GetAndTouch(vb, k, exp)
+ res, err := mc.GetAndTouch(vb, k, exp, context...)
if err != nil {
return err
}
@@ -1199,14 +1235,14 @@ func (b *Bucket) GetAndTouchRaw(k string, exp int) (data []byte,
}
// GetMeta returns the meta values for a key
-func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64) (err error) {
+func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *uint64, context ...*memcached.ClientContext) (err error) {
if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("GetsMeta", k, t, err) }(time.Now())
}
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err := mc.GetMeta(vb, k)
+ res, err := mc.GetMeta(vb, k, context...)
if err != nil {
return err
}
@@ -1231,19 +1267,19 @@ func (b *Bucket) GetMeta(k string, flags *int, expiry *int, cas *uint64, seqNo *
}
// Delete a key from this bucket.
-func (b *Bucket) Delete(k string) error {
- return b.Write(k, 0, 0, nil, Raw)
+func (b *Bucket) Delete(k string, context ...*memcached.ClientContext) error {
+ return b.Write(k, 0, 0, nil, Raw, context...)
}
// Incr increments the value at a given key by amt and defaults to def if no value present.
-func (b *Bucket) Incr(k string, amt, def uint64, exp int) (val uint64, err error) {
+func (b *Bucket) Incr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("Incr", k, t, err) }(time.Now())
}
var rv uint64
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err := mc.Incr(vb, k, amt, def, exp)
+ res, err := mc.Incr(vb, k, amt, def, exp, context...)
if err != nil {
return err
}
@@ -1254,14 +1290,14 @@ func (b *Bucket) Incr(k string, amt, def uint64, exp int) (val uint64, err error
}
// Decr decrements the value at a given key by amt and defaults to def if no value present.
-func (b *Bucket) Decr(k string, amt, def uint64, exp int) (val uint64, err error) {
+func (b *Bucket) Decr(k string, amt, def uint64, exp int, context ...*memcached.ClientContext) (val uint64, err error) {
if ClientOpCallback != nil {
defer func(t time.Time) { ClientOpCallback("Decr", k, t, err) }(time.Now())
}
var rv uint64
err = b.Do(k, func(mc *memcached.Client, vb uint16) error {
- res, err := mc.Decr(vb, k, amt, def, exp)
+ res, err := mc.Decr(vb, k, amt, def, exp, context...)
if err != nil {
return err
}
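
Taken together, these hunks thread an optional variadic *memcached.ClientContext argument through the entire Bucket read/write API. A minimal usage sketch, assuming a locally running cluster; the endpoint, bucket name, key, and collection id are placeholders (a real id would come from GetCollectionCID):

package main

import (
	"fmt"

	couchbase "github.com/couchbase/go-couchbase"
	memcached "github.com/couchbase/gomemcached/client"
)

func main() {
	// Placeholder endpoint and bucket; adjust for a real cluster.
	bucket, err := couchbase.GetBucket("http://localhost:8091/", "default", "default")
	if err != nil {
		panic(err)
	}
	defer bucket.Close()

	// Without a context, calls behave exactly as before (default collection).
	if err := bucket.Set("key1", 0, map[string]string{"hello": "world"}); err != nil {
		panic(err)
	}

	// With a ClientContext, the same call targets a specific collection.
	// CollId 8 is made up; it also requires a collections-enabled connection.
	ctx := &memcached.ClientContext{CollId: 8}
	var doc map[string]string
	if err := bucket.Get("key1", &doc, ctx); err != nil {
		panic(err)
	}
	fmt.Println(doc)
}
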
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/conn_pool.go b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
index 23102abd84..47854c09f1 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/conn_pool.go
+++ b/vendor/github.com/couchbase/go-couchbase/conn_pool.go
@@ -45,11 +45,12 @@ type connectionPool struct {
poolSize int
connCount uint64
inUse bool
+ encrypted bool
tlsConfig *tls.Config
- bucket string
+ bucket string
}
-func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string) *connectionPool {
+func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolOverflow int, tlsConfig *tls.Config, bucket string, encrypted bool) *connectionPool {
connSize := poolSize
if closer {
connSize += poolOverflow
@@ -61,9 +62,14 @@ func newConnectionPool(host string, ah AuthHandler, closer bool, poolSize, poolO
mkConn: defaultMkConn,
auth: ah,
poolSize: poolSize,
- tlsConfig: tlsConfig,
bucket: bucket,
+ encrypted: encrypted,
}
+
+ if encrypted {
+ rv.tlsConfig = tlsConfig
+ }
+
if closer {
rv.bailOut = make(chan bool, 1)
go rv.connCloser()
@@ -91,6 +97,10 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
return nil, err
}
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
+ }
+
if TCPKeepalive == true {
conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second)
}
@@ -111,16 +121,7 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
}
if len(features) > 0 {
- if DefaultTimeout > 0 {
- conn.SetDeadline(getDeadline(noDeadline, DefaultTimeout))
- }
-
res, err := conn.EnableFeatures(features)
-
- if DefaultTimeout > 0 {
- conn.SetDeadline(noDeadline)
- }
-
if err != nil && isTimeoutError(err) {
conn.Close()
return nil, err
@@ -137,10 +138,15 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
conn.Close()
return nil, err
}
+
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(noDeadline)
+ }
+
return conn, nil
}
name, pass, bucket := ah.GetCredentials()
- if bucket == "" {
+ if bucket == "" {
// Authenticator does not know specific bucket.
bucket = bucketName
}
@@ -161,6 +167,11 @@ func defaultMkConn(host string, ah AuthHandler, tlsConfig *tls.Config, bucketNam
}
}
}
+
+ if DefaultTimeout > 0 {
+ conn.SetDeadline(noDeadline)
+ }
+
return conn, nil
}
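
The reworked defaultMkConn also replaces the old set/clear deadline pairs around individual calls with one deadline that covers the whole connection setup and is cleared only at the end. A standalone sketch of that pattern, with a placeholder address and an elided handshake:

package main

import (
	"net"
	"time"
)

// setupConn is hypothetical: one deadline spans every setup step
// (auth, HELLO, bucket selection), then is cleared before the
// connection goes into the pool.
func setupConn(addr string, timeout time.Duration) (net.Conn, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	if timeout > 0 {
		conn.SetDeadline(time.Now().Add(timeout)) // covers all setup steps
	}
	// ... handshake steps would go here; any of them blocking past the
	// deadline fails with a timeout error ...
	if timeout > 0 {
		conn.SetDeadline(time.Time{}) // clear: pooled connections carry no deadline
	}
	return conn, nil
}

func main() {
	if conn, err := setupConn("127.0.0.1:11210", 5*time.Second); err == nil {
		conn.Close()
	}
}
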
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/ddocs.go b/vendor/github.com/couchbase/go-couchbase/ddocs.go
index f9cc343aa8..f9cc343aa8 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/ddocs.go
+++ b/vendor/github.com/couchbase/go-couchbase/ddocs.go
diff --git a/vendor/github.com/couchbase/go-couchbase/go.mod b/vendor/github.com/couchbase/go-couchbase/go.mod
new file mode 100644
index 0000000000..4d4ed0a714
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/go.mod
@@ -0,0 +1,3 @@
+module github.com/couchbase/go-couchbase
+
+go 1.13
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/observe.go b/vendor/github.com/couchbase/go-couchbase/observe.go
index 6e746f5a16..6e746f5a16 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/observe.go
+++ b/vendor/github.com/couchbase/go-couchbase/observe.go
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/pools.go b/vendor/github.com/couchbase/go-couchbase/pools.go
index 0e2379398a..39db2ddbd9 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/pools.go
+++ b/vendor/github.com/couchbase/go-couchbase/pools.go
@@ -34,6 +34,9 @@ var ClientTimeOut = 10 * time.Second
var HTTPTransport = &http.Transport{MaxIdleConnsPerHost: MaxIdleConnsPerHost}
var HTTPClient = &http.Client{Transport: HTTPTransport, Timeout: ClientTimeOut}
+// Use this client for reading from streams that should be open for an extended duration.
+var HTTPClientForStreaming = &http.Client{Transport: HTTPTransport, Timeout: 0}
+
// PoolSize is the size of each connection pool (per host).
var PoolSize = 64
@@ -164,22 +167,23 @@ type Pools struct {
// A Node is a computer in a cluster running the couchbase software.
type Node struct {
- ClusterCompatibility int `json:"clusterCompatibility"`
- ClusterMembership string `json:"clusterMembership"`
- CouchAPIBase string `json:"couchApiBase"`
- Hostname string `json:"hostname"`
- InterestingStats map[string]float64 `json:"interestingStats,omitempty"`
- MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"`
- MCDMemoryReserved float64 `json:"mcdMemoryReserved"`
- MemoryFree float64 `json:"memoryFree"`
- MemoryTotal float64 `json:"memoryTotal"`
- OS string `json:"os"`
- Ports map[string]int `json:"ports"`
- Services []string `json:"services"`
- Status string `json:"status"`
- Uptime int `json:"uptime,string"`
- Version string `json:"version"`
- ThisNode bool `json:"thisNode,omitempty"`
+ ClusterCompatibility int `json:"clusterCompatibility"`
+ ClusterMembership string `json:"clusterMembership"`
+ CouchAPIBase string `json:"couchApiBase"`
+ Hostname string `json:"hostname"`
+ AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
+ InterestingStats map[string]float64 `json:"interestingStats,omitempty"`
+ MCDMemoryAllocated float64 `json:"mcdMemoryAllocated"`
+ MCDMemoryReserved float64 `json:"mcdMemoryReserved"`
+ MemoryFree float64 `json:"memoryFree"`
+ MemoryTotal float64 `json:"memoryTotal"`
+ OS string `json:"os"`
+ Ports map[string]int `json:"ports"`
+ Services []string `json:"services"`
+ Status string `json:"status"`
+ Uptime int `json:"uptime,string"`
+ Version string `json:"version"`
+ ThisNode bool `json:"thisNode,omitempty"`
}
// A Pool of nodes and buckets.
@@ -189,6 +193,12 @@ type Pool struct {
BucketURL map[string]string `json:"buckets"`
+ MemoryQuota float64 `json:"memoryQuota"`
+ CbasMemoryQuota float64 `json:"cbasMemoryQuota"`
+ EventingMemoryQuota float64 `json:"eventingMemoryQuota"`
+ FtsMemoryQuota float64 `json:"ftsMemoryQuota"`
+ IndexMemoryQuota float64 `json:"indexMemoryQuota"`
+
client *Client
}
@@ -217,6 +227,7 @@ type Bucket struct {
AuthType string `json:"authType"`
Capabilities []string `json:"bucketCapabilities"`
CapabilitiesVersion string `json:"bucketCapabilitiesVer"`
+ CollectionsManifestUid string `json:"collectionsManifestUid"`
Type string `json:"bucketType"`
Name string `json:"name"`
NodeLocator string `json:"nodeLocator"`
@@ -259,9 +270,15 @@ type PoolServices struct {
// NodeServices is all the bucket-independent services running on
// a node (given by Hostname)
type NodeServices struct {
- Services map[string]int `json:"services,omitempty"`
+ Services map[string]int `json:"services,omitempty"`
+ Hostname string `json:"hostname"`
+ ThisNode bool `json:"thisNode"`
+ AlternateNames map[string]NodeAlternateNames `json:"alternateAddresses"`
+}
+
+type NodeAlternateNames struct {
Hostname string `json:"hostname"`
- ThisNode bool `json:"thisNode"`
+ Ports map[string]int `json:"ports"`
}
type BucketNotFoundError struct {
@@ -344,6 +361,13 @@ func (b *Bucket) GetName() string {
return ret
}
+func (b *Bucket) GetUUID() string {
+ b.RLock()
+ defer b.RUnlock()
+ ret := b.UUID
+ return ret
+}
+
// Nodes returns the current list of nodes servicing this bucket.
func (b *Bucket) Nodes() []Node {
b.RLock()
@@ -474,13 +498,14 @@ func (b *Bucket) getRandomConnection() (*memcached.Client, *connectionPool, erro
// Client.GetRandomDoc() call to get a random document from that node.
//
-func (b *Bucket) GetRandomDoc() (*gomemcached.MCResponse, error) {
+func (b *Bucket) GetRandomDoc(context ...*memcached.ClientContext) (*gomemcached.MCResponse, error) {
// get a connection from the pool
conn, pool, err := b.getRandomConnection()
if err != nil {
return nil, err
}
+ conn.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
// We may need to select the bucket before GetRandomDoc()
// will work. This is sometimes done at startup (see defaultMkConn())
@@ -491,12 +516,60 @@ func (b *Bucket) GetRandomDoc() (*gomemcached.MCResponse, error) {
}
// get a random document from the connection
- doc, err := conn.GetRandomDoc()
+ doc, err := conn.GetRandomDoc(context...)
// need to return the connection to the pool
pool.Return(conn)
return doc, err
}
+// Bucket DDL
+func uriAdj(s string) string {
+ return strings.Replace(s, "%", "%25", -1)
+}
+
+func (b *Bucket) CreateScope(scope string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": scope}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections", args, nil)
+}
+
+func (b *Bucket) DropScope(scope string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), nil, nil)
+}
+
+func (b *Bucket) CreateCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": collection}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope), args, nil)
+}
+
+func (b *Bucket) DropCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ return client.parseDeleteURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections/"+uriAdj(scope)+"/"+uriAdj(collection), nil, nil)
+}
+
+func (b *Bucket) FlushCollection(scope string, collection string) error {
+ b.RLock()
+ pool := b.pool
+ client := pool.client
+ b.RUnlock()
+ args := map[string]interface{}{"name": collection, "scope": scope}
+ return client.parsePostURLResponseTerse("/pools/default/buckets/"+uriAdj(b.Name)+"/collections-flush", args, nil)
+}
+
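
A hedged usage sketch for the new bucket DDL helpers; the endpoint, scope, and collection names are placeholders:

package main

import (
	couchbase "github.com/couchbase/go-couchbase"
)

func main() {
	bucket, err := couchbase.GetBucket("http://localhost:8091/", "default", "default")
	if err != nil {
		panic(err)
	}
	defer bucket.Close()

	// Create a scope, then a collection inside it.
	if err := bucket.CreateScope("myscope"); err != nil {
		panic(err)
	}
	if err := bucket.CreateCollection("myscope", "mycollection"); err != nil {
		panic(err)
	}

	// Tear down in reverse order; errors ignored for brevity.
	_ = bucket.DropCollection("myscope", "mycollection")
	_ = bucket.DropScope("myscope")
}
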
func (b *Bucket) getMasterNode(i int) string {
p := b.getConnPools(false /* not already locked */)
if len(p) > i {
@@ -580,6 +653,7 @@ func isHttpConnError(err error) bool {
}
var client *http.Client
+var clientForStreaming *http.Client
func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error) {
cfg := &tls.Config{}
@@ -612,6 +686,59 @@ func ClientConfigForX509(certFile, keyFile, rootFile string) (*tls.Config, error
return cfg, nil
}
+// This version of doHTTPRequest is for requests where the response connection is held open
+// for an extended duration, since each line of output is a new and significant update.
+//
+// The ordinary version of this method expects the results to arrive promptly, and
+// therefore uses an HTTP client with a timeout. That client is not suitable
+// for streaming use.
+func doHTTPRequestForStreaming(req *http.Request) (*http.Response, error) {
+ var err error
+ var res *http.Response
+
+ // we need a client that ignores certificate errors, since we self-sign
+ // our certs
+ if clientForStreaming == nil && req.URL.Scheme == "https" {
+ var tr *http.Transport
+
+ if skipVerify {
+ tr = &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ } else {
+ // Handle cases with cert
+
+ cfg, err := ClientConfigForX509(certFile, keyFile, rootFile)
+ if err != nil {
+ return nil, err
+ }
+
+ tr = &http.Transport{
+ TLSClientConfig: cfg,
+ }
+ }
+
+ clientForStreaming = &http.Client{Transport: tr, Timeout: 0}
+
+ } else if clientForStreaming == nil {
+ clientForStreaming = HTTPClientForStreaming
+ }
+
+ for i := 0; i < HTTP_MAX_RETRY; i++ {
+ res, err = clientForStreaming.Do(req)
+ if err != nil && isHttpConnError(err) {
+ continue
+ }
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
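
A standalone sketch of why streaming endpoints need their own client: http.Client.Timeout bounds the entire exchange, so any non-zero value eventually kills a long-lived bucketsStreaming connection, while Timeout: 0 leaves it open. The URL below is a placeholder:

package main

import (
	"bufio"
	"fmt"
	"net/http"
)

// Timeout: 0 means no overall limit, which is what a long-lived stream needs.
var streamClient = &http.Client{Timeout: 0}

func watch(url string) error {
	res, err := streamClient.Get(url)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	scanner := bufio.NewScanner(res.Body)
	for scanner.Scan() { // each line is a new configuration update
		fmt.Println("update:", scanner.Text())
	}
	return scanner.Err()
}

func main() {
	_ = watch("http://localhost:8091/pools/default/bucketsStreaming/default")
}
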
+
func doHTTPRequest(req *http.Request) (*http.Response, error) {
var err error
@@ -660,12 +787,16 @@ func doHTTPRequest(req *http.Request) (*http.Response, error) {
return res, err
}
-func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error {
- return doOutputAPI("PUT", baseURL, path, params, authHandler, out)
+func doPutAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("PUT", baseURL, path, params, authHandler, out, terse)
}
-func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}) error {
- return doOutputAPI("POST", baseURL, path, params, authHandler, out)
+func doPostAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("POST", baseURL, path, params, authHandler, out, terse)
+}
+
+func doDeleteAPI(baseURL *url.URL, path string, params map[string]interface{}, authHandler AuthHandler, out interface{}, terse bool) error {
+ return doOutputAPI("DELETE", baseURL, path, params, authHandler, out, terse)
}
func doOutputAPI(
@@ -674,7 +805,8 @@ func doOutputAPI(
path string,
params map[string]interface{},
authHandler AuthHandler,
- out interface{}) error {
+ out interface{},
+ terse bool) error {
var requestUrl string
@@ -707,16 +839,40 @@ func doOutputAPI(
}
defer res.Body.Close()
- if res.StatusCode != 200 {
+ // 200 - ok, 202 - accepted (asynchronously)
+ if res.StatusCode != 200 && res.StatusCode != 202 {
bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ if terse {
+ var outBuf interface{}
+
+ err := json.Unmarshal(bod, &outBuf)
+ if err == nil && outBuf != nil {
+ switch errText := outBuf.(type) {
+ case string:
+ return fmt.Errorf("%s", errText)
+ case map[string]interface{}:
+ errField := errText["errors"]
+ if errField != nil {
+
+ // remove annoying 'map' prefix
+ return fmt.Errorf("%s", strings.TrimPrefix(fmt.Sprintf("%v", errField), "map"))
+ }
+ }
+ }
+ return fmt.Errorf("%s", string(bod))
+ }
return fmt.Errorf("HTTP error %v getting %q: %s",
res.Status, requestUrl, bod)
}
d := json.NewDecoder(res.Body)
- if err = d.Decode(&out); err != nil {
- return err
+ // a PUT/POST/DELETE request may not have a response body
+ if d.More() {
+ if err = d.Decode(&out); err != nil {
+ return err
+ }
}
+
return nil
}
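
A self-contained sketch of the terse error handling introduced above: surface just the error text from a JSON error body, falling back to the raw bytes; the sample bodies are made up:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// terseError mirrors the terse branch above: a bare JSON string becomes the
// error text, an object's "errors" field is flattened, anything else is
// returned verbatim.
func terseError(bod []byte) error {
	var outBuf interface{}
	if err := json.Unmarshal(bod, &outBuf); err == nil && outBuf != nil {
		switch errText := outBuf.(type) {
		case string:
			return fmt.Errorf("%s", errText)
		case map[string]interface{}:
			if errField := errText["errors"]; errField != nil {
				// strip the "map" prefix from Go's default map formatting
				return fmt.Errorf("%s", strings.TrimPrefix(fmt.Sprintf("%v", errField), "map"))
			}
		}
	}
	return fmt.Errorf("%s", string(bod))
}

func main() {
	fmt.Println(terseError([]byte(`{"errors":{"name":"Bucket with given name already exists"}}`)))
	fmt.Println(terseError([]byte(`"plain error string"`)))
}
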
@@ -724,7 +880,8 @@ func queryRestAPI(
baseURL *url.URL,
path string,
authHandler AuthHandler,
- out interface{}) error {
+ out interface{},
+ terse bool) error {
var requestUrl string
@@ -752,13 +909,27 @@ func queryRestAPI(
defer res.Body.Close()
if res.StatusCode != 200 {
bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
+ if terse {
+ var outBuf interface{}
+
+ err := json.Unmarshal(bod, &outBuf)
+ if err == nil && outBuf != nil {
+ errText, ok := outBuf.(string)
+ if ok {
+ return fmt.Errorf("%s", errText)
+ }
+ }
+ return fmt.Errorf("%s", string(bod))
+ }
return fmt.Errorf("HTTP error %v getting %q: %s",
res.Status, requestUrl, bod)
}
d := json.NewDecoder(res.Body)
+ // a GET request should have a response body
if err = d.Decode(&out); err != nil {
- return err
+ return fmt.Errorf("json decode err: %#v, for requestUrl: %s",
+ err, requestUrl)
}
return nil
}
@@ -787,7 +958,7 @@ func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHa
return err
}
- res, err := doHTTPRequest(req)
+ res, err := doHTTPRequestForStreaming(req)
if err != nil {
return err
}
@@ -823,15 +994,31 @@ func (c *Client) processStream(baseURL *url.URL, path string, authHandler AuthHa
}
func (c *Client) parseURLResponse(path string, out interface{}) error {
- return queryRestAPI(c.BaseURL, path, c.ah, out)
+ return queryRestAPI(c.BaseURL, path, c.ah, out, false)
}
func (c *Client) parsePostURLResponse(path string, params map[string]interface{}, out interface{}) error {
- return doPostAPI(c.BaseURL, path, params, c.ah, out)
+ return doPostAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parsePostURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doPostAPI(c.BaseURL, path, params, c.ah, out, true)
+}
+
+func (c *Client) parseDeleteURLResponse(path string, params map[string]interface{}, out interface{}) error {
+ return doDeleteAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parseDeleteURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doDeleteAPI(c.BaseURL, path, params, c.ah, out, true)
}
func (c *Client) parsePutURLResponse(path string, params map[string]interface{}, out interface{}) error {
- return doPutAPI(c.BaseURL, path, params, c.ah, out)
+ return doPutAPI(c.BaseURL, path, params, c.ah, out, false)
+}
+
+func (c *Client) parsePutURLResponseTerse(path string, params map[string]interface{}, out interface{}) error {
+ return doPutAPI(c.BaseURL, path, params, c.ah, out, true)
}
func (b *Bucket) parseURLResponse(path string, out interface{}) error {
@@ -856,7 +1043,7 @@ func (b *Bucket) parseURLResponse(path string, out interface{}) error {
// Lock here to avoid having pool closed under us.
b.RLock()
- err := queryRestAPI(url, path, b.pool.client.ah, out)
+ err := queryRestAPI(url, path, b.pool.client.ah, out, false)
b.RUnlock()
if err == nil {
return err
@@ -900,7 +1087,7 @@ func (b *Bucket) parseAPIResponse(path string, out interface{}) error {
// MB-13770
requestPath := strings.Split(u.String(), u.Host)[1]
- err = queryRestAPI(u, requestPath, b.pool.client.ah, out)
+ err = queryRestAPI(u, requestPath, b.pool.client.ah, out, false)
b.RUnlock()
if err == nil {
return err
@@ -1165,6 +1352,7 @@ func (b *Bucket) GetCollectionsManifest() (*Manifest, error) {
if err != nil {
return nil, fmt.Errorf("Unable to get connection to retrieve collections manifest: %v. No collections access to bucket %s.", err, b.Name)
}
+ client.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
// We need to select the bucket before GetCollectionsManifest()
// will work. This is sometimes done at startup (see defaultMkConn())
@@ -1206,11 +1394,10 @@ func (b *Bucket) refresh(preserveConnections bool) error {
uri := b.URI
client := pool.client
b.RUnlock()
- tlsConfig := client.tlsConfig
var poolServices PoolServices
var err error
- if tlsConfig != nil {
+ if client.tlsConfig != nil {
poolServices, err = client.GetPoolServices("default")
if err != nil {
return err
@@ -1238,10 +1425,10 @@ func (b *Bucket) refresh(preserveConnections bool) error {
newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
for i := range newcps {
-
+ hostport := tmpb.VBSMJson.ServerList[i]
if preserveConnections {
- pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
- if pool != nil && pool.inUse == false {
+ pool := b.getConnPoolByHost(hostport, true /* bucket already locked */)
+ if pool != nil && pool.inUse == false && (!pool.encrypted || pool.tlsConfig == client.tlsConfig) {
// if the hostname and index is unchanged then reuse this pool
newcps[i] = pool
pool.inUse = true
@@ -1249,9 +1436,9 @@ func (b *Bucket) refresh(preserveConnections bool) error {
}
}
- hostport := tmpb.VBSMJson.ServerList[i]
- if tlsConfig != nil {
- hostport, err = MapKVtoSSL(hostport, &poolServices)
+ var encrypted bool
+ if client.tlsConfig != nil {
+ hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
if err != nil {
b.Unlock()
return err
@@ -1260,12 +1447,12 @@ func (b *Bucket) refresh(preserveConnections bool) error {
if b.ah != nil {
newcps[i] = newConnectionPool(hostport,
- b.ah, AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name)
+ b.ah, AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)
} else {
newcps[i] = newConnectionPool(hostport,
b.authHandler(true /* bucket already locked */),
- AsynchronousCloser, PoolSize, PoolOverflow, tlsConfig, b.Name)
+ AsynchronousCloser, PoolSize, PoolOverflow, client.tlsConfig, b.Name, encrypted)
}
}
b.replaceConnPools2(newcps, true /* bucket already locked */)
@@ -1301,6 +1488,7 @@ func (p *Pool) refresh() (err error) {
p.BucketMap[b.Name] = b
runtime.SetFinalizer(b, bucketFinalizer)
}
+ buckets = nil
return nil
}
@@ -1320,6 +1508,9 @@ func (c *Client) GetPool(name string) (p Pool, err error) {
}
err = c.parseURLResponse(poolURI, &p)
+ if err != nil {
+ return p, err
+ }
p.client = c
@@ -1427,15 +1618,32 @@ func (p *Pool) GetClient() *Client {
// Release bucket connections when the pool is no longer in use
func (p *Pool) Close() {
+
+ // MB-36186 make the bucket map inaccessible
+ bucketMap := p.BucketMap
+ p.BucketMap = nil
+
// fine to loop through the buckets unlocked
// locking happens at the bucket level
- for b, _ := range p.BucketMap {
+ for b, _ := range bucketMap {
+
+ // MB-36186 make the bucket unreachable and avoid concurrent read/write map panics
+ bucket := bucketMap[b]
+ bucketMap[b] = nil
- // MB-33208 defer closing connection pools until the bucket is no longer used
- bucket := p.BucketMap[b]
bucket.Lock()
+
+ // MB-33208 defer closing connection pools until the bucket is no longer used
+ // MB-36186 if the bucket is unused make it unreachable straight away
+ needClose := bucket.connPools == nil && !bucket.closed
+ if needClose {
+ runtime.SetFinalizer(&bucket, nil)
+ }
bucket.closed = true
bucket.Unlock()
+ if needClose {
+ bucket.Close()
+ }
}
}
@@ -1472,3 +1680,67 @@ func ConnectWithAuthAndGetBucket(endpoint, poolname, bucketname string,
return pool.GetBucket(bucketname)
}
+
+func GetSystemBucket(c *Client, p *Pool, name string) (*Bucket, error) {
+ bucket, err := p.GetBucket(name)
+ if err != nil {
+ if _, ok := err.(*BucketNotFoundError); !ok {
+ return nil, err
+ }
+
+ // create the bucket if not found
+ args := map[string]interface{}{
+ "authType": "sasl",
+ "bucketType": "couchbase",
+ "name": name,
+ "ramQuotaMB": 100,
+ "saslPassword": "donotuse",
+ }
+ var ret interface{}
+ // allow "bucket already exists" error in case duplicate create
+ // (e.g. two query nodes starting at same time)
+ err = c.parsePostURLResponseTerse("/pools/default/buckets", args, &ret)
+ if err != nil && !AlreadyExistsError(err) {
+ return nil, err
+ }
+
+ // bucket created asynchronously, try to get the bucket
+ maxRetry := 8
+ interval := 100 * time.Millisecond
+ for i := 0; i < maxRetry; i++ {
+ time.Sleep(interval)
+ interval *= 2
+ err = p.refresh()
+ if err != nil {
+ return nil, err
+ }
+ bucket, err = p.GetBucket(name)
+ if bucket != nil {
+ bucket.RLock()
+ ok := !bucket.closed && len(bucket.getConnPools(true /* already locked */)) > 0
+ bucket.RUnlock()
+ if ok {
+ break
+ }
+ } else if err != nil {
+ if _, ok := err.(*BucketNotFoundError); !ok {
+ break
+ }
+ }
+ }
+ }
+
+ return bucket, err
+}
+
+func DropSystemBucket(c *Client, name string) error {
+ err := c.parseDeleteURLResponseTerse("/pools/default/buckets/"+name, nil, nil)
+ return err
+}
+
+func AlreadyExistsError(err error) bool {
+ // Bucket error: Bucket with given name already exists
+ // Scope error: Scope with this name already exists
+ // Collection error: Collection with this name already exists
+ return strings.Contains(err.Error(), " name already exists")
+}
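
The retry loop inside GetSystemBucket, distilled into a standalone sketch: poll with exponential backoff, doubling from 100ms for up to 8 attempts (the values used above), until an asynchronously created resource is ready. The ready callback is a stand-in:

package main

import (
	"errors"
	"fmt"
	"time"
)

func waitFor(ready func() (bool, error)) error {
	interval := 100 * time.Millisecond
	for i := 0; i < 8; i++ { // maxRetry above
		time.Sleep(interval)
		interval *= 2 // exponential backoff
		ok, err := ready()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
	}
	return errors.New("timed out waiting for bucket")
}

func main() {
	n := 0
	err := waitFor(func() (bool, error) {
		n++
		return n >= 3, nil // pretend the bucket appears on the third poll
	})
	fmt.Println(err)
}
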
diff --git a/vendor/github.com/couchbase/go-couchbase/port_map.go b/vendor/github.com/couchbase/go-couchbase/port_map.go
new file mode 100644
index 0000000000..864bd4aedb
--- /dev/null
+++ b/vendor/github.com/couchbase/go-couchbase/port_map.go
@@ -0,0 +1,106 @@
+package couchbase
+
+/*
+
+The goal here is to map a hostname:port combination to another hostname:port
+combination. The original hostname:port gives the name and regular KV port
+of a couchbase server. We want to determine the corresponding SSL KV port.
+
+To do this, we have a pool services structure, as obtained from
+the /pools/default/nodeServices API.
+
+For a fully configured two-node system, the structure may look like this:
+{"rev":32,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"},
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]}
+
+In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields.
+
+For a single-node system, perhaps brought up for testing, the structure may look like this:
+{"rev":66,"nodesExt":[
+ {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}
+
+Here, note that there is only a single entry in the "nodesExt" array and that it does not have a "hostname" field.
+We will assume that either hostname fields are present, or there is only a single node.
+*/
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "strconv"
+)
+
+func ParsePoolServices(jsonInput string) (*PoolServices, error) {
+ ps := &PoolServices{}
+ err := json.Unmarshal([]byte(jsonInput), ps)
+ return ps, err
+}
+
+// Accepts a "host:port" string representing the KV TCP port and the pools
+// nodeServices payload and returns a host:port string representing the KV
+// TLS port on the same node as the KV TCP port.
+// Returns the original host:port in the case of local communication (services
+// on the same node as the source); the boolean result reports whether the
+// returned endpoint is the encrypted (TLS) port.
+func MapKVtoSSL(hostport string, ps *PoolServices) (string, bool, error) {
+ return MapKVtoSSLExt(hostport, ps, false)
+}
+
+func MapKVtoSSLExt(hostport string, ps *PoolServices, force bool) (string, bool, error) {
+ host, port, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return "", false, fmt.Errorf("Unable to split hostport %s: %v", hostport, err)
+ }
+
+ portInt, err := strconv.Atoi(port)
+ if err != nil {
+ return "", false, fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err)
+ }
+
+ var ns *NodeServices
+ for i := range ps.NodesExt {
+ hostname := ps.NodesExt[i].Hostname
+ if len(hostname) != 0 && hostname != host {
+ /* If the hostname is the empty string, it means the node (and by extension
+ the cluster) is configured on the loopback. Further, it means that the client
+ should use whatever hostname it used to get the nodeServices information in
+ the first place to access the cluster. Thus, when the hostname is empty in
+ the nodeService entry we can assume that client will use the hostname it used
+ to access the KV TCP endpoint - and thus that it automatically "matches".
+ If hostname is not empty and doesn't match then we move to the next entry.
+ */
+ continue
+ }
+ kvPort, found := ps.NodesExt[i].Services["kv"]
+ if !found {
+ /* not a node with a KV service */
+ continue
+ }
+ if kvPort == portInt {
+ ns = &(ps.NodesExt[i])
+ break
+ }
+ }
+
+ if ns == nil {
+ return "", false, fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt))
+ }
+ kvSSL, found := ns.Services["kvSSL"]
+ if !found {
+ return "", false, fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport)
+ }
+
+ // Don't encrypt for communication between local nodes
+ if !force && (len(ns.Hostname) == 0 || ns.ThisNode) {
+ return hostport, false, nil
+ }
+
+ ip := net.ParseIP(host)
+ if ip != nil && ip.To4() == nil && ip.To16() != nil { // IPv6 and not a FQDN
+ // Prefix and suffix square brackets as SplitHostPort removes them,
+ // see: https://golang.org/pkg/net/#SplitHostPort
+ host = "[" + host + "]"
+ }
+
+ return fmt.Sprintf("%s:%d", host, kvSSL), true, nil
+}
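
A sketch that runs the two-node sample from the comment above through the new helpers; the hostnames and ports come from that sample, trimmed to the relevant services:

package main

import (
	"fmt"

	couchbase "github.com/couchbase/go-couchbase"
)

func main() {
	const sample = `{"rev":32,"nodesExt":[
	 {"services":{"mgmt":8091,"kv":11210,"kvSSL":11207},"hostname":"172.23.123.101"},
	 {"services":{"mgmt":8091,"kv":11210,"kvSSL":11207},"thisNode":true,"hostname":"172.23.123.102"}]}`

	ps, err := couchbase.ParsePoolServices(sample)
	if err != nil {
		panic(err)
	}

	hostport, encrypted, err := couchbase.MapKVtoSSL("172.23.123.101:11210", ps)
	if err != nil {
		panic(err)
	}
	// Expect "172.23.123.101:11207" and encrypted == true; the node marked
	// thisNode would come back unchanged with encrypted == false.
	fmt.Println(hostport, encrypted)
}
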
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/streaming.go b/vendor/github.com/couchbase/go-couchbase/streaming.go
index 6d8f7dfd53..ecf5be9932 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/streaming.go
+++ b/vendor/github.com/couchbase/go-couchbase/streaming.go
@@ -22,6 +22,7 @@ const MAX_RETRY_COUNT = 5
const DISCONNECT_PERIOD = 120 * time.Second
type NotifyFn func(bucket string, err error)
+type StreamingFn func(bucket *Bucket)
// Use TCP keepalive to detect half-closed sockets
var updaterTransport http.RoundTripper = &http.Transport{
@@ -55,8 +56,12 @@ func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) {
}
func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
+ b.RunBucketUpdater2(nil, notify)
+}
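
A hedged usage sketch of the new streaming hook: RunBucketUpdater2 invokes the StreamingFn on every new bucket configuration and the NotifyFn if the updater gives up. The endpoint and bucket name are placeholders:

package main

import (
	"fmt"
	"time"

	couchbase "github.com/couchbase/go-couchbase"
)

func main() {
	bucket, err := couchbase.GetBucket("http://localhost:8091/", "default", "default")
	if err != nil {
		panic(err)
	}
	defer bucket.Close()

	bucket.RunBucketUpdater2(
		func(b *couchbase.Bucket) { fmt.Println("new config for", b.GetName()) },
		func(name string, err error) { fmt.Println("updater stopped:", name, err) },
	)

	time.Sleep(time.Minute) // keep the process alive for the background updater
}
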
+
+func (b *Bucket) RunBucketUpdater2(streamingFn StreamingFn, notify NotifyFn) {
go func() {
- err := b.UpdateBucket()
+ err := b.UpdateBucket2(streamingFn)
if err != nil {
if notify != nil {
notify(b.GetName(), err)
@@ -84,19 +89,13 @@ func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) {
}
func (b *Bucket) UpdateBucket() error {
+ return b.UpdateBucket2(nil)
+}
+func (b *Bucket) UpdateBucket2(streamingFn StreamingFn) error {
var failures int
var returnErr error
-
var poolServices PoolServices
- var err error
- tlsConfig := b.pool.client.tlsConfig
- if tlsConfig != nil {
- poolServices, err = b.pool.client.GetPoolServices("default")
- if err != nil {
- return err
- }
- }
for {
@@ -113,7 +112,7 @@ func (b *Bucket) UpdateBucket() error {
startNode := rand.Intn(len(nodes))
node := nodes[(startNode)%len(nodes)]
- streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, b.GetName())
+ streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, uriAdj(b.GetName()))
logging.Infof(" Trying with %s", streamUrl)
req, err := http.NewRequest("GET", streamUrl, nil)
if err != nil {
@@ -156,6 +155,16 @@ func (b *Bucket) UpdateBucket() error {
// if we got here, reset failure count
failures = 0
+
+ if b.pool.client.tlsConfig != nil {
+ poolServices, err = b.pool.client.GetPoolServices("default")
+ if err != nil {
+ returnErr = err
+ res.Body.Close()
+ break
+ }
+ }
+
b.Lock()
// mark all the old connection pools for deletion
@@ -170,16 +179,17 @@ func (b *Bucket) UpdateBucket() error {
for i := range newcps {
// get the old connection pool and check if it is still valid
pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
- if pool != nil && pool.inUse == false {
+ if pool != nil && pool.inUse == false && pool.tlsConfig == b.pool.client.tlsConfig {
// if the hostname and index is unchanged then reuse this pool
newcps[i] = pool
pool.inUse = true
continue
}
// else create a new pool
+ var encrypted bool
hostport := tmpb.VBSMJson.ServerList[i]
- if tlsConfig != nil {
- hostport, err = MapKVtoSSL(hostport, &poolServices)
+ if b.pool.client.tlsConfig != nil {
+ hostport, encrypted, err = MapKVtoSSL(hostport, &poolServices)
if err != nil {
b.Unlock()
return err
@@ -187,12 +197,12 @@ func (b *Bucket) UpdateBucket() error {
}
if b.ah != nil {
newcps[i] = newConnectionPool(hostport,
- b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name)
+ b.ah, false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)
} else {
newcps[i] = newConnectionPool(hostport,
b.authHandler(true /* bucket already locked */),
- false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name)
+ false, PoolSize, PoolOverflow, b.pool.client.tlsConfig, b.Name, encrypted)
}
}
@@ -203,7 +213,10 @@ func (b *Bucket) UpdateBucket() error {
b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
b.Unlock()
- logging.Infof("Got new configuration for bucket %s", b.GetName())
+ if streamingFn != nil {
+ streamingFn(tmpb)
+ }
+ logging.Debugf("Got new configuration for bucket %s", b.GetName())
}
// we are here because of an error
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/tap.go b/vendor/github.com/couchbase/go-couchbase/tap.go
index 86edd30554..86edd30554 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/tap.go
+++ b/vendor/github.com/couchbase/go-couchbase/tap.go
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/upr.go b/vendor/github.com/couchbase/go-couchbase/upr.go
index bf1b209b7e..844bf91510 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/upr.go
+++ b/vendor/github.com/couchbase/go-couchbase/upr.go
@@ -88,6 +88,7 @@ func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
// close the connection so that it doesn't get reused for upr data
// connection
defer mc.Close()
+ mc.SetDeadline(getDeadline(time.Time{}, DefaultTimeout))
failoverlogs, err := mc.UprGetFailoverLog(vbList)
if err != nil {
return nil, fmt.Errorf("Error getting failover log %s host %s",
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/users.go b/vendor/github.com/couchbase/go-couchbase/users.go
index 47d4861522..4e8f962908 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/users.go
+++ b/vendor/github.com/couchbase/go-couchbase/users.go
@@ -13,8 +13,10 @@ type User struct {
}
type Role struct {
- Role string
- BucketName string `json:"bucket_name"`
+ Role string
+ BucketName string `json:"bucket_name"`
+ ScopeName string `json:"scope_name"`
+ CollectionName string `json:"collection_name"`
}
// Sample:
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/util.go b/vendor/github.com/couchbase/go-couchbase/util.go
index 4d286a3271..4d286a3271 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/util.go
+++ b/vendor/github.com/couchbase/go-couchbase/util.go
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/vbmap.go b/vendor/github.com/couchbase/go-couchbase/vbmap.go
index b96a18ed57..b96a18ed57 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/vbmap.go
+++ b/vendor/github.com/couchbase/go-couchbase/vbmap.go
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/views.go b/vendor/github.com/couchbase/go-couchbase/views.go
index 2f68642f5a..2f68642f5a 100644
--- a/vendor/github.com/couchbaselabs/go-couchbase/views.go
+++ b/vendor/github.com/couchbase/go-couchbase/views.go
diff --git a/vendor/github.com/couchbase/gomemcached/client/collections_filter.go b/vendor/github.com/couchbase/gomemcached/client/collections_filter.go
index 0bedae1c35..4da8b8f426 100644
--- a/vendor/github.com/couchbase/gomemcached/client/collections_filter.go
+++ b/vendor/github.com/couchbase/gomemcached/client/collections_filter.go
@@ -45,7 +45,7 @@ type streamIdNonResumeScopeMeta struct {
}
func (c *CollectionsFilter) IsValid() error {
- if c.UseManifestUid {
+ if c.UseManifestUid && c.UseStreamId {
return fmt.Errorf("Not implemented yet")
}
@@ -99,8 +99,10 @@ func (c *CollectionsFilter) ToStreamReqBody() ([]byte, error) {
case false:
switch c.UseManifestUid {
case true:
- // TODO
- return nil, fmt.Errorf("NotImplemented1")
+ filter := &nonStreamIdResumeScopeMeta{
+ ManifestId: fmt.Sprintf("%x", c.ManifestUid),
+ }
+ output = *filter
case false:
switch len(c.CollectionsList) > 0 {
case true:
diff --git a/vendor/github.com/couchbase/gomemcached/client/mc.go b/vendor/github.com/couchbase/gomemcached/client/mc.go
index 66c897c5d6..16dd2f8f7c 100644
--- a/vendor/github.com/couchbase/gomemcached/client/mc.go
+++ b/vendor/github.com/couchbase/gomemcached/client/mc.go
@@ -19,8 +19,8 @@ import (
)
type ClientIface interface {
- Add(vb uint16, key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error)
- Append(vb uint16, key string, data []byte) (*gomemcached.MCResponse, error)
+ Add(vb uint16, key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ Append(vb uint16, key string, data []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
Auth(user, pass string) (*gomemcached.MCResponse, error)
AuthList() (*gomemcached.MCResponse, error)
AuthPlain(user, pass string) (*gomemcached.MCResponse, error)
@@ -30,44 +30,87 @@ type ClientIface interface {
CollectionsGetCID(scope string, collection string) (*gomemcached.MCResponse, error)
CollectionEnabled() bool
Close() error
- Decr(vb uint16, key string, amt, def uint64, exp int) (uint64, error)
- Del(vb uint16, key string) (*gomemcached.MCResponse, error)
+ Decr(vb uint16, key string, amt, def uint64, exp int, context ...*ClientContext) (uint64, error)
+ Del(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
EnableMutationToken() (*gomemcached.MCResponse, error)
EnableFeatures(features Features) (*gomemcached.MCResponse, error)
- Get(vb uint16, key string) (*gomemcached.MCResponse, error)
+ Get(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetAllVbSeqnos(vbSeqnoMap map[uint16]uint64, context ...*ClientContext) (map[uint16]uint64, error)
+ GetAndTouch(vb uint16, key string, exp int, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string, context ...*ClientContext) error
GetCollectionsManifest() (*gomemcached.MCResponse, error)
- GetFromCollection(vb uint16, cid uint32, key string) (*gomemcached.MCResponse, error)
- GetSubdoc(vb uint16, key string, subPaths []string) (*gomemcached.MCResponse, error)
- GetAndTouch(vb uint16, key string, exp int) (*gomemcached.MCResponse, error)
- GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string) error
- GetMeta(vb uint16, key string) (*gomemcached.MCResponse, error)
- GetRandomDoc() (*gomemcached.MCResponse, error)
+ GetMeta(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetRandomDoc(context ...*ClientContext) (*gomemcached.MCResponse, error)
+ GetSubdoc(vb uint16, key string, subPaths []string, context ...*ClientContext) (*gomemcached.MCResponse, error)
Hijack() io.ReadWriteCloser
- Incr(vb uint16, key string, amt, def uint64, exp int) (uint64, error)
+ Incr(vb uint16, key string, amt, def uint64, exp int, context ...*ClientContext) (uint64, error)
Observe(vb uint16, key string) (result ObserveResult, err error)
ObserveSeq(vb uint16, vbuuid uint64) (result *ObserveSeqResult, err error)
Receive() (*gomemcached.MCResponse, error)
ReceiveWithDeadline(deadline time.Time) (*gomemcached.MCResponse, error)
Send(req *gomemcached.MCRequest) (rv *gomemcached.MCResponse, err error)
- Set(vb uint16, key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error)
+ Set(vb uint16, key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
SetKeepAliveOptions(interval time.Duration)
SetReadDeadline(t time.Time)
SetDeadline(t time.Time)
SelectBucket(bucket string) (*gomemcached.MCResponse, error)
- SetCas(vb uint16, key string, flags int, exp int, cas uint64, body []byte) (*gomemcached.MCResponse, error)
+ SetCas(vb uint16, key string, flags int, exp int, cas uint64, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error)
Stats(key string) ([]StatValue, error)
StatsMap(key string) (map[string]string, error)
StatsMapForSpecifiedStats(key string, statsMap map[string]string) error
Transmit(req *gomemcached.MCRequest) error
TransmitWithDeadline(req *gomemcached.MCRequest, deadline time.Time) error
TransmitResponse(res *gomemcached.MCResponse) error
+ UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error)
// UprFeed Related
NewUprFeed() (*UprFeed, error)
NewUprFeedIface() (UprFeedIface, error)
NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error)
NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error)
- UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error)
+}
+
+type ClientContext struct {
+ // Collection-based context
+ CollId uint32
+
+ // VB-state related context
+ // nil means not used in this context
+ VbState *VbStateType
+}
+
+type VbStateType uint8
+
+const (
+ VbAlive VbStateType = 0x00
+ VbActive VbStateType = 0x01
+ VbReplica VbStateType = 0x02
+ VbPending VbStateType = 0x03
+ VbDead VbStateType = 0x04
+)
+
+func (context *ClientContext) InitExtras(req *gomemcached.MCRequest, client *Client) {
+ if req == nil || client == nil {
+ return
+ }
+
+ var bytesToAllocate int
+ switch req.Opcode {
+ case gomemcached.GET_ALL_VB_SEQNOS:
+ if context.VbState != nil {
+ bytesToAllocate += 4
+ }
+ if client.CollectionEnabled() {
+ if context.VbState == nil {
+ bytesToAllocate += 8
+ } else {
+ bytesToAllocate += 4
+ }
+ }
+ }
+ if bytesToAllocate > 0 {
+ req.Extras = make([]byte, bytesToAllocate)
+ }
}
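
A standalone sketch of the GET_ALL_VB_SEQNOS extras layout implied by InitExtras here and setVbSeqnoContext below: four big-endian bytes of vbucket state, followed by four bytes of collection id when the client is collections-aware. The helper name buildExtras is hypothetical, and the real code also handles the case where no VbState is supplied:

package main

import (
	"encoding/binary"
	"fmt"
)

func buildExtras(vbState uint32, collectionsEnabled bool, collId uint32) []byte {
	size := 4 // vbucket state
	if collectionsEnabled {
		size += 4 // collection id
	}
	extras := make([]byte, size)
	binary.BigEndian.PutUint32(extras[0:4], vbState)
	if collectionsEnabled {
		binary.BigEndian.PutUint32(extras[4:8], collId)
	}
	return extras
}

func main() {
	fmt.Printf("%x\n", buildExtras(1 /* VbActive */, true, 8)) // 0000000100000008
}
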
const bufsize = 1024
@@ -102,8 +145,8 @@ type Client struct {
hdrBuf []byte
- featureMtx sync.RWMutex
- sentHeloFeatures Features
+ collectionsEnabled uint32
+ deadline time.Time
}
var (
@@ -156,7 +199,11 @@ func (c *Client) SetReadDeadline(t time.Time) {
}
func (c *Client) SetDeadline(t time.Time) {
+ if t.Equal(c.deadline) {
+ return
+ }
c.conn.SetDeadline(t)
+ c.deadline = t
}
// Wrap an existing transport.
@@ -287,60 +334,103 @@ func (c *Client) EnableMutationToken() (*gomemcached.MCResponse, error) {
// Send a hello command to enable specific features
func (c *Client) EnableFeatures(features Features) (*gomemcached.MCResponse, error) {
var payload []byte
+ collectionsEnabled := 0
for _, feature := range features {
+ if feature == FeatureCollections {
+ collectionsEnabled = 1
+ }
payload = append(payload, 0, 0)
binary.BigEndian.PutUint16(payload[len(payload)-2:], uint16(feature))
}
- c.featureMtx.Lock()
- c.sentHeloFeatures = features
- c.featureMtx.Unlock()
-
- return c.Send(&gomemcached.MCRequest{
+ rv, err := c.Send(&gomemcached.MCRequest{
Opcode: gomemcached.HELLO,
Key: []byte("GoMemcached"),
Body: payload,
})
+ if err == nil && collectionsEnabled != 0 {
+ atomic.StoreUint32(&c.collectionsEnabled, uint32(collectionsEnabled))
+ }
+ return rv, err
}
-// Get the value for a key.
-func (c *Client) Get(vb uint16, key string) (*gomemcached.MCResponse, error) {
- return c.Send(&gomemcached.MCRequest{
- Opcode: gomemcached.GET,
- VBucket: vb,
- Key: []byte(key),
- })
+// Sets collection info for a request
+func (c *Client) setCollection(req *gomemcached.MCRequest, context ...*ClientContext) error {
+ req.CollIdLen = 0
+ collectionId := uint32(0)
+ if len(context) > 0 {
+ collectionId = context[0].CollId
+ }
+
+ // if the optional collection is specified, it must be the default collection for clients that haven't turned on collections
+ if atomic.LoadUint32(&c.collectionsEnabled) == 0 {
+ if collectionId != 0 {
+ return fmt.Errorf("Client does not use collections but a collection was specified")
+ }
+ } else {
+ req.CollIdLen = binary.PutUvarint(req.CollId[:], uint64(collectionId))
+ }
+ return nil
}
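
setCollection stores the collection id in the request as an unsigned LEB128 varint via binary.PutUvarint; a small sketch of that encoding for a few illustrative ids:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen32)
	for _, cid := range []uint64{0, 8, 200} {
		n := binary.PutUvarint(buf, cid)
		fmt.Printf("collection %d -> %x (%d byte(s))\n", cid, buf[:n], n)
	}
}
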
-// Get the value for a key from a collection, identified by collection id.
-func (c *Client) GetFromCollection(vb uint16, cid uint32, key string) (*gomemcached.MCResponse, error) {
- keyBytes := []byte(key)
- encodedCid := make([]byte, binary.MaxVarintLen32)
- lenEncodedCid := binary.PutUvarint(encodedCid, uint64(cid))
- encodedKey := make([]byte, 0, lenEncodedCid+len(keyBytes))
- encodedKey = append(encodedKey, encodedCid[0:lenEncodedCid]...)
- encodedKey = append(encodedKey, keyBytes...)
+func (c *Client) setVbSeqnoContext(req *gomemcached.MCRequest, context ...*ClientContext) error {
+ if len(context) == 0 || req == nil {
+ return nil
+ }
- return c.Send(&gomemcached.MCRequest{
+ switch req.Opcode {
+ case gomemcached.GET_ALL_VB_SEQNOS:
+
+ if len(req.Extras) == 0 {
+ context[0].InitExtras(req, c)
+ }
+ if context[0].VbState != nil {
+ binary.BigEndian.PutUint32(req.Extras, uint32(*(context[0].VbState)))
+ }
+ if c.CollectionEnabled() {
+ binary.BigEndian.PutUint32(req.Extras[4:8], context[0].CollId)
+ }
+ return nil
+ default:
+ return fmt.Errorf("setVbState Not supported for opcode: %v", req.Opcode.String())
+ }
+}
+
+// Get the value for a key.
+func (c *Client) Get(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
Opcode: gomemcached.GET,
VBucket: vb,
- Key: encodedKey,
- })
+ Key: []byte(key),
+ }
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
}
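A minimal usage sketch for the new optional context parameter, assuming a reachable KV node and omitting authentication and bucket selection; the address and collection id below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	memcached "github.com/couchbase/gomemcached/client"
)

func main() {
	client, err := memcached.Connect("tcp", "127.0.0.1:11210") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Collections must be negotiated first; setCollection rejects a
	// non-zero CollId otherwise.
	if _, err = client.EnableFeatures(memcached.Features{memcached.FeatureCollections}); err != nil {
		log.Fatal(err)
	}

	ctx := &memcached.ClientContext{CollId: 8} // placeholder collection id
	res, err := client.Get(0, "some-key", ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("value: %s cas: %v\n", res.Body, res.Cas)
}
```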
// Get the xattrs, doc value for the input key
-func (c *Client) GetSubdoc(vb uint16, key string, subPaths []string) (*gomemcached.MCResponse, error) {
-
+func (c *Client) GetSubdoc(vb uint16, key string, subPaths []string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
extraBuf, valueBuf := GetSubDocVal(subPaths)
- res, err := c.Send(&gomemcached.MCRequest{
+ req := &gomemcached.MCRequest{
Opcode: gomemcached.SUBDOC_MULTI_LOOKUP,
VBucket: vb,
Key: []byte(key),
Extras: extraBuf,
Body: valueBuf,
- })
+ }
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := c.Send(req)
if err != nil && IfResStatusError(res) {
return res, err
@@ -376,48 +466,56 @@ func (c *Client) CollectionsGetCID(scope string, collection string) (*gomemcache
}
func (c *Client) CollectionEnabled() bool {
- c.featureMtx.RLock()
- defer c.featureMtx.RUnlock()
-
- for _, feature := range c.sentHeloFeatures {
- if feature == FeatureCollections {
- return true
- }
- }
- return false
+ return atomic.LoadUint32(&c.collectionsEnabled) > 0
}
// Get the value for a key, and update expiry
-func (c *Client) GetAndTouch(vb uint16, key string, exp int) (*gomemcached.MCResponse, error) {
+func (c *Client) GetAndTouch(vb uint16, key string, exp int, context ...*ClientContext) (*gomemcached.MCResponse, error) {
extraBuf := make([]byte, 4)
binary.BigEndian.PutUint32(extraBuf[0:], uint32(exp))
- return c.Send(&gomemcached.MCRequest{
+ req := &gomemcached.MCRequest{
Opcode: gomemcached.GAT,
VBucket: vb,
Key: []byte(key),
Extras: extraBuf,
- })
+ }
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
}
// Get metadata for a key
-func (c *Client) GetMeta(vb uint16, key string) (*gomemcached.MCResponse, error) {
- return c.Send(&gomemcached.MCRequest{
+func (c *Client) GetMeta(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
Opcode: gomemcached.GET_META,
VBucket: vb,
Key: []byte(key),
- })
+ }
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
}
// Del deletes a key.
-func (c *Client) Del(vb uint16, key string) (*gomemcached.MCResponse, error) {
- return c.Send(&gomemcached.MCRequest{
+func (c *Client) Del(vb uint16, key string, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ req := &gomemcached.MCRequest{
Opcode: gomemcached.DELETE,
VBucket: vb,
- Key: []byte(key)})
+ Key: []byte(key),
+ }
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+ return c.Send(req)
}
// Get a random document
-func (c *Client) GetRandomDoc() (*gomemcached.MCResponse, error) {
+func (c *Client) GetRandomDoc(context ...*ClientContext) (*gomemcached.MCResponse, error) {
return c.Send(&gomemcached.MCRequest{
Opcode: 0xB6, // GET_RANDOM_KEY
})
@@ -522,8 +620,7 @@ func (c *Client) SelectBucket(bucket string) (*gomemcached.MCResponse, error) {
}
func (c *Client) store(opcode gomemcached.CommandCode, vb uint16,
- key string, flags int, exp int, body []byte) (*gomemcached.MCResponse, error) {
-
+ key string, flags int, exp int, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
req := &gomemcached.MCRequest{
Opcode: opcode,
VBucket: vb,
@@ -533,13 +630,16 @@ func (c *Client) store(opcode gomemcached.CommandCode, vb uint16,
Extras: []byte{0, 0, 0, 0, 0, 0, 0, 0},
Body: body}
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
return c.Send(req)
}
func (c *Client) storeCas(opcode gomemcached.CommandCode, vb uint16,
- key string, flags int, exp int, cas uint64, body []byte) (*gomemcached.MCResponse, error) {
-
+ key string, flags int, exp int, cas uint64, body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
req := &gomemcached.MCRequest{
Opcode: opcode,
VBucket: vb,
@@ -549,20 +649,29 @@ func (c *Client) storeCas(opcode gomemcached.CommandCode, vb uint16,
Extras: []byte{0, 0, 0, 0, 0, 0, 0, 0},
Body: body}
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
+
binary.BigEndian.PutUint64(req.Extras, uint64(flags)<<32|uint64(exp))
return c.Send(req)
}
// Incr increments the value at the given key.
func (c *Client) Incr(vb uint16, key string,
- amt, def uint64, exp int) (uint64, error) {
-
+ amt, def uint64, exp int, context ...*ClientContext) (uint64, error) {
req := &gomemcached.MCRequest{
Opcode: gomemcached.INCREMENT,
VBucket: vb,
Key: []byte(key),
Extras: make([]byte, 8+8+4),
}
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return 0, err
+ }
+
binary.BigEndian.PutUint64(req.Extras[:8], amt)
binary.BigEndian.PutUint64(req.Extras[8:16], def)
binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
@@ -577,14 +686,18 @@ func (c *Client) Incr(vb uint16, key string,
// Decr decrements the value at the given key.
func (c *Client) Decr(vb uint16, key string,
- amt, def uint64, exp int) (uint64, error) {
-
+ amt, def uint64, exp int, context ...*ClientContext) (uint64, error) {
req := &gomemcached.MCRequest{
Opcode: gomemcached.DECREMENT,
VBucket: vb,
Key: []byte(key),
Extras: make([]byte, 8+8+4),
}
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return 0, err
+ }
+
binary.BigEndian.PutUint64(req.Extras[:8], amt)
binary.BigEndian.PutUint64(req.Extras[8:16], def)
binary.BigEndian.PutUint32(req.Extras[16:20], uint32(exp))
@@ -599,24 +712,24 @@ func (c *Client) Decr(vb uint16, key string,
// Add a value for a key (store if not exists).
func (c *Client) Add(vb uint16, key string, flags int, exp int,
- body []byte) (*gomemcached.MCResponse, error) {
- return c.store(gomemcached.ADD, vb, key, flags, exp, body)
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.store(gomemcached.ADD, vb, key, flags, exp, body, context...)
}
// Set the value for a key.
func (c *Client) Set(vb uint16, key string, flags int, exp int,
- body []byte) (*gomemcached.MCResponse, error) {
- return c.store(gomemcached.SET, vb, key, flags, exp, body)
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.store(gomemcached.SET, vb, key, flags, exp, body, context...)
}
// SetCas set the value for a key with cas
func (c *Client) SetCas(vb uint16, key string, flags int, exp int, cas uint64,
- body []byte) (*gomemcached.MCResponse, error) {
- return c.storeCas(gomemcached.SET, vb, key, flags, exp, cas, body)
+ body []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
+ return c.storeCas(gomemcached.SET, vb, key, flags, exp, cas, body, context...)
}
// Append data to the value of a key.
-func (c *Client) Append(vb uint16, key string, data []byte) (*gomemcached.MCResponse, error) {
+func (c *Client) Append(vb uint16, key string, data []byte, context ...*ClientContext) (*gomemcached.MCResponse, error) {
req := &gomemcached.MCRequest{
Opcode: gomemcached.APPEND,
VBucket: vb,
@@ -625,11 +738,15 @@ func (c *Client) Append(vb uint16, key string, data []byte) (*gomemcached.MCResp
Opaque: 0,
Body: data}
+ err := c.setCollection(req, context...)
+ if err != nil {
+ return nil, err
+ }
return c.Send(req)
}
// GetBulk gets keys in bulk
-func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string) error {
+func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MCResponse, subPaths []string, context ...*ClientContext) error {
stopch := make(chan bool)
var wg sync.WaitGroup
@@ -698,6 +815,10 @@ func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MC
Opcode: gomemcached.GET,
VBucket: vb,
}
+ err := c.setCollection(memcachedReqPkt, context...)
+ if err != nil {
+ return err
+ }
if len(subPaths) > 0 {
extraBuf, valueBuf := GetSubDocVal(subPaths)
@@ -719,7 +840,7 @@ func (c *Client) GetBulk(vb uint16, keys []string, rv map[string]*gomemcached.MC
} // End of Get request
// finally transmit a NOOP
- err := c.Transmit(&gomemcached.MCRequest{
+ err = c.Transmit(&gomemcached.MCRequest{
Opcode: gomemcached.NOOP,
VBucket: vb,
Opaque: c.opaque,
@@ -747,7 +868,10 @@ func GetSubDocVal(subPaths []string) (extraBuf, valueBuf []byte) {
}
// Xattr retrieval - subdoc multi get
- extraBuf = append(extraBuf, uint8(0x04))
+ // Set the deleted flag only when the lookup is not just for expiration
+ if len(subPaths) != 1 || subPaths[0] != "$document.exptime" {
+ extraBuf = append(extraBuf, uint8(0x04))
+ }
valueBuf = make([]byte, num*4+totalBytesLen)
@@ -1138,6 +1262,38 @@ func (c *Client) StatsMapForSpecifiedStats(key string, statsMap map[string]strin
return nil
}
+// UprGetFailoverLog for given list of vbuckets.
+func (mc *Client) UprGetFailoverLog(vb []uint16) (map[uint16]*FailoverLog, error) {
+
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.UPR_FAILOVERLOG,
+ Opaque: opaqueFailover,
+ }
+
+ failoverLogs := make(map[uint16]*FailoverLog)
+ for _, vBucket := range vb {
+ rq.VBucket = vBucket
+ if err := mc.Transmit(rq); err != nil {
+ return nil, err
+ }
+ res, err := mc.Receive()
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to receive %s", err.Error())
+ } else if res.Opcode != gomemcached.UPR_FAILOVERLOG || res.Status != gomemcached.SUCCESS {
+ return nil, fmt.Errorf("unexpected #opcode %v", res.Opcode)
+ }
+
+ flog, err := parseFailoverLog(res.Body)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse failover logs for vb %d", vb)
+ }
+ failoverLogs[vBucket] = flog
+ }
+
+ return failoverLogs, nil
+}
+
// Hijack exposes the underlying connection from this client.
//
// It also marks the connection as unhealthy since the client will
@@ -1166,3 +1322,98 @@ func IfResStatusError(response *gomemcached.MCResponse) bool {
func (c *Client) Conn() io.ReadWriteCloser {
return c.conn
}
+
+// Since the binary request supports only a single collection at a time, callers may
+// invoke this multiple times in succession to get vbSeqnos for multiple collections.
+// They can pass in a non-nil map so the gomemcached client does not need to allocate
+// a new map on each call, which reduces GC pressure.
+// NOTE: If collections are enabled and no context is given, KV still returns stats for the default collection
+func (c *Client) GetAllVbSeqnos(vbSeqnoMap map[uint16]uint64, context ...*ClientContext) (map[uint16]uint64, error) {
+ rq := &gomemcached.MCRequest{
+ Opcode: gomemcached.GET_ALL_VB_SEQNOS,
+ Opaque: opaqueGetSeqno,
+ }
+
+ err := c.setVbSeqnoContext(rq, context...)
+ if err != nil {
+ return vbSeqnoMap, err
+ }
+
+ err = c.Transmit(rq)
+ if err != nil {
+ return vbSeqnoMap, err
+ }
+
+ res, err := c.Receive()
+ if err != nil {
+ return vbSeqnoMap, fmt.Errorf("failed to receive: %v", err)
+ }
+
+ vbSeqnosList, err := parseGetSeqnoResp(res.Body)
+ if err != nil {
+ logging.Errorf("Unable to parse : err: %v\n", err)
+ return vbSeqnoMap, err
+ }
+
+ if vbSeqnoMap == nil {
+ vbSeqnoMap = make(map[uint16]uint64)
+ }
+
+ combineMapWithReturnedList(vbSeqnoMap, vbSeqnosList)
+ return vbSeqnoMap, nil
+}
+
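A sketch of the map-reuse pattern described above, assuming memcached is the import alias for github.com/couchbase/gomemcached/client and client is an established, collections-enabled connection:

```go
// highSeqnos gathers per-vbucket high seqnos for several collections,
// reusing one map across calls to avoid repeated allocations.
func highSeqnos(client *memcached.Client, collectionIDs []uint32) (map[uint16]uint64, error) {
	seqnos := make(map[uint16]uint64)
	for _, cid := range collectionIDs {
		if _, err := client.GetAllVbSeqnos(seqnos, &memcached.ClientContext{CollId: cid}); err != nil {
			return nil, err
		}
		// Each call prunes vbuckets missing from the latest response, so
		// consume or copy the map here if per-collection results matter.
	}
	return seqnos, nil
}
```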
+func combineMapWithReturnedList(vbSeqnoMap map[uint16]uint64, list *VBSeqnos) {
+ if list == nil {
+ return
+ }
+
+ // If the map already contains exactly the vbs present in the list, no cleanup is needed
+ needToCleanupMap := true
+ if len(vbSeqnoMap) == 0 {
+ needToCleanupMap = false
+ } else if len(vbSeqnoMap) == len(*list) {
+ needToCleanupMap = false
+ for _, pair := range *list {
+ _, vbExists := vbSeqnoMap[uint16(pair[0])]
+ if !vbExists {
+ needToCleanupMap = true
+ break
+ }
+ }
+ }
+
+ if needToCleanupMap {
+ var vbsToDelete []uint16
+ for vbInSeqnoMap := range vbSeqnoMap {
+ // If a vb in the seqno map doesn't exist in the returned list, need to clean up
+ // to ensure returning an accurate result
+ found := false
+ var vbno uint16
+ for _, pair := range *list {
+ vbno = uint16(pair[0])
+ if vbno == vbInSeqnoMap {
+ found = true
+ break
+ } else if vbno > vbInSeqnoMap {
+ // list is sorted by vbno, so the vb is definitely not in it
+ break
+ }
+ }
+ if !found {
+ vbsToDelete = append(vbsToDelete, vbInSeqnoMap)
+ }
+ }
+
+ for _, vbno := range vbsToDelete {
+ delete(vbSeqnoMap, vbno)
+ }
+ }
+
+ // Set the map with data from the list
+ for _, pair := range *list {
+ vbno := uint16(pair[0])
+ seqno := pair[1]
+ vbSeqnoMap[vbno] = seqno
+ }
+}
diff --git a/vendor/github.com/couchbase/gomemcached/client/upr_event.go b/vendor/github.com/couchbase/gomemcached/client/upr_event.go
index 31e0abfbfd..7ede0a128d 100644
--- a/vendor/github.com/couchbase/gomemcached/client/upr_event.go
+++ b/vendor/github.com/couchbase/gomemcached/client/upr_event.go
@@ -89,6 +89,9 @@ type UprEvent struct {
// FailoverLog contains pairs of vb uuid and sequence number
type FailoverLog [][2]uint64
+// VBSeqnos contains pairs of vbno and the high seqno
+type VBSeqnos [][2]uint64
+
func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFromDCP int) *UprEvent {
event := &UprEvent{
Opcode: rq.Opcode,
@@ -148,6 +151,8 @@ func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream, bytesReceivedFrom
event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
} else if event.IsSystemEvent() {
event.PopulateEvent(rq.Extras)
+ } else if event.IsSeqnoAdv() {
+ event.PopulateSeqnoAdv(rq.Extras)
}
return event
@@ -199,17 +204,31 @@ func (event *UprEvent) IsSystemEvent() bool {
return event.Opcode == gomemcached.DCP_SYSTEM_EVENT
}
+func (event *UprEvent) IsSeqnoAdv() bool {
+ return event.Opcode == gomemcached.DCP_SEQNO_ADV
+}
+
func (event *UprEvent) PopulateEvent(extras []byte) {
if len(extras) < dcpSystemEventExtraLen {
// Wrong length, don't parse
return
}
+
event.Seqno = binary.BigEndian.Uint64(extras[:8])
event.SystemEvent = SystemEventType(binary.BigEndian.Uint32(extras[8:12]))
var versionTemp uint16 = binary.BigEndian.Uint16(extras[12:14])
event.SysEventVersion = uint8(versionTemp >> 8)
}
+func (event *UprEvent) PopulateSeqnoAdv(extras []byte) {
+ if len(extras) < dcpSeqnoAdvExtraLen {
+ // Wrong length, don't parse
+ return
+ }
+
+ event.Seqno = binary.BigEndian.Uint64(extras[:8])
+}
+
func (event *UprEvent) GetSystemEventName() (string, error) {
switch event.SystemEvent {
case CollectionCreate:
diff --git a/vendor/github.com/couchbase/gomemcached/client/upr_feed.go b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
index 085b03c145..be676aa71c 100644
--- a/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
+++ b/vendor/github.com/couchbase/gomemcached/client/upr_feed.go
@@ -20,9 +20,11 @@ const uprDeletetionExtraLen = 18
const uprDeletetionWithDeletionTimeExtraLen = 21
const uprSnapshotExtraLen = 20
const dcpSystemEventExtraLen = 13
+const dcpSeqnoAdvExtraLen = 8
const bufferAckThreshold = 0.2
const opaqueOpen = 0xBEAF0001
const opaqueFailover = 0xDEADBEEF
+const opaqueGetSeqno = 0xDEADBEEF
const uprDefaultNoopInterval = 120
// Counter on top of opaqueOpen that others can draw from for open and control msgs
@@ -605,44 +607,6 @@ func (feed *UprFeed) uprOpen(name string, sequence uint32, bufSize uint32, featu
return
}
-// UprGetFailoverLog for given list of vbuckets.
-func (mc *Client) UprGetFailoverLog(
- vb []uint16) (map[uint16]*FailoverLog, error) {
-
- rq := &gomemcached.MCRequest{
- Opcode: gomemcached.UPR_FAILOVERLOG,
- Opaque: opaqueFailover,
- }
-
- var allFeaturesDisabled UprFeatures
- if err := doUprOpen(mc, "FailoverLog", 0, allFeaturesDisabled); err != nil {
- return nil, fmt.Errorf("UPR_OPEN Failed %s", err.Error())
- }
-
- failoverLogs := make(map[uint16]*FailoverLog)
- for _, vBucket := range vb {
- rq.VBucket = vBucket
- if err := mc.Transmit(rq); err != nil {
- return nil, err
- }
- res, err := mc.Receive()
-
- if err != nil {
- return nil, fmt.Errorf("failed to receive %s", err.Error())
- } else if res.Opcode != gomemcached.UPR_FAILOVERLOG || res.Status != gomemcached.SUCCESS {
- return nil, fmt.Errorf("unexpected #opcode %v", res.Opcode)
- }
-
- flog, err := parseFailoverLog(res.Body)
- if err != nil {
- return nil, fmt.Errorf("unable to parse failover logs for vb %d", vb)
- }
- failoverLogs[vBucket] = flog
- }
-
- return failoverLogs, nil
-}
-
// UprRequestStream for a single vbucket.
func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
@@ -793,7 +757,6 @@ func (feed *UprFeed) StartFeedWithConfig(datachan_len int) error {
}
func parseFailoverLog(body []byte) (*FailoverLog, error) {
-
if len(body)%16 != 0 {
err := fmt.Errorf("invalid body length %v, in failover-log", len(body))
return nil, err
@@ -808,6 +771,24 @@ func parseFailoverLog(body []byte) (*FailoverLog, error) {
return &log, nil
}
+func parseGetSeqnoResp(body []byte) (*VBSeqnos, error) {
+ // vbno of 2 bytes + seqno of 8 bytes
+ const entryLen = 10
+
+ if len(body)%entryLen != 0 {
+ err := fmt.Errorf("invalid body length %v, in getVbSeqno", len(body))
+ return nil, err
+ }
+ vbSeqnos := make(VBSeqnos, len(body)/entryLen)
+ for i, j := 0, 0; i < len(body); i += entryLen {
+ vbno := binary.BigEndian.Uint16(body[i : i+2])
+ seqno := binary.BigEndian.Uint64(body[i+2 : i+10])
+ vbSeqnos[j] = [2]uint64{uint64(vbno), seqno}
+ j++
+ }
+ return &vbSeqnos, nil
+}
+
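For illustration, a worked example of the 10-byte entry layout parseGetSeqnoResp expects, using only the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One GET_ALL_VB_SEQNOS entry: vbno 3 with high seqno 42, laid out
	// big-endian as 2 bytes + 8 bytes.
	body := make([]byte, 10)
	binary.BigEndian.PutUint16(body[0:2], 3)
	binary.BigEndian.PutUint64(body[2:10], 42)
	fmt.Printf("% x\n", body) // 00 03 00 00 00 00 00 00 00 2a

	// Decoding recovers the pair, as the parser does entry by entry.
	fmt.Println(binary.BigEndian.Uint16(body[0:2]), binary.BigEndian.Uint64(body[2:10])) // 3 42
}
```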
func handleStreamRequest(
res *gomemcached.MCResponse,
headerBuf []byte,
@@ -987,6 +968,14 @@ loop:
break loop
}
event = makeUprEvent(pkt, stream, bytes)
+ case gomemcached.UPR_FAILOVERLOG:
+ logging.Infof("Failover log for vb %d received: %v", vb, pkt)
+ case gomemcached.DCP_SEQNO_ADV:
+ if stream == nil {
+ logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
+ break loop
+ }
+ event = makeUprEvent(pkt, stream, bytes)
default:
logging.Infof("Recived an unknown response for vbucket %d", vb)
}
diff --git a/vendor/github.com/couchbase/gomemcached/mc_constants.go b/vendor/github.com/couchbase/gomemcached/mc_constants.go
index 11f383b8ff..1dfe2febf2 100644
--- a/vendor/github.com/couchbase/gomemcached/mc_constants.go
+++ b/vendor/github.com/couchbase/gomemcached/mc_constants.go
@@ -74,6 +74,7 @@ const (
TAP_VBUCKET_SET = CommandCode(0x45) // Sets state of vbucket in receiver (used in takeover)
TAP_CHECKPOINT_START = CommandCode(0x46) // Notifies start of new checkpoint
TAP_CHECKPOINT_END = CommandCode(0x47) // Notifies end of checkpoint
+ GET_ALL_VB_SEQNOS = CommandCode(0x48) // Get current high sequence numbers from all vbuckets located on the server
UPR_OPEN = CommandCode(0x50) // Open a UPR connection with a name
UPR_ADDSTREAM = CommandCode(0x51) // Sent by ebucketMigrator to UPR Consumer
@@ -102,18 +103,21 @@ const (
SUBDOC_MULTI_LOOKUP = CommandCode(0xd0) // Multi lookup. Doc xattrs and meta.
DCP_SYSTEM_EVENT = CommandCode(0x5f) // A system event has occurred
-
+ DCP_SEQNO_ADV = CommandCode(0x64) // Sent when the vb seqno has advanced due to an unsubscribed event
)
// command codes that are counted toward DCP control buffer
// when DCP clients receive DCP messages with these command codes, they need to provide acknowledgement
var BufferedCommandCodeMap = map[CommandCode]bool{
- SET_VBUCKET: true,
- UPR_STREAMEND: true,
- UPR_SNAPSHOT: true,
- UPR_MUTATION: true,
- UPR_DELETION: true,
- UPR_EXPIRATION: true}
+ SET_VBUCKET: true,
+ UPR_STREAMEND: true,
+ UPR_SNAPSHOT: true,
+ UPR_MUTATION: true,
+ UPR_DELETION: true,
+ UPR_EXPIRATION: true,
+ DCP_SYSTEM_EVENT: true,
+ DCP_SEQNO_ADV: true,
+}
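A hypothetical sketch of the consumer-side accounting this map drives; only BufferedCommandCodeMap and the 0.2 ack threshold (bufferAckThreshold in upr_feed.go) come from this code, while unacked, bufferSize and sendBufferAck are illustrative names:

```go
// ackIfNeeded sketches DCP flow control: only opcodes present in
// BufferedCommandCodeMap count against the control buffer, and an ack
// is sent once unacknowledged bytes cross the threshold.
func ackIfNeeded(opcode gomemcached.CommandCode, pktLen int, unacked, bufferSize uint32,
	sendBufferAck func(n uint32) error) (uint32, error) {
	if !gomemcached.BufferedCommandCodeMap[opcode] {
		return unacked, nil // unbuffered message types are free
	}
	unacked += uint32(pktLen)
	if float64(unacked) >= 0.2*float64(bufferSize) { // mirrors bufferAckThreshold
		if err := sendBufferAck(unacked); err != nil {
			return unacked, err
		}
		unacked = 0
	}
	return unacked, nil
}
```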
// Status field for memcached response.
type Status uint16
@@ -274,6 +278,8 @@ func init() {
CommandNames[SUBDOC_MULTI_LOOKUP] = "SUBDOC_MULTI_LOOKUP"
CommandNames[GET_COLLECTIONS_MANIFEST] = "GET_COLLECTIONS_MANIFEST"
CommandNames[COLLECTIONS_GET_CID] = "COLLECTIONS_GET_CID"
+ CommandNames[DCP_SYSTEM_EVENT] = "DCP_SYSTEM_EVENT"
+ CommandNames[DCP_SEQNO_ADV] = "DCP_SEQNO_ADV"
StatusNames = make(map[Status]string)
StatusNames[SUCCESS] = "SUCCESS"
diff --git a/vendor/github.com/couchbase/gomemcached/mc_req.go b/vendor/github.com/couchbase/gomemcached/mc_req.go
index 35d0fe2daf..c4f154f224 100644
--- a/vendor/github.com/couchbase/gomemcached/mc_req.go
+++ b/vendor/github.com/couchbase/gomemcached/mc_req.go
@@ -11,6 +11,8 @@ import (
// The current limit, 20MB, is the size limit supported by ep-engine.
var MaxBodyLen = int(20 * 1024 * 1024)
+const _BUFLEN = 256
+
// MCRequest is memcached Request
type MCRequest struct {
// The command being issued
@@ -27,6 +29,10 @@ type MCRequest struct {
DataType uint8
// len() calls are expensive - cache the key length for collection handling
Keylen int
+ // Collection id for collection based operations
+ CollId [binary.MaxVarintLen32]byte
+ // Length of collection id
+ CollIdLen int
// Flexible Framing Extras
FramingExtras []FrameInfo
// Stored length of incoming framing extras
@@ -34,8 +40,12 @@ type MCRequest struct {
}
// Size gives the number of bytes this request requires.
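+// HdrSize gives the number of bytes the header portion of this request
+// requires: the fixed header plus extras, collection id, framing extras and key.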
+func (req *MCRequest) HdrSize() int {
+ return HDR_LEN + len(req.Extras) + req.CollIdLen + req.FramingElen + len(req.Key)
+}
+
func (req *MCRequest) Size() int {
- return HDR_LEN + len(req.Extras) + len(req.Key) + len(req.Body) + len(req.ExtMeta) + req.FramingElen
+ return req.HdrSize() + len(req.Body) + len(req.ExtMeta)
}
// A debugging string representation of this request
@@ -68,7 +78,7 @@ func (req *MCRequest) fillRegularHeaderBytes(data []byte) int {
data[pos] = byte(req.Opcode)
pos++
binary.BigEndian.PutUint16(data[pos:pos+2],
- uint16(len(req.Key)))
+ uint16(req.CollIdLen+len(req.Key)))
pos += 2
// 4
@@ -84,7 +94,7 @@ func (req *MCRequest) fillRegularHeaderBytes(data []byte) int {
// 8
binary.BigEndian.PutUint32(data[pos:pos+4],
- uint32(len(req.Body)+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
+ uint32(len(req.Body)+req.CollIdLen+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
pos += 4
// 12
@@ -97,15 +107,21 @@ func (req *MCRequest) fillRegularHeaderBytes(data []byte) int {
}
pos += 8
+ // 24 - extras
if len(req.Extras) > 0 {
copy(data[pos:pos+len(req.Extras)], req.Extras)
pos += len(req.Extras)
}
if len(req.Key) > 0 {
+ if req.CollIdLen > 0 {
+ copy(data[pos:pos+req.CollIdLen], req.CollId[:])
+ pos += req.CollIdLen
+ }
copy(data[pos:pos+len(req.Key)], req.Key)
pos += len(req.Key)
}
+
return pos
}
@@ -132,7 +148,7 @@ func (req *MCRequest) fillFlexHeaderBytes(data []byte) (int, bool) {
data[0] = FLEX_MAGIC
data[1] = byte(req.Opcode)
data[2] = byte(req.FramingElen)
- data[3] = byte(req.Keylen)
+ data[3] = byte(req.Keylen + req.CollIdLen)
elen := len(req.Extras)
data[4] = byte(elen)
if req.DataType != 0 {
@@ -140,7 +156,7 @@ func (req *MCRequest) fillFlexHeaderBytes(data []byte) (int, bool) {
}
binary.BigEndian.PutUint16(data[6:8], req.VBucket)
binary.BigEndian.PutUint32(data[8:12],
- uint32(len(req.Body)+req.Keylen+elen+len(req.ExtMeta)+req.FramingElen))
+ uint32(len(req.Body)+req.Keylen+req.CollIdLen+elen+len(req.ExtMeta)+req.FramingElen))
binary.BigEndian.PutUint32(data[12:16], req.Opaque)
if req.Cas != 0 {
binary.BigEndian.PutUint64(data[16:24], req.Cas)
@@ -205,12 +221,27 @@ func (req *MCRequest) fillFlexHeaderBytes(data []byte) (int, bool) {
// Add keys
if req.Keylen > 0 {
if mergeMode {
+ var key []byte
+ var keylen int
+ if req.CollIdLen == 0 {
+ key = req.Key
+ keylen = req.Keylen
+ } else {
+ key = append(key, req.CollId[:req.CollIdLen]...)
+ key = append(key, req.Key...)
+ keylen = req.Keylen + req.CollIdLen
+ }
- outputBytes = ShiftByteSliceRight4Bits(req.Key)
+ outputBytes = ShiftByteSliceRight4Bits(key)
data = Merge2HalfByteSlices(data, outputBytes)
+ pos += keylen
} else {
+ if req.CollIdLen > 0 {
+ copy(data[pos:pos+req.CollIdLen], req.CollId[:])
+ pos += req.CollIdLen
+ }
copy(data[pos:pos+req.Keylen], req.Key)
+ pos += req.Keylen
}
- pos += req.Keylen
}
return pos, mergeMode
@@ -227,7 +258,7 @@ func (req *MCRequest) FillHeaderBytes(data []byte) (int, bool) {
// HeaderBytes will return the wire representation of the request header
// (with the extras and key).
func (req *MCRequest) HeaderBytes() []byte {
- data := make([]byte, HDR_LEN+len(req.Extras)+len(req.Key)+req.FramingElen)
+ data := make([]byte, HDR_LEN+len(req.Extras)+req.CollIdLen+len(req.Key)+req.FramingElen)
req.FillHeaderBytes(data)
@@ -237,7 +268,11 @@ func (req *MCRequest) HeaderBytes() []byte {
// Bytes will return the wire representation of this request.
func (req *MCRequest) Bytes() []byte {
data := make([]byte, req.Size())
+ req.bytes(data)
+ return data
+}
+func (req *MCRequest) bytes(data []byte) {
pos, halfByteMode := req.FillHeaderBytes(data)
// TODO - the halfByteMode should be revisited for a more efficient
// way of doing things
@@ -259,15 +294,19 @@ func (req *MCRequest) Bytes() []byte {
copy(data[pos+len(req.Body):pos+len(req.Body)+len(req.ExtMeta)], req.ExtMeta)
}
}
- return data
}
// Transmit will send this request message across a writer.
func (req *MCRequest) Transmit(w io.Writer) (n int, err error) {
- if len(req.Body) < 128 {
- n, err = w.Write(req.Bytes())
+ l := req.Size()
+ if l < _BUFLEN {
+ data := make([]byte, l)
+ req.bytes(data)
+ n, err = w.Write(data)
} else {
- n, err = w.Write(req.HeaderBytes())
+ data := make([]byte, req.HdrSize())
+ req.FillHeaderBytes(data)
+ n, err = w.Write(data)
if err == nil {
m := 0
m, err = w.Write(req.Body)
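The net effect of the rewritten Transmit: requests smaller than _BUFLEN (256 bytes) are serialized and sent in a single write, while larger requests send the header and the body as separate writes so the body is never copied into a scratch buffer.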
diff --git a/vendor/github.com/couchbaselabs/go-couchbase/port_map.go b/vendor/github.com/couchbaselabs/go-couchbase/port_map.go
deleted file mode 100644
index 24c9f105db..0000000000
--- a/vendor/github.com/couchbaselabs/go-couchbase/port_map.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package couchbase
-
-/*
-
-The goal here is to map a hostname:port combination to another hostname:port
-combination. The original hostname:port gives the name and regular KV port
-of a couchbase server. We want to determine the corresponding SSL KV port.
-
-To do this, we have a pool services structure, as obtained from
-the /pools/default/nodeServices API.
-
-For a fully configured two-node system, the structure may look like this:
-{"rev":32,"nodesExt":[
- {"services":{"mgmt":8091,"mgmtSSL":18091,"fts":8094,"ftsSSL":18094,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211},"hostname":"172.23.123.101"},
- {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"capiSSL":18092,"capi":8092,"kvSSL":11207,"projector":9999,"kv":11210,"moxi":11211,"n1ql":8093,"n1qlSSL":18093},"thisNode":true,"hostname":"172.23.123.102"}]}
-
-In this case, note the "hostname" fields, and the "kv" and "kvSSL" fields.
-
-For a single-node system, perhaps brought up for testing, the structure may look like this:
-{"rev":66,"nodesExt":[
- {"services":{"mgmt":8091,"mgmtSSL":18091,"indexAdmin":9100,"indexScan":9101,"indexHttp":9102,"indexStreamInit":9103,"indexStreamCatchup":9104,"indexStreamMaint":9105,"indexHttps":19102,"kv":11210,"kvSSL":11207,"capi":8092,"capiSSL":18092,"projector":9999,"n1ql":8093,"n1qlSSL":18093},"thisNode":true}],"clusterCapabilitiesVer":[1,0],"clusterCapabilities":{"n1ql":["enhancedPreparedStatements"]}}
-
-Here, note that there is only a single entry in the "nodeExt" array and that it does not have a "hostname" field.
-We will assume that either hostname fields are present, or there is only a single node.
-*/
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
-)
-
-func ParsePoolServices(jsonInput string) (*PoolServices, error) {
- ps := &PoolServices{}
- err := json.Unmarshal([]byte(jsonInput), ps)
- return ps, err
-}
-
-func MapKVtoSSL(hostport string, ps *PoolServices) (string, error) {
- colonIndex := strings.LastIndex(hostport, ":")
- if colonIndex < 0 {
- return "", fmt.Errorf("Unable to find host/port separator in %s", hostport)
- }
- host := hostport[0:colonIndex]
- port := hostport[colonIndex+1:]
- portInt, err := strconv.Atoi(port)
- if err != nil {
- return "", fmt.Errorf("Unable to parse host/port combination %s: %v", hostport, err)
- }
-
- var ns *NodeServices
- if len(ps.NodesExt) == 1 {
- ns = &(ps.NodesExt[0])
- } else {
- for i := range ps.NodesExt {
- hostname := ps.NodesExt[i].Hostname
- if len(hostname) == 0 {
- // in case of missing hostname, check for 127.0.0.1
- hostname = "127.0.0.1"
- }
- if hostname == host {
- ns = &(ps.NodesExt[i])
- break
- }
- }
- }
-
- if ns == nil {
- return "", fmt.Errorf("Unable to parse host/port combination %s: no matching node found among %d", hostport, len(ps.NodesExt))
- }
- kv, found := ns.Services["kv"]
- if !found {
- return "", fmt.Errorf("Unable to map host/port combination %s: target host has no kv port listed", hostport)
- }
- kvSSL, found := ns.Services["kvSSL"]
- if !found {
- return "", fmt.Errorf("Unable to map host/port combination %s: target host has no kvSSL port listed", hostport)
- }
- if portInt != kv {
- return "", fmt.Errorf("Unable to map hostport combination %s: expected port %d but found %d", hostport, portInt, kv)
- }
- return fmt.Sprintf("%s:%d", host, kvSSL), nil
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index ea3e510827..07f7285f08 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -251,14 +251,14 @@ For streaming use a simple setup could look like this:
import "github.com/klauspost/compress/zstd"
func Decompress(in io.Reader, out io.Writer) error {
- d, err := zstd.NewReader(input)
+ d, err := zstd.NewReader(in)
if err != nil {
return err
}
defer d.Close()
// Copy content...
- _, err := io.Copy(out, d)
+ _, err = io.Copy(out, d)
return err
}
```
diff --git a/vendor/github.com/lunny/log/.gitignore b/vendor/github.com/lunny/log/.gitignore
deleted file mode 100644
index 3a11644b45..0000000000
--- a/vendor/github.com/lunny/log/.gitignore
+++ /dev/null
@@ -1,26 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-log.db
-*.log
-logs
-.vscode \ No newline at end of file
diff --git a/vendor/github.com/lunny/log/LICENSE b/vendor/github.com/lunny/log/LICENSE
deleted file mode 100644
index c9338f8293..0000000000
--- a/vendor/github.com/lunny/log/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2014 - 2016 lunny
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of the {organization} nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/lunny/log/README.md b/vendor/github.com/lunny/log/README.md
deleted file mode 100644
index da21fa4618..0000000000
--- a/vendor/github.com/lunny/log/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-## log
-[![GoDoc](https://godoc.org/github.com/lunny/log?status.png)](https://godoc.org/github.com/lunny/log)
-
-[简体中文](https://github.com/lunny/log/blob/master/README_CN.md)
-
-# Installation
-
-```
-go get github.com/lunny/log
-```
-
-# Features
-
-* Add color support for unix console
-* Implemented dbwriter to save log to database
-* Implemented FileWriter to save log to file by date or time.
-* Location configuration
-
-# Example
-
-For Single File:
-```Go
-f, _ := os.Create("my.log")
-log.Std.SetOutput(f)
-```
-
-For Multiple Writer:
-```Go
-f, _ := os.Create("my.log")
-log.Std.SetOutput(io.MultiWriter(f, os.Stdout))
-```
-
-For log files by date or time:
-```Go
-w := log.NewFileWriter(log.FileOptions{
- ByType:log.ByDay,
- Dir:"./logs",
-})
-log.Std.SetOutput(w)
-```
-
-# About
-
-This repo is an extension of Golang log.
-
-# LICENSE
-
- BSD License
- [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/)
diff --git a/vendor/github.com/lunny/log/README_CN.md b/vendor/github.com/lunny/log/README_CN.md
deleted file mode 100644
index 0fc7db59e2..0000000000
--- a/vendor/github.com/lunny/log/README_CN.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## log
-[![GoDoc](https://godoc.org/github.com/lunny/log?status.png)](https://godoc.org/github.com/lunny/log)
-
-[English](https://github.com/lunny/log/blob/master/README.md)
-
-# 安装
-
-```
-go get github.com/lunny/log
-```
-
-# 特性
-
-* 对unix增加控制台颜色支持
-* 实现了保存log到数据库支持
-* 实现了保存log到按日期的文件支持
-* 实现了设置日期的地区
-
-# 例子
-
-保存到单个文件:
-
-```Go
-f, _ := os.Create("my.log")
-log.Std.SetOutput(f)
-```
-
-保存到数据库:
-
-```Go
-f, _ := os.Create("my.log")
-log.Std.SetOutput(io.MultiWriter(f, os.Stdout))
-```
-
-保存到按时间分隔的文件:
-
-```Go
-w := log.NewFileWriter(log.FileOptions{
- ByType:log.ByDay,
- Dir:"./logs",
-})
-log.Std.SetOutput(w)
-```
-
-# 关于
-
-本 Log 是在 golang 的 log 之上的扩展
-
-# LICENSE
-
- BSD License
- [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/)
diff --git a/vendor/github.com/lunny/log/dbwriter.go b/vendor/github.com/lunny/log/dbwriter.go
deleted file mode 100644
index e8ff00bd89..0000000000
--- a/vendor/github.com/lunny/log/dbwriter.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package log
-
-import (
- "database/sql"
- "time"
-)
-
-type DBWriter struct {
- db *sql.DB
- stmt *sql.Stmt
- content chan []byte
-}
-
-func NewDBWriter(db *sql.DB) (*DBWriter, error) {
- _, err := db.Exec("CREATE TABLE IF NOT EXISTS log (id int, content text, created datetime)")
- if err != nil {
- return nil, err
- }
- stmt, err := db.Prepare("INSERT INTO log (content, created) values (?, ?)")
- if err != nil {
- return nil, err
- }
- return &DBWriter{db, stmt, make(chan []byte, 1000)}, nil
-}
-
-func (w *DBWriter) Write(p []byte) (n int, err error) {
- _, err = w.stmt.Exec(string(p), time.Now())
- if err == nil {
- n = len(p)
- }
- return
-}
-
-func (w *DBWriter) Close() {
- w.stmt.Close()
-}
diff --git a/vendor/github.com/lunny/log/filewriter.go b/vendor/github.com/lunny/log/filewriter.go
deleted file mode 100644
index f0bb4d1df1..0000000000
--- a/vendor/github.com/lunny/log/filewriter.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package log
-
-import (
- "io"
- "os"
- "path/filepath"
- "sync"
- "time"
-)
-
-var _ io.Writer = &Files{}
-
-type ByType int
-
-const (
- ByDay ByType = iota
- ByHour
- ByMonth
-)
-
-var (
- formats = map[ByType]string{
- ByDay: "2006-01-02",
- ByHour: "2006-01-02-15",
- ByMonth: "2006-01",
- }
-)
-
-func SetFileFormat(t ByType, format string) {
- formats[t] = format
-}
-
-func (b ByType) Format() string {
- return formats[b]
-}
-
-type Files struct {
- FileOptions
- f *os.File
- lastFormat string
- lock sync.Mutex
-}
-
-type FileOptions struct {
- Dir string
- ByType ByType
- Loc *time.Location
-}
-
-func prepareFileOption(opts []FileOptions) FileOptions {
- var opt FileOptions
- if len(opts) > 0 {
- opt = opts[0]
- }
- if opt.Dir == "" {
- opt.Dir = "./"
- }
- err := os.MkdirAll(opt.Dir, os.ModePerm)
- if err != nil {
- panic(err.Error())
- }
-
- if opt.Loc == nil {
- opt.Loc = time.Local
- }
- return opt
-}
-
-func NewFileWriter(opts ...FileOptions) *Files {
- opt := prepareFileOption(opts)
- return &Files{
- FileOptions: opt,
- }
-}
-
-func (f *Files) getFile() (*os.File, error) {
- var err error
- t := time.Now().In(f.Loc)
- if f.f == nil {
- f.lastFormat = t.Format(f.ByType.Format())
- f.f, err = os.OpenFile(filepath.Join(f.Dir, f.lastFormat+".log"),
- os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
- return f.f, err
- }
- if f.lastFormat != t.Format(f.ByType.Format()) {
- f.f.Close()
- f.lastFormat = t.Format(f.ByType.Format())
- f.f, err = os.OpenFile(filepath.Join(f.Dir, f.lastFormat+".log"),
- os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
- return f.f, err
- }
- return f.f, nil
-}
-
-func (f *Files) Write(bs []byte) (int, error) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- w, err := f.getFile()
- if err != nil {
- return 0, err
- }
- return w.Write(bs)
-}
-
-func (f *Files) Close() {
- if f.f != nil {
- f.f.Close()
- f.f = nil
- }
- f.lastFormat = ""
-}
diff --git a/vendor/github.com/lunny/log/logext.go b/vendor/github.com/lunny/log/logext.go
deleted file mode 100644
index 215c45f309..0000000000
--- a/vendor/github.com/lunny/log/logext.go
+++ /dev/null
@@ -1,595 +0,0 @@
-package log
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-// These flags define which text to prefix to each log entry generated by the Logger.
-const (
- // Bits or'ed together to control what's printed. There is no control over the
- // order they appear (the order listed here) or the format they present (as
- // described in the comments). A colon appears after these items:
- // 2009/0123 01:23:23.123123 /a/b/c/d.go:23: message
- Ldate = 1 << iota // the date: 2009/0123
- Ltime // the time: 01:23:23
- Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
- Llongfile // full file name and line number: /a/b/c/d.go:23
- Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
- Lmodule // module name
- Llevel // level: 0(Debug), 1(Info), 2(Warn), 3(Error), 4(Panic), 5(Fatal)
- Llongcolor // color will start [info] end of line
- Lshortcolor // color only include [info]
- LstdFlags = Ldate | Ltime // initial values for the standard logger
- //Ldefault = Llevel | LstdFlags | Lshortfile | Llongcolor
-) // [prefix][time][level][module][shortfile|longfile]
-
-func Ldefault() int {
- if runtime.GOOS == "windows" {
- return Llevel | LstdFlags | Lshortfile
- }
- return Llevel | LstdFlags | Lshortfile | Llongcolor
-}
-
-func Version() string {
- return "0.2.0.1121"
-}
-
-const (
- Lall = iota
-)
-const (
- Ldebug = iota
- Linfo
- Lwarn
- Lerror
- Lpanic
- Lfatal
- Lnone
-)
-
-const (
- ForeBlack = iota + 30 //30
- ForeRed //31
- ForeGreen //32
- ForeYellow //33
- ForeBlue //34
- ForePurple //35
- ForeCyan //36
- ForeWhite //37
-)
-
-const (
- BackBlack = iota + 40 //40
- BackRed //41
- BackGreen //42
- BackYellow //43
- BackBlue //44
- BackPurple //45
- BackCyan //46
- BackWhite //47
-)
-
-var levels = []string{
- "[Debug]",
- "[Info]",
- "[Warn]",
- "[Error]",
- "[Panic]",
- "[Fatal]",
-}
-
-// MUST called before all logs
-func SetLevels(lvs []string) {
- levels = lvs
-}
-
-var colors = []int{
- ForeCyan,
- ForeGreen,
- ForeYellow,
- ForeRed,
- ForePurple,
- ForeBlue,
-}
-
-// MUST called before all logs
-func SetColors(cls []int) {
- colors = cls
-}
-
-// A Logger represents an active logging object that generates lines of
-// output to an io.Writer. Each logging operation makes a single call to
-// the Writer's Write method. A Logger can be used simultaneously from
-// multiple goroutines; it guarantees to serialize access to the Writer.
-type Logger struct {
- mu sync.Mutex // ensures atomic writes; protects the following fields
- prefix string // prefix to write at beginning of each line
- flag int // properties
- Level int
- out io.Writer // destination for output
- buf bytes.Buffer // for accumulating text to write
- levelStats [6]int64
- loc *time.Location
-}
-
-// New creates a new Logger. The out variable sets the
-// destination to which log data will be written.
-// The prefix appears at the beginning of each generated log line.
-// The flag argument defines the logging properties.
-func New(out io.Writer, prefix string, flag int) *Logger {
- l := &Logger{out: out, prefix: prefix, Level: 1, flag: flag, loc: time.Local}
- if out != os.Stdout {
- l.flag = RmColorFlags(l.flag)
- }
- return l
-}
-
-var Std = New(os.Stderr, "", Ldefault())
-
-// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
-// Knows the buffer has capacity.
-func itoa(buf *bytes.Buffer, i int, wid int) {
- var u uint = uint(i)
- if u == 0 && wid <= 1 {
- buf.WriteByte('0')
- return
- }
-
- // Assemble decimal in reverse order.
- var b [32]byte
- bp := len(b)
- for ; u > 0 || wid > 0; u /= 10 {
- bp--
- wid--
- b[bp] = byte(u%10) + '0'
- }
-
- // avoid slicing b to avoid an allocation.
- for bp < len(b) {
- buf.WriteByte(b[bp])
- bp++
- }
-}
-
-func moduleOf(file string) string {
- pos := strings.LastIndex(file, "/")
- if pos != -1 {
- pos1 := strings.LastIndex(file[:pos], "/src/")
- if pos1 != -1 {
- return file[pos1+5 : pos]
- }
- }
- return "UNKNOWN"
-}
-
-func (l *Logger) formatHeader(buf *bytes.Buffer, t time.Time,
- file string, line int, lvl int, reqId string) {
- if l.prefix != "" {
- buf.WriteString(l.prefix)
- }
- if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
- if l.flag&Ldate != 0 {
- year, month, day := t.Date()
- itoa(buf, year, 4)
- buf.WriteByte('/')
- itoa(buf, int(month), 2)
- buf.WriteByte('/')
- itoa(buf, day, 2)
- buf.WriteByte(' ')
- }
- if l.flag&(Ltime|Lmicroseconds) != 0 {
- hour, min, sec := t.Clock()
- itoa(buf, hour, 2)
- buf.WriteByte(':')
- itoa(buf, min, 2)
- buf.WriteByte(':')
- itoa(buf, sec, 2)
- if l.flag&Lmicroseconds != 0 {
- buf.WriteByte('.')
- itoa(buf, t.Nanosecond()/1e3, 6)
- }
- buf.WriteByte(' ')
- }
- }
- if reqId != "" {
- buf.WriteByte('[')
- buf.WriteString(reqId)
- buf.WriteByte(']')
- buf.WriteByte(' ')
- }
-
- if l.flag&(Lshortcolor|Llongcolor) != 0 {
- buf.WriteString(fmt.Sprintf("\033[1;%dm", colors[lvl]))
- }
- if l.flag&Llevel != 0 {
- buf.WriteString(levels[lvl])
- buf.WriteByte(' ')
- }
- if l.flag&Lshortcolor != 0 {
- buf.WriteString("\033[0m")
- }
-
- if l.flag&Lmodule != 0 {
- buf.WriteByte('[')
- buf.WriteString(moduleOf(file))
- buf.WriteByte(']')
- buf.WriteByte(' ')
- }
- if l.flag&(Lshortfile|Llongfile) != 0 {
- if l.flag&Lshortfile != 0 {
- short := file
- for i := len(file) - 1; i > 0; i-- {
- if file[i] == '/' {
- short = file[i+1:]
- break
- }
- }
- file = short
- }
- buf.WriteString(file)
- buf.WriteByte(':')
- itoa(buf, line, -1)
- buf.WriteByte(' ')
- }
-}
-
-// Output writes the output for a logging event. The string s contains
-// the text to print after the prefix specified by the flags of the
-// Logger. A newline is appended if the last character of s is not
-// already a newline. Calldepth is used to recover the PC and is
-// provided for generality, although at the moment on all pre-defined
-// paths it will be 2.
-func (l *Logger) Output(reqId string, lvl int, calldepth int, s string) error {
- if lvl < l.Level {
- return nil
- }
- now := time.Now().In(l.loc) // get this early.
- var file string
- var line int
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.flag&(Lshortfile|Llongfile|Lmodule) != 0 {
- // release lock while getting caller info - it's expensive.
- l.mu.Unlock()
- var ok bool
- _, file, line, ok = runtime.Caller(calldepth)
- if !ok {
- file = "???"
- line = 0
- }
- l.mu.Lock()
- }
- l.levelStats[lvl]++
- l.buf.Reset()
- l.formatHeader(&l.buf, now, file, line, lvl, reqId)
- l.buf.WriteString(s)
- if l.flag&Llongcolor != 0 {
- l.buf.WriteString("\033[0m")
- }
- if len(s) > 0 && s[len(s)-1] != '\n' {
- l.buf.WriteByte('\n')
- }
- _, err := l.out.Write(l.buf.Bytes())
- return err
-}
-
-// -----------------------------------------
-
-// Printf calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Printf.
-func (l *Logger) Printf(format string, v ...interface{}) {
- l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
-}
-
-// Print calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Print.
-func (l *Logger) Print(v ...interface{}) {
- l.Output("", Linfo, 2, fmt.Sprint(v...))
-}
-
-// Println calls l.Output to print to the logger.
-// Arguments are handled in the manner of fmt.Println.
-func (l *Logger) Println(v ...interface{}) {
- l.Output("", Linfo, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func (l *Logger) Debugf(format string, v ...interface{}) {
- l.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
-}
-
-func (l *Logger) Debug(v ...interface{}) {
- l.Output("", Ldebug, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-func (l *Logger) Infof(format string, v ...interface{}) {
- l.Output("", Linfo, 2, fmt.Sprintf(format, v...))
-}
-
-func (l *Logger) Info(v ...interface{}) {
- l.Output("", Linfo, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-func (l *Logger) Warnf(format string, v ...interface{}) {
- l.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
-}
-
-func (l *Logger) Warn(v ...interface{}) {
- l.Output("", Lwarn, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func (l *Logger) Errorf(format string, v ...interface{}) {
- l.Output("", Lerror, 2, fmt.Sprintf(format, v...))
-}
-
-func (l *Logger) Error(v ...interface{}) {
- l.Output("", Lerror, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func (l *Logger) Fatal(v ...interface{}) {
- l.Output("", Lfatal, 2, fmt.Sprintln(v...))
- os.Exit(1)
-}
-
-// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
-func (l *Logger) Fatalf(format string, v ...interface{}) {
- l.Output("", Lfatal, 2, fmt.Sprintf(format, v...))
- os.Exit(1)
-}
-
-// -----------------------------------------
-// Panic is equivalent to l.Print() followed by a call to panic().
-func (l *Logger) Panic(v ...interface{}) {
- s := fmt.Sprintln(v...)
- l.Output("", Lpanic, 2, s)
- panic(s)
-}
-
-// Panicf is equivalent to l.Printf() followed by a call to panic().
-func (l *Logger) Panicf(format string, v ...interface{}) {
- s := fmt.Sprintf(format, v...)
- l.Output("", Lpanic, 2, s)
- panic(s)
-}
-
-// -----------------------------------------
-func (l *Logger) Stack(v ...interface{}) {
- s := fmt.Sprint(v...)
- s += "\n"
- buf := make([]byte, 1024*1024)
- n := runtime.Stack(buf, true)
- s += string(buf[:n])
- s += "\n"
- l.Output("", Lerror, 2, s)
-}
-
-// -----------------------------------------
-func (l *Logger) Stat() (stats []int64) {
- l.mu.Lock()
- v := l.levelStats
- l.mu.Unlock()
- return v[:]
-}
-
-// Flags returns the output flags for the logger.
-func (l *Logger) Flags() int {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.flag
-}
-
-func RmColorFlags(flag int) int {
- // for un std out, it should not show color since almost them don't support
- if flag&Llongcolor != 0 {
- flag = flag ^ Llongcolor
- }
- if flag&Lshortcolor != 0 {
- flag = flag ^ Lshortcolor
- }
- return flag
-}
-
-func (l *Logger) Location() *time.Location {
- return l.loc
-}
-
-func (l *Logger) SetLocation(loc *time.Location) {
- l.loc = loc
-}
-
-// SetFlags sets the output flags for the logger.
-func (l *Logger) SetFlags(flag int) {
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.out != os.Stdout {
- flag = RmColorFlags(flag)
- }
- l.flag = flag
-}
-
-// Prefix returns the output prefix for the logger.
-func (l *Logger) Prefix() string {
- l.mu.Lock()
- defer l.mu.Unlock()
- return l.prefix
-}
-
-// SetPrefix sets the output prefix for the logger.
-func (l *Logger) SetPrefix(prefix string) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.prefix = prefix
-}
-
-// SetOutputLevel sets the output level for the logger.
-func (l *Logger) SetOutputLevel(lvl int) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.Level = lvl
-}
-
-func (l *Logger) OutputLevel() int {
- return l.Level
-}
-
-func (l *Logger) SetOutput(w io.Writer) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.out = w
- if w != os.Stdout {
- l.flag = RmColorFlags(l.flag)
- }
-}
-
-// SetOutput sets the output destination for the standard logger.
-func SetOutput(w io.Writer) {
- Std.SetOutput(w)
-}
-
-func SetLocation(loc *time.Location) {
- Std.SetLocation(loc)
-}
-
-func Location() *time.Location {
- return Std.Location()
-}
-
-// Flags returns the output flags for the standard logger.
-func Flags() int {
- return Std.Flags()
-}
-
-// SetFlags sets the output flags for the standard logger.
-func SetFlags(flag int) {
- Std.SetFlags(flag)
-}
-
-// Prefix returns the output prefix for the standard logger.
-func Prefix() string {
- return Std.Prefix()
-}
-
-// SetPrefix sets the output prefix for the standard logger.
-func SetPrefix(prefix string) {
- Std.SetPrefix(prefix)
-}
-
-func SetOutputLevel(lvl int) {
- Std.SetOutputLevel(lvl)
-}
-
-func OutputLevel() int {
- return Std.OutputLevel()
-}
-
-// -----------------------------------------
-
-// Print calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Print.
-func Print(v ...interface{}) {
- Std.Output("", Linfo, 2, fmt.Sprintln(v...))
-}
-
-// Printf calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Printf.
-func Printf(format string, v ...interface{}) {
- Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
-}
-
-// Println calls Output to print to the standard logger.
-// Arguments are handled in the manner of fmt.Println.
-func Println(v ...interface{}) {
- Std.Output("", Linfo, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func Debugf(format string, v ...interface{}) {
- Std.Output("", Ldebug, 2, fmt.Sprintf(format, v...))
-}
-
-func Debug(v ...interface{}) {
- Std.Output("", Ldebug, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func Infof(format string, v ...interface{}) {
- Std.Output("", Linfo, 2, fmt.Sprintf(format, v...))
-}
-
-func Info(v ...interface{}) {
- Std.Output("", Linfo, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func Warnf(format string, v ...interface{}) {
- Std.Output("", Lwarn, 2, fmt.Sprintf(format, v...))
-}
-
-func Warn(v ...interface{}) {
- Std.Output("", Lwarn, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-func Errorf(format string, v ...interface{}) {
- Std.Output("", Lerror, 2, fmt.Sprintf(format, v...))
-}
-
-func Error(v ...interface{}) {
- Std.Output("", Lerror, 2, fmt.Sprintln(v...))
-}
-
-// -----------------------------------------
-
-// Fatal is equivalent to Print() followed by a call to os.Exit(1).
-func Fatal(v ...interface{}) {
- Std.Output("", Lfatal, 2, fmt.Sprintln(v...))
-}
-
-// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
-func Fatalf(format string, v ...interface{}) {
- Std.Output("", Lfatal, 2, fmt.Sprintf(format, v...))
-}
-
-// -----------------------------------------
-
-// Panic is equivalent to Print() followed by a call to panic().
-func Panic(v ...interface{}) {
- Std.Output("", Lpanic, 2, fmt.Sprintln(v...))
-}
-
-// Panicf is equivalent to Printf() followed by a call to panic().
-func Panicf(format string, v ...interface{}) {
- Std.Output("", Lpanic, 2, fmt.Sprintf(format, v...))
-}
-
-// -----------------------------------------
-
-func Stack(v ...interface{}) {
- s := fmt.Sprint(v...)
- s += "\n"
- buf := make([]byte, 1024*1024)
- n := runtime.Stack(buf, true)
- s += string(buf[:n])
- s += "\n"
- Std.Output("", Lerror, 2, s)
-}
-
-// -----------------------------------------
diff --git a/vendor/github.com/lunny/nodb/.gitignore b/vendor/github.com/lunny/nodb/.gitignore
deleted file mode 100644
index 8f4051772a..0000000000
--- a/vendor/github.com/lunny/nodb/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-build
-*.pyc
-.DS_Store
-nohup.out
-build_config.mk
-var
-.vscode
diff --git a/vendor/github.com/lunny/nodb/LICENSE b/vendor/github.com/lunny/nodb/LICENSE
deleted file mode 100644
index 7ece9fdf5a..0000000000
--- a/vendor/github.com/lunny/nodb/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 siddontang
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/lunny/nodb/README.md b/vendor/github.com/lunny/nodb/README.md
deleted file mode 100644
index ebba36b158..0000000000
--- a/vendor/github.com/lunny/nodb/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# NoDB
-
-[中文](https://github.com/lunny/nodb/blob/master/README_CN.md)
-
-Nodb is a fork of [ledisdb](https://github.com/siddontang/ledisdb) and shrink version. It's get rid of all C or other language codes and only keep Go's. It aims to provide a nosql database library rather than a redis like server. So if you want a redis like server, ledisdb is the best choose.
-
-Nodb is a pure Go and high performance NoSQL database library. It supports some data structure like kv, list, hash, zset, bitmap, set.
-
-Nodb now use [goleveldb](https://github.com/syndtr/goleveldb) as backend to store data.
-
-## Features
-
-+ Rich data structure: KV, List, Hash, ZSet, Bitmap, Set.
-+ Stores lots of data, over the memory limit.
-+ Supports expiration and ttl.
-+ Easy to embed in your own Go application.
-
-## Install
-
- go get github.com/lunny/nodb
-
-## Package Example
-
-### Open And Select database
-```go
-import(
- "github.com/lunny/nodb"
- "github.com/lunny/nodb/config"
-)
-
-cfg := new(config.Config)
-cfg.DataDir = "./"
-dbs, err := nodb.Open(cfg)
-if err != nil {
- fmt.Printf("nodb: error opening db: %v", err)
-}
-
-db, _ := dbs.Select(0)
-```
-### KV
-
-KV is the most basic nodb type like any other key-value database.
-```go
-err := db.Set(key, value)
-value, err := db.Get(key)
-```
-### List
-
-List is simply lists of values, sorted by insertion order.
-You can push or pop value on the list head (left) or tail (right).
-```go
-err := db.LPush(key, value1)
-err := db.RPush(key, value2)
-value1, err := db.LPop(key)
-value2, err := db.RPop(key)
-```
-### Hash
-
-Hash is a map between fields and values.
-```go
-n, err := db.HSet(key, field1, value1)
-n, err := db.HSet(key, field2, value2)
-value1, err := db.HGet(key, field1)
-value2, err := db.HGet(key, field2)
-```
-### ZSet
-
-ZSet is a sorted collections of values.
-Every member of zset is associated with score, a int64 value which used to sort, from smallest to greatest score.
-Members are unique, but score may be same.
-```go
-n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
-ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
-```
-## Links
-
-+ [Ledisdb Official Website](http://ledisdb.com)
-+ [GoDoc](https://godoc.org/github.com/lunny/nodb)
-+ [GoWalker](https://gowalker.org/github.com/lunny/nodb)
-
-
-## Thanks
-
-Gmail: siddontang@gmail.com
diff --git a/vendor/github.com/lunny/nodb/README_CN.md b/vendor/github.com/lunny/nodb/README_CN.md
deleted file mode 100644
index 6fa286e393..0000000000
--- a/vendor/github.com/lunny/nodb/README_CN.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# NoDB
-
-[English](https://github.com/lunny/nodb/blob/master/README.md)
-
-Nodb is a fork and trimmed-down version of [ledisdb](https://github.com/siddontang/ledisdb). It removes all C and other-language dependencies and keeps only the Go code. The goal is to provide a NoSQL database library rather than a Redis-like server, so if you want a standalone server, use ledisdb directly.
-
-Nodb is a pure Go, high-performance NoSQL database library. It supports data structures such as kv, list, hash, zset, bitmap, and set.
-
-Nodb currently uses [goleveldb](https://github.com/syndtr/goleveldb) as its storage backend.
-
-## Features
-
-+ Rich data structures: KV, List, Hash, ZSet, Bitmap, Set.
-+ Persistent storage, not limited by available memory.
-+ High performance.
-+ Easy to embed in your own Go application.
-
-## Install
-
-    go get github.com/lunny/nodb
-
-## Examples
-
-### Open and select a database
-```go
-import (
-	"fmt"
-
-	"github.com/lunny/nodb"
-	"github.com/lunny/nodb/config"
-)
-
-cfg := new(config.Config)
-cfg.DataDir = "./"
-dbs, err := nodb.Open(cfg)
-if err != nil {
- fmt.Printf("nodb: error opening db: %v", err)
-}
-db, _ := dbs.Select(0)
-```
-### KV
-
-KV is the most basic type, as in any other NoSQL key-value store.
-```go
-err := db.Set(key, value)
-value, err := db.Get(key)
-```
-### List
-
-A list is a simple sequence of values, ordered by insertion. You can push or pop values at the head (left) or tail (right).
-```go
-err := db.LPush(key, value1)
-err := db.RPush(key, value2)
-value1, err := db.LPop(key)
-value2, err := db.RPop(key)
-```
-### Hash
-
-A hash is a map between fields and values.
-```go
-n, err := db.HSet(key, field1, value1)
-n, err := db.HSet(key, field2, value2)
-value1, err := db.HGet(key, field1)
-value2, err := db.HGet(key, field2)
-```
-### ZSet
-
-A ZSet is a sorted collection of values. Every zset member is associated with a score, an int64 value used to sort members from smallest to greatest. Members are unique, but scores may repeat.
-```go
-n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
-ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
-```
-
-## Links
-
-+ [Ledisdb Official Website](http://ledisdb.com)
-+ [GoDoc](https://godoc.org/github.com/lunny/nodb)
-+ [GoWalker](https://gowalker.org/github.com/lunny/nodb)
-
-
-## Thanks
-
-Gmail: siddontang@gmail.com
diff --git a/vendor/github.com/lunny/nodb/batch.go b/vendor/github.com/lunny/nodb/batch.go
deleted file mode 100644
index e69d96a122..0000000000
--- a/vendor/github.com/lunny/nodb/batch.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package nodb
-
-import (
- "sync"
-
- "github.com/lunny/nodb/store"
-)
-
-type batch struct {
- l *Nodb
-
- store.WriteBatch
-
- sync.Locker
-
- logs [][]byte
-
- tx *Tx
-}
-
-func (b *batch) Commit() error {
- b.l.commitLock.Lock()
- defer b.l.commitLock.Unlock()
-
- err := b.WriteBatch.Commit()
-
- if b.l.binlog != nil {
- if err == nil {
- if b.tx == nil {
- b.l.binlog.Log(b.logs...)
- } else {
- b.tx.logs = append(b.tx.logs, b.logs...)
- }
- }
- b.logs = [][]byte{}
- }
-
- return err
-}
-
-func (b *batch) Lock() {
- b.Locker.Lock()
-}
-
-func (b *batch) Unlock() {
- if b.l.binlog != nil {
- b.logs = [][]byte{}
- }
- b.WriteBatch.Rollback()
- b.Locker.Unlock()
-}
-
-func (b *batch) Put(key []byte, value []byte) {
- if b.l.binlog != nil {
- buf := encodeBinLogPut(key, value)
- b.logs = append(b.logs, buf)
- }
- b.WriteBatch.Put(key, value)
-}
-
-func (b *batch) Delete(key []byte) {
- if b.l.binlog != nil {
- buf := encodeBinLogDelete(key)
- b.logs = append(b.logs, buf)
- }
- b.WriteBatch.Delete(key)
-}
-
-type dbBatchLocker struct {
- l *sync.Mutex
- wrLock *sync.RWMutex
-}
-
-func (l *dbBatchLocker) Lock() {
- l.wrLock.RLock()
- l.l.Lock()
-}
-
-func (l *dbBatchLocker) Unlock() {
- l.l.Unlock()
- l.wrLock.RUnlock()
-}
-
-type txBatchLocker struct {
-}
-
-func (l *txBatchLocker) Lock() {}
-func (l *txBatchLocker) Unlock() {}
-
-type multiBatchLocker struct {
-}
-
-func (l *multiBatchLocker) Lock() {}
-func (l *multiBatchLocker) Unlock() {}
-
-func (l *Nodb) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch {
- b := new(batch)
- b.l = l
- b.WriteBatch = wb
-
- b.tx = tx
- b.Locker = locker
-
- b.logs = [][]byte{}
- return b
-}
diff --git a/vendor/github.com/lunny/nodb/binlog.go b/vendor/github.com/lunny/nodb/binlog.go
deleted file mode 100644
index 4c094d9463..0000000000
--- a/vendor/github.com/lunny/nodb/binlog.go
+++ /dev/null
@@ -1,391 +0,0 @@
-package nodb
-
-import (
- "bufio"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/lunny/log"
- "github.com/lunny/nodb/config"
-)
-
-type BinLogHead struct {
- CreateTime uint32
- BatchId uint32
- PayloadLen uint32
-}
-
-func (h *BinLogHead) Len() int {
- return 12
-}
-
-func (h *BinLogHead) Write(w io.Writer) error {
- if err := binary.Write(w, binary.BigEndian, h.CreateTime); err != nil {
- return err
- }
-
- if err := binary.Write(w, binary.BigEndian, h.BatchId); err != nil {
- return err
- }
-
- if err := binary.Write(w, binary.BigEndian, h.PayloadLen); err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *BinLogHead) handleReadError(err error) error {
- if err == io.EOF {
- return io.ErrUnexpectedEOF
- } else {
- return err
- }
-}
-
-func (h *BinLogHead) Read(r io.Reader) error {
- var err error
- if err = binary.Read(r, binary.BigEndian, &h.CreateTime); err != nil {
- return err
- }
-
- if err = binary.Read(r, binary.BigEndian, &h.BatchId); err != nil {
- return h.handleReadError(err)
- }
-
- if err = binary.Read(r, binary.BigEndian, &h.PayloadLen); err != nil {
- return h.handleReadError(err)
- }
-
- return nil
-}
-
-func (h *BinLogHead) InSameBatch(ho *BinLogHead) bool {
- if h.CreateTime == ho.CreateTime && h.BatchId == ho.BatchId {
- return true
- } else {
- return false
- }
-}
-
-/*
-index file format:
-ledis-bin.00001
-ledis-bin.00002
-ledis-bin.00003
-
-log file format:
-
-Log: Head|PayloadData
-
-Head: createTime|batchId|payloadLen
-
-*/
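For orientation, the head described above is three big-endian uint32 fields, 12 bytes on the wire (matching `BinLogHead.Len`). A minimal standalone sketch of encoding one, with made-up field values:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

// binLogHead mirrors nodb's BinLogHead: three big-endian uint32
// fields, 12 bytes total on the wire.
type binLogHead struct {
	CreateTime uint32
	BatchId    uint32
	PayloadLen uint32
}

func main() {
	h := binLogHead{
		CreateTime: uint32(time.Now().Unix()),
		BatchId:    1, // made-up batch id
		PayloadLen: 5, // length of the payload that would follow
	}

	var buf bytes.Buffer
	// binary.Write encodes the fixed-size struct field by field,
	// just like the three binary.Write calls in BinLogHead.Write.
	if err := binary.Write(&buf, binary.BigEndian, h); err != nil {
		panic(err)
	}
	fmt.Printf("head is %d bytes: % x\n", buf.Len(), buf.Bytes())
}
```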
-
-type BinLog struct {
- sync.Mutex
-
- path string
-
- cfg *config.BinLogConfig
-
- logFile *os.File
-
- logWb *bufio.Writer
-
- indexName string
- logNames []string
- lastLogIndex int64
-
- batchId uint32
-
- ch chan struct{}
-}
-
-func NewBinLog(cfg *config.Config) (*BinLog, error) {
- l := new(BinLog)
-
- l.cfg = &cfg.BinLog
- l.cfg.Adjust()
-
- l.path = path.Join(cfg.DataDir, "binlog")
-
- if err := os.MkdirAll(l.path, os.ModePerm); err != nil {
- return nil, err
- }
-
- l.logNames = make([]string, 0, 16)
-
- l.ch = make(chan struct{})
-
- if err := l.loadIndex(); err != nil {
- return nil, err
- }
-
- return l, nil
-}
-
-func (l *BinLog) flushIndex() error {
- data := strings.Join(l.logNames, "\n")
-
- bakName := fmt.Sprintf("%s.bak", l.indexName)
- f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666)
- if err != nil {
- log.Error("create binlog bak index error %s", err.Error())
- return err
- }
-
- if _, err := f.WriteString(data); err != nil {
- log.Error("write binlog index error %s", err.Error())
- f.Close()
- return err
- }
-
- f.Close()
-
- if err := os.Rename(bakName, l.indexName); err != nil {
- log.Error("rename binlog bak index error %s", err.Error())
- return err
- }
-
- return nil
-}
-
-func (l *BinLog) loadIndex() error {
-	l.indexName = path.Join(l.path, "ledis-bin.index")
- if _, err := os.Stat(l.indexName); os.IsNotExist(err) {
- //no index file, nothing to do
- } else {
- indexData, err := ioutil.ReadFile(l.indexName)
- if err != nil {
- return err
- }
-
- lines := strings.Split(string(indexData), "\n")
- for _, line := range lines {
- line = strings.Trim(line, "\r\n ")
- if len(line) == 0 {
- continue
- }
-
- if _, err := os.Stat(path.Join(l.path, line)); err != nil {
- log.Error("load index line %s error %s", line, err.Error())
- return err
- } else {
- l.logNames = append(l.logNames, line)
- }
- }
- }
- if l.cfg.MaxFileNum > 0 && len(l.logNames) > l.cfg.MaxFileNum {
- //remove oldest logfile
- if err := l.Purge(len(l.logNames) - l.cfg.MaxFileNum); err != nil {
- return err
- }
- }
-
- var err error
- if len(l.logNames) == 0 {
- l.lastLogIndex = 1
- } else {
- lastName := l.logNames[len(l.logNames)-1]
-
- if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil {
- log.Error("invalid logfile name %s", err.Error())
- return err
- }
-
-		//like MySQL: if the server restarts, a new binlog file will be created
- l.lastLogIndex++
- }
-
- return nil
-}
-
-func (l *BinLog) getLogFile() string {
- return l.FormatLogFileName(l.lastLogIndex)
-}
-
-func (l *BinLog) openNewLogFile() error {
- var err error
- lastName := l.getLogFile()
-
- logPath := path.Join(l.path, lastName)
- if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil {
- log.Error("open new logfile error %s", err.Error())
- return err
- }
-
- if l.cfg.MaxFileNum > 0 && len(l.logNames) == l.cfg.MaxFileNum {
- l.purge(1)
- }
-
- l.logNames = append(l.logNames, lastName)
-
- if l.logWb == nil {
- l.logWb = bufio.NewWriterSize(l.logFile, 1024)
- } else {
- l.logWb.Reset(l.logFile)
- }
-
- if err = l.flushIndex(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (l *BinLog) checkLogFileSize() bool {
- if l.logFile == nil {
- return false
- }
-
- st, _ := l.logFile.Stat()
- if st.Size() >= int64(l.cfg.MaxFileSize) {
- l.closeLog()
- return true
- }
-
- return false
-}
-
-func (l *BinLog) closeLog() {
- l.lastLogIndex++
-
- l.logFile.Close()
- l.logFile = nil
-}
-
-func (l *BinLog) purge(n int) {
- for i := 0; i < n; i++ {
- logPath := path.Join(l.path, l.logNames[i])
- os.Remove(logPath)
- }
-
- copy(l.logNames[0:], l.logNames[n:])
- l.logNames = l.logNames[0 : len(l.logNames)-n]
-}
-
-func (l *BinLog) Close() {
- if l.logFile != nil {
- l.logFile.Close()
- l.logFile = nil
- }
-}
-
-func (l *BinLog) LogNames() []string {
- return l.logNames
-}
-
-func (l *BinLog) LogFileName() string {
- return l.getLogFile()
-}
-
-func (l *BinLog) LogFilePos() int64 {
- if l.logFile == nil {
- return 0
- } else {
- st, _ := l.logFile.Stat()
- return st.Size()
- }
-}
-
-func (l *BinLog) LogFileIndex() int64 {
- return l.lastLogIndex
-}
-
-func (l *BinLog) FormatLogFileName(index int64) string {
- return fmt.Sprintf("ledis-bin.%07d", index)
-}
-
-func (l *BinLog) FormatLogFilePath(index int64) string {
- return path.Join(l.path, l.FormatLogFileName(index))
-}
-
-func (l *BinLog) LogPath() string {
- return l.path
-}
-
-func (l *BinLog) Purge(n int) error {
- l.Lock()
- defer l.Unlock()
-
- if len(l.logNames) == 0 {
- return nil
- }
-
- if n >= len(l.logNames) {
- n = len(l.logNames)
- //can not purge current log file
- if l.logNames[n-1] == l.getLogFile() {
- n = n - 1
- }
- }
-
- l.purge(n)
-
- return l.flushIndex()
-}
-
-func (l *BinLog) PurgeAll() error {
- l.Lock()
- defer l.Unlock()
-
- l.closeLog()
- return l.openNewLogFile()
-}
-
-func (l *BinLog) Log(args ...[]byte) error {
- l.Lock()
- defer l.Unlock()
-
- var err error
-
- if l.logFile == nil {
- if err = l.openNewLogFile(); err != nil {
- return err
- }
- }
-
- head := &BinLogHead{}
-
- head.CreateTime = uint32(time.Now().Unix())
- head.BatchId = l.batchId
-
- l.batchId++
-
- for _, data := range args {
- head.PayloadLen = uint32(len(data))
-
- if err := head.Write(l.logWb); err != nil {
- return err
- }
-
- if _, err := l.logWb.Write(data); err != nil {
- return err
- }
- }
-
- if err = l.logWb.Flush(); err != nil {
- log.Error("write log error %s", err.Error())
- return err
- }
-
- l.checkLogFileSize()
-
- close(l.ch)
- l.ch = make(chan struct{})
-
- return nil
-}
-
-func (l *BinLog) Wait() <-chan struct{} {
- return l.ch
-}
diff --git a/vendor/github.com/lunny/nodb/binlog_util.go b/vendor/github.com/lunny/nodb/binlog_util.go
deleted file mode 100644
index 22124dda07..0000000000
--- a/vendor/github.com/lunny/nodb/binlog_util.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "strconv"
-)
-
-var (
- errBinLogDeleteType = errors.New("invalid bin log delete type")
- errBinLogPutType = errors.New("invalid bin log put type")
- errBinLogCommandType = errors.New("invalid bin log command type")
-)
-
-func encodeBinLogDelete(key []byte) []byte {
- buf := make([]byte, 1+len(key))
- buf[0] = BinLogTypeDeletion
- copy(buf[1:], key)
- return buf
-}
-
-func decodeBinLogDelete(sz []byte) ([]byte, error) {
- if len(sz) < 1 || sz[0] != BinLogTypeDeletion {
- return nil, errBinLogDeleteType
- }
-
- return sz[1:], nil
-}
-
-func encodeBinLogPut(key []byte, value []byte) []byte {
- buf := make([]byte, 3+len(key)+len(value))
- buf[0] = BinLogTypePut
- pos := 1
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
- copy(buf[pos:], key)
- pos += len(key)
- copy(buf[pos:], value)
-
- return buf
-}
-
-func decodeBinLogPut(sz []byte) ([]byte, []byte, error) {
- if len(sz) < 3 || sz[0] != BinLogTypePut {
- return nil, nil, errBinLogPutType
- }
-
- keyLen := int(binary.BigEndian.Uint16(sz[1:]))
- if 3+keyLen > len(sz) {
- return nil, nil, errBinLogPutType
- }
-
- return sz[3 : 3+keyLen], sz[3+keyLen:], nil
-}
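To make the put-event layout concrete: one type byte, a 2-byte big-endian key length, the key, then the value fills the rest. The helpers above are unexported, so this standalone sketch reimplements the same byte layout for a round trip:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const binLogTypePut = 0x1 // matches nodb's BinLogTypePut

// encodePut mirrors encodeBinLogPut: type byte, 2-byte big-endian
// key length, key bytes, then the value occupies the remainder.
func encodePut(key, value []byte) []byte {
	buf := make([]byte, 3+len(key)+len(value))
	buf[0] = binLogTypePut
	binary.BigEndian.PutUint16(buf[1:], uint16(len(key)))
	copy(buf[3:], key)
	copy(buf[3+len(key):], value)
	return buf
}

// decodePut reverses encodePut, with the same bounds checks.
func decodePut(event []byte) (key, value []byte, err error) {
	if len(event) < 3 || event[0] != binLogTypePut {
		return nil, nil, fmt.Errorf("invalid put event")
	}
	keyLen := int(binary.BigEndian.Uint16(event[1:]))
	if 3+keyLen > len(event) {
		return nil, nil, fmt.Errorf("invalid put event")
	}
	return event[3 : 3+keyLen], event[3+keyLen:], nil
}

func main() {
	ev := encodePut([]byte("k1"), []byte("v1"))
	k, v, _ := decodePut(ev)
	fmt.Printf("% x -> key=%s value=%s\n", ev, k, v)
}
```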
-
-func FormatBinLogEvent(event []byte) (string, error) {
- logType := uint8(event[0])
-
- var err error
- var k []byte
- var v []byte
-
-	buf := make([]byte, 0, 1024)
-
- switch logType {
- case BinLogTypePut:
- k, v, err = decodeBinLogPut(event)
- buf = append(buf, "PUT "...)
- case BinLogTypeDeletion:
- k, err = decodeBinLogDelete(event)
- buf = append(buf, "DELETE "...)
- default:
- err = errInvalidBinLogEvent
- }
-
- if err != nil {
- return "", err
- }
-
- if buf, err = formatDataKey(buf, k); err != nil {
- return "", err
- }
-
-	if len(v) != 0 {
- buf = append(buf, fmt.Sprintf(" %q", v)...)
- }
-
- return String(buf), nil
-}
-
-func formatDataKey(buf []byte, k []byte) ([]byte, error) {
- if len(k) < 2 {
- return nil, errInvalidBinLogEvent
- }
-
- buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...)
- buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)
-
- db := new(DB)
- db.index = k[0]
-
-	//TODO: format at the respective place
-
- switch k[1] {
- case KVType:
- if key, err := db.decodeKVKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case HashType:
- if key, field, err := db.hDecodeHashKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(field))
- }
- case HSizeType:
- if key, err := db.hDecodeSizeKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case ListType:
- if key, seq, err := db.lDecodeListKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendInt(buf, int64(seq), 10)
- }
- case LMetaType:
- if key, err := db.lDecodeMetaKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case ZSetType:
- if key, m, err := db.zDecodeSetKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(m))
- }
- case ZSizeType:
- if key, err := db.zDecodeSizeKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case ZScoreType:
- if key, m, score, err := db.zDecodeScoreKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(m))
- buf = append(buf, ' ')
- buf = strconv.AppendInt(buf, score, 10)
- }
- case BitType:
- if key, seq, err := db.bDecodeBinKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendUint(buf, uint64(seq), 10)
- }
- case BitMetaType:
- if key, err := db.bDecodeMetaKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case SetType:
- if key, member, err := db.sDecodeSetKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(member))
- }
- case SSizeType:
- if key, err := db.sDecodeSizeKey(k); err != nil {
- return nil, err
- } else {
- buf = strconv.AppendQuote(buf, String(key))
- }
- case ExpTimeType:
- if tp, key, t, err := db.expDecodeTimeKey(k); err != nil {
- return nil, err
- } else {
- buf = append(buf, TypeName[tp]...)
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(key))
- buf = append(buf, ' ')
- buf = strconv.AppendInt(buf, t, 10)
- }
- case ExpMetaType:
- if tp, key, err := db.expDecodeMetaKey(k); err != nil {
- return nil, err
- } else {
- buf = append(buf, TypeName[tp]...)
- buf = append(buf, ' ')
- buf = strconv.AppendQuote(buf, String(key))
- }
- default:
- return nil, errInvalidBinLogEvent
- }
-
- return buf, nil
-}
diff --git a/vendor/github.com/lunny/nodb/config/config.go b/vendor/github.com/lunny/nodb/config/config.go
deleted file mode 100644
index 3b44d3043f..0000000000
--- a/vendor/github.com/lunny/nodb/config/config.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package config
-
-import (
- "io/ioutil"
-
- "github.com/BurntSushi/toml"
-)
-
-type Size int
-
-const (
- DefaultAddr string = "127.0.0.1:6380"
- DefaultHttpAddr string = "127.0.0.1:11181"
-
- DefaultDBName string = "goleveldb"
-
- DefaultDataDir string = "./data"
-)
-
-const (
- MaxBinLogFileSize int = 1024 * 1024 * 1024
- MaxBinLogFileNum int = 10000
-
- DefaultBinLogFileSize int = MaxBinLogFileSize
- DefaultBinLogFileNum int = 10
-)
-
-type LevelDBConfig struct {
- Compression bool `toml:"compression"`
- BlockSize int `toml:"block_size"`
- WriteBufferSize int `toml:"write_buffer_size"`
- CacheSize int `toml:"cache_size"`
- MaxOpenFiles int `toml:"max_open_files"`
-}
-
-type LMDBConfig struct {
- MapSize int `toml:"map_size"`
- NoSync bool `toml:"nosync"`
-}
-
-type BinLogConfig struct {
- MaxFileSize int `toml:"max_file_size"`
- MaxFileNum int `toml:"max_file_num"`
-}
-
-type Config struct {
- DataDir string `toml:"data_dir"`
-
- DBName string `toml:"db_name"`
-
- LevelDB LevelDBConfig `toml:"leveldb"`
-
- LMDB LMDBConfig `toml:"lmdb"`
-
- BinLog BinLogConfig `toml:"binlog"`
-
- SlaveOf string `toml:"slaveof"`
-
- AccessLog string `toml:"access_log"`
-}
-
-func NewConfigWithFile(fileName string) (*Config, error) {
- data, err := ioutil.ReadFile(fileName)
- if err != nil {
- return nil, err
- }
-
- return NewConfigWithData(data)
-}
-
-func NewConfigWithData(data []byte) (*Config, error) {
- cfg := NewConfigDefault()
-
- _, err := toml.Decode(string(data), cfg)
- if err != nil {
- return nil, err
- }
-
- return cfg, nil
-}
-
-func NewConfigDefault() *Config {
- cfg := new(Config)
-
- cfg.DataDir = DefaultDataDir
-
- cfg.DBName = DefaultDBName
-
- // disable binlog
- cfg.BinLog.MaxFileNum = 0
- cfg.BinLog.MaxFileSize = 0
-
- // disable replication
- cfg.SlaveOf = ""
-
- // disable access log
- cfg.AccessLog = ""
-
- cfg.LMDB.MapSize = 20 * 1024 * 1024
- cfg.LMDB.NoSync = true
-
- return cfg
-}
-
-func (cfg *LevelDBConfig) Adjust() {
- if cfg.CacheSize <= 0 {
- cfg.CacheSize = 4 * 1024 * 1024
- }
-
- if cfg.BlockSize <= 0 {
- cfg.BlockSize = 4 * 1024
- }
-
- if cfg.WriteBufferSize <= 0 {
- cfg.WriteBufferSize = 4 * 1024 * 1024
- }
-
- if cfg.MaxOpenFiles < 1024 {
- cfg.MaxOpenFiles = 1024
- }
-}
-
-func (cfg *BinLogConfig) Adjust() {
- if cfg.MaxFileSize <= 0 {
- cfg.MaxFileSize = DefaultBinLogFileSize
- } else if cfg.MaxFileSize > MaxBinLogFileSize {
- cfg.MaxFileSize = MaxBinLogFileSize
- }
-
- if cfg.MaxFileNum <= 0 {
- cfg.MaxFileNum = DefaultBinLogFileNum
- } else if cfg.MaxFileNum > MaxBinLogFileNum {
- cfg.MaxFileNum = MaxBinLogFileNum
- }
-}
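As a usage illustration, a small sketch that loads a config from a TOML fragment via `NewConfigWithData` and then clamps the LevelDB values with `Adjust`; the paths and numbers are made up:

```go
package main

import (
	"fmt"

	"github.com/lunny/nodb/config"
)

func main() {
	// A TOML fragment using the tags declared on Config; anything
	// left unset keeps the defaults from NewConfigDefault.
	data := []byte(`
data_dir = "/tmp/nodb_data"
db_name = "goleveldb"

[leveldb]
block_size = 32768
cache_size = 4194304
`)

	cfg, err := config.NewConfigWithData(data)
	if err != nil {
		panic(err)
	}

	cfg.LevelDB.Adjust() // clamps unset/low values to sane minimums
	fmt.Println(cfg.DataDir, cfg.DBName, cfg.LevelDB.WriteBufferSize)
}
```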
diff --git a/vendor/github.com/lunny/nodb/config/config.toml b/vendor/github.com/lunny/nodb/config/config.toml
deleted file mode 100644
index 2a3a2466e0..0000000000
--- a/vendor/github.com/lunny/nodb/config/config.toml
+++ /dev/null
@@ -1,45 +0,0 @@
-# LedisDB configuration
-
-# Server listen address
-addr = "127.0.0.1:6380"
-
-# Server http listen address, set empty to disable
-http_addr = "127.0.0.1:11181"
-
-# Data store path, all ledisdb's data will be saved here
-data_dir = "/tmp/ledis_server"
-
-# Log server command, set empty to disable
-access_log = ""
-
-# Set slaveof to enable replication from master, empty, no replication
-slaveof = ""
-
-# Choose which backend storage to use, now support:
-#
-# leveldb
-# rocksdb
-# goleveldb
-# lmdb
-# boltdb
-# hyperleveldb
-# memory
-#
-db_name = "leveldb"
-
-[leveldb]
-compression = false
-block_size = 32768
-write_buffer_size = 67108864
-cache_size = 524288000
-max_open_files = 1024
-
-[lmdb]
-map_size = 524288000
-nosync = true
-
-[binlog]
-max_file_size = 0
-max_file_num = 0
-
-
diff --git a/vendor/github.com/lunny/nodb/const.go b/vendor/github.com/lunny/nodb/const.go
deleted file mode 100644
index 446dae634e..0000000000
--- a/vendor/github.com/lunny/nodb/const.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package nodb
-
-import (
- "errors"
-)
-
-const (
- NoneType byte = 0
- KVType byte = 1
- HashType byte = 2
- HSizeType byte = 3
- ListType byte = 4
- LMetaType byte = 5
- ZSetType byte = 6
- ZSizeType byte = 7
- ZScoreType byte = 8
- BitType byte = 9
- BitMetaType byte = 10
- SetType byte = 11
- SSizeType byte = 12
-
- maxDataType byte = 100
-
- ExpTimeType byte = 101
- ExpMetaType byte = 102
-)
-
-var (
- TypeName = map[byte]string{
- KVType: "kv",
- HashType: "hash",
- HSizeType: "hsize",
- ListType: "list",
- LMetaType: "lmeta",
- ZSetType: "zset",
- ZSizeType: "zsize",
- ZScoreType: "zscore",
- BitType: "bit",
- BitMetaType: "bitmeta",
- SetType: "set",
- SSizeType: "ssize",
- ExpTimeType: "exptime",
- ExpMetaType: "expmeta",
- }
-)
-
-const (
- defaultScanCount int = 10
-)
-
-var (
- errKeySize = errors.New("invalid key size")
- errValueSize = errors.New("invalid value size")
- errHashFieldSize = errors.New("invalid hash field size")
- errSetMemberSize = errors.New("invalid set member size")
- errZSetMemberSize = errors.New("invalid zset member size")
- errExpireValue = errors.New("invalid expire value")
-)
-
-const (
- //we don't support too many databases
- MaxDBNumber uint8 = 16
-
- //max key size
- MaxKeySize int = 1024
-
- //max hash field size
- MaxHashFieldSize int = 1024
-
- //max zset member size
- MaxZSetMemberSize int = 1024
-
- //max set member size
- MaxSetMemberSize int = 1024
-
- //max value size
- MaxValueSize int = 10 * 1024 * 1024
-)
-
-var (
- ErrScoreMiss = errors.New("zset score miss")
-)
-
-const (
- BinLogTypeDeletion uint8 = 0x0
- BinLogTypePut uint8 = 0x1
- BinLogTypeCommand uint8 = 0x2
-)
-
-const (
- DBAutoCommit uint8 = 0x0
- DBInTransaction uint8 = 0x1
- DBInMulti uint8 = 0x2
-)
-
-var (
- Version = "0.1"
-)
diff --git a/vendor/github.com/lunny/nodb/doc.go b/vendor/github.com/lunny/nodb/doc.go
deleted file mode 100644
index 2f7df33ffd..0000000000
--- a/vendor/github.com/lunny/nodb/doc.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Package nodb is a high-performance embedded NoSQL database library.
-//
-// nodb supports various data structures such as kv, list, hash and zset, similar to redis.
-//
-// Other features include binlog replication and data with a limited time-to-live (TTL).
-//
-// Usage
-//
-// First create a nodb instance before use:
-//
-// l := nodb.Open(cfg)
-//
-// cfg is a Config instance which holds nodb's configuration,
-// such as DataDir (the root directory where nodb stores its data).
-//
-// After you create a nodb instance, you can select a DB to store your data:
-//
-// db, _ := l.Select(0)
-//
-// A DB is selected by an index; nodb supports only 16 databases, so the valid index range is [0, 15].
-//
-// KV
-//
-// KV is the most basic nodb type, as in any other key-value database.
-//
-// err := db.Set(key, value)
-// value, err := db.Get(key)
-//
-// List
-//
-// A list is simply a sequence of values, ordered by insertion.
-// You can push or pop values at the list head (left) or tail (right).
-//
-// err := db.LPush(key, value1)
-// err := db.RPush(key, value2)
-// value1, err := db.LPop(key)
-// value2, err := db.RPop(key)
-//
-// Hash
-//
-// Hash is a map between fields and values.
-//
-// n, err := db.HSet(key, field1, value1)
-// n, err := db.HSet(key, field2, value2)
-// value1, err := db.HGet(key, field1)
-// value2, err := db.HGet(key, field2)
-//
-// ZSet
-//
-// A ZSet is a sorted collection of values.
-// Every member of a zset is associated with a score, an int64 value used to sort members from smallest to greatest score.
-// Members are unique, but scores may repeat.
-//
-// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
-// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
-//
-// Binlog
-//
-// nodb supports a binlog, so you can sync it to another server for replication. To enable binlog support, set BinLog.MaxFileNum and BinLog.MaxFileSize to values greater than zero in the config.
-//
-package nodb
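Pulling the steps of this doc comment into one place, here is a minimal runnable sketch. The data directory is arbitrary, and Set/Get are used exactly as in the KV example above:

```go
package main

import (
	"fmt"

	"github.com/lunny/nodb"
	"github.com/lunny/nodb/config"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/nodb_example" // arbitrary working directory

	l, err := nodb.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	db, _ := l.Select(0) // index must be in [0, 15]

	if err := db.Set([]byte("hello"), []byte("world")); err != nil {
		panic(err)
	}
	v, _ := db.Get([]byte("hello"))
	fmt.Println(string(v)) // world
}
```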
diff --git a/vendor/github.com/lunny/nodb/dump.go b/vendor/github.com/lunny/nodb/dump.go
deleted file mode 100644
index 3c9722e00d..0000000000
--- a/vendor/github.com/lunny/nodb/dump.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package nodb
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "io"
- "os"
-
- "github.com/siddontang/go-snappy/snappy"
-)
-
-//dump format:
-// fileIndex(bigendian int64)|filePos(bigendian int64)
-// |keylen(bigendian uint16)|key|valuelen(bigendian uint32)|value......
-//
-//key and value are both snappy-compressed for fast transfer over the network
-
-type BinLogAnchor struct {
- LogFileIndex int64
- LogPos int64
-}
-
-func (m *BinLogAnchor) WriteTo(w io.Writer) error {
- if err := binary.Write(w, binary.BigEndian, m.LogFileIndex); err != nil {
- return err
- }
-
- if err := binary.Write(w, binary.BigEndian, m.LogPos); err != nil {
- return err
- }
- return nil
-}
-
-func (m *BinLogAnchor) ReadFrom(r io.Reader) error {
- err := binary.Read(r, binary.BigEndian, &m.LogFileIndex)
- if err != nil {
- return err
- }
-
- err = binary.Read(r, binary.BigEndian, &m.LogPos)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (l *Nodb) DumpFile(path string) error {
- f, err := os.Create(path)
- if err != nil {
- return err
- }
- defer f.Close()
-
- return l.Dump(f)
-}
-
-func (l *Nodb) Dump(w io.Writer) error {
- m := new(BinLogAnchor)
-
- var err error
-
- l.wLock.Lock()
- defer l.wLock.Unlock()
-
- if l.binlog != nil {
- m.LogFileIndex = l.binlog.LogFileIndex()
- m.LogPos = l.binlog.LogFilePos()
- }
-
- wb := bufio.NewWriterSize(w, 4096)
- if err = m.WriteTo(wb); err != nil {
- return err
- }
-
- it := l.ldb.NewIterator()
- it.SeekToFirst()
-
- compressBuf := make([]byte, 4096)
-
- var key []byte
- var value []byte
- for ; it.Valid(); it.Next() {
- key = it.RawKey()
- value = it.RawValue()
-
- if key, err = snappy.Encode(compressBuf, key); err != nil {
- return err
- }
-
- if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil {
- return err
- }
-
- if _, err = wb.Write(key); err != nil {
- return err
- }
-
- if value, err = snappy.Encode(compressBuf, value); err != nil {
- return err
- }
-
- if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil {
- return err
- }
-
- if _, err = wb.Write(value); err != nil {
- return err
- }
- }
-
- if err = wb.Flush(); err != nil {
- return err
- }
-
- compressBuf = nil
-
- return nil
-}
-
-func (l *Nodb) LoadDumpFile(path string) (*BinLogAnchor, error) {
- f, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return l.LoadDump(f)
-}
-
-func (l *Nodb) LoadDump(r io.Reader) (*BinLogAnchor, error) {
- l.wLock.Lock()
- defer l.wLock.Unlock()
-
- info := new(BinLogAnchor)
-
- rb := bufio.NewReaderSize(r, 4096)
-
- err := info.ReadFrom(rb)
- if err != nil {
- return nil, err
- }
-
- var keyLen uint16
- var valueLen uint32
-
- var keyBuf bytes.Buffer
- var valueBuf bytes.Buffer
-
- deKeyBuf := make([]byte, 4096)
- deValueBuf := make([]byte, 4096)
-
- var key, value []byte
-
- for {
- if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
- return nil, err
- } else if err == io.EOF {
- break
- }
-
- if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
- return nil, err
- }
-
- if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil {
- return nil, err
- }
-
- if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
- return nil, err
- }
-
- if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
- return nil, err
- }
-
- if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil {
- return nil, err
- }
-
- if err = l.ldb.Put(key, value); err != nil {
- return nil, err
- }
-
- keyBuf.Reset()
- valueBuf.Reset()
- }
-
- deKeyBuf = nil
- deValueBuf = nil
-
-	//if binlog is enabled, delete all existing binlogs and open a new one to keep handling simple
- if l.binlog != nil {
- l.binlog.PurgeAll()
- }
-
- return info, nil
-}
diff --git a/vendor/github.com/lunny/nodb/info.go b/vendor/github.com/lunny/nodb/info.go
deleted file mode 100644
index 3fd37e3d44..0000000000
--- a/vendor/github.com/lunny/nodb/info.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package nodb
-
-// todo, add info
-
-// type Keyspace struct {
-// Kvs int `json:"kvs"`
-// KvExpires int `json:"kv_expires"`
-
-// Lists int `json:"lists"`
-// ListExpires int `json:"list_expires"`
-
-// Bitmaps int `json:"bitmaps"`
-// BitmapExpires int `json:"bitmap_expires"`
-
-// ZSets int `json:"zsets"`
-// ZSetExpires int `json:"zset_expires"`
-
-// Hashes int `json:"hashes"`
-// HashExpires int `json:"hash_expires"`
-// }
-
-// type Info struct {
-// KeySpaces [MaxDBNumber]Keyspace
-// }
diff --git a/vendor/github.com/lunny/nodb/multi.go b/vendor/github.com/lunny/nodb/multi.go
deleted file mode 100644
index ca581ce9a2..0000000000
--- a/vendor/github.com/lunny/nodb/multi.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package nodb
-
-import (
- "errors"
- "fmt"
-)
-
-var (
- ErrNestMulti = errors.New("nest multi not supported")
- ErrMultiDone = errors.New("multi has been closed")
-)
-
-type Multi struct {
- *DB
-}
-
-func (db *DB) IsInMulti() bool {
- return db.status == DBInMulti
-}
-
-// Multi begins a multi to execute commands; it blocks any other write
-// operations until you close it. Unlike a transaction, a multi cannot roll back.
-func (db *DB) Multi() (*Multi, error) {
- if db.IsInMulti() {
- return nil, ErrNestMulti
- }
-
- m := new(Multi)
-
- m.DB = new(DB)
- m.DB.status = DBInMulti
-
- m.DB.l = db.l
-
- m.l.wLock.Lock()
-
- m.DB.sdb = db.sdb
-
- m.DB.bucket = db.sdb
-
- m.DB.index = db.index
-
- m.DB.kvBatch = m.newBatch()
- m.DB.listBatch = m.newBatch()
- m.DB.hashBatch = m.newBatch()
- m.DB.zsetBatch = m.newBatch()
- m.DB.binBatch = m.newBatch()
- m.DB.setBatch = m.newBatch()
-
- return m, nil
-}
-
-func (m *Multi) newBatch() *batch {
- return m.l.newBatch(m.bucket.NewWriteBatch(), &multiBatchLocker{}, nil)
-}
-
-func (m *Multi) Close() error {
- if m.bucket == nil {
- return ErrMultiDone
- }
- m.l.wLock.Unlock()
- m.bucket = nil
- return nil
-}
-
-func (m *Multi) Select(index int) error {
- if index < 0 || index >= int(MaxDBNumber) {
- return fmt.Errorf("invalid db index %d", index)
- }
-
- m.DB.index = uint8(index)
- return nil
-}
diff --git a/vendor/github.com/lunny/nodb/nodb.go b/vendor/github.com/lunny/nodb/nodb.go
deleted file mode 100644
index fdd0272c94..0000000000
--- a/vendor/github.com/lunny/nodb/nodb.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package nodb
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/lunny/log"
- "github.com/lunny/nodb/config"
- "github.com/lunny/nodb/store"
-)
-
-type Nodb struct {
- cfg *config.Config
-
- ldb *store.DB
- dbs [MaxDBNumber]*DB
-
- quit chan struct{}
- jobs *sync.WaitGroup
-
- binlog *BinLog
-
-	wLock      sync.RWMutex //allow only one writer at a time
-	commitLock sync.Mutex   //allow only one write commit at a time
-}
-
-func Open(cfg *config.Config) (*Nodb, error) {
- if len(cfg.DataDir) == 0 {
- cfg.DataDir = config.DefaultDataDir
- }
-
- ldb, err := store.Open(cfg)
- if err != nil {
- return nil, err
- }
-
- l := new(Nodb)
-
- l.quit = make(chan struct{})
- l.jobs = new(sync.WaitGroup)
-
- l.ldb = ldb
-
- if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 {
- l.binlog, err = NewBinLog(cfg)
- if err != nil {
- return nil, err
- }
- } else {
- l.binlog = nil
- }
-
- for i := uint8(0); i < MaxDBNumber; i++ {
- l.dbs[i] = l.newDB(i)
- }
-
- l.activeExpireCycle()
-
- return l, nil
-}
-
-func (l *Nodb) Close() {
- close(l.quit)
- l.jobs.Wait()
-
- l.ldb.Close()
-
- if l.binlog != nil {
- l.binlog.Close()
- l.binlog = nil
- }
-}
-
-func (l *Nodb) Select(index int) (*DB, error) {
- if index < 0 || index >= int(MaxDBNumber) {
- return nil, fmt.Errorf("invalid db index %d", index)
- }
-
- return l.dbs[index], nil
-}
-
-func (l *Nodb) FlushAll() error {
- for index, db := range l.dbs {
- if _, err := db.FlushAll(); err != nil {
- log.Error("flush db %d error %s", index, err.Error())
- }
- }
-
- return nil
-}
-
-// DataDB returns the underlying store; it is very dangerous to use directly.
-func (l *Nodb) DataDB() *store.DB {
- return l.ldb
-}
-
-func (l *Nodb) activeExpireCycle() {
-	executors := make([]*elimination, len(l.dbs))
- for i, db := range l.dbs {
- executors[i] = db.newEliminator()
- }
-
- l.jobs.Add(1)
- go func() {
- tick := time.NewTicker(1 * time.Second)
- end := false
- done := make(chan struct{})
- for !end {
- select {
- case <-tick.C:
- go func() {
- for _, eli := range executors {
- eli.active()
- }
- done <- struct{}{}
- }()
- <-done
- case <-l.quit:
- end = true
- break
- }
- }
-
- tick.Stop()
- l.jobs.Done()
- }()
-}
diff --git a/vendor/github.com/lunny/nodb/nodb_db.go b/vendor/github.com/lunny/nodb/nodb_db.go
deleted file mode 100644
index f68ebaa0d4..0000000000
--- a/vendor/github.com/lunny/nodb/nodb_db.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package nodb
-
-import (
- "fmt"
- "sync"
-
- "github.com/lunny/nodb/store"
-)
-
-type ibucket interface {
- Get(key []byte) ([]byte, error)
-
- Put(key []byte, value []byte) error
- Delete(key []byte) error
-
- NewIterator() *store.Iterator
-
- NewWriteBatch() store.WriteBatch
-
- RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
- RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
- RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
- RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
-}
-
-type DB struct {
- l *Nodb
-
- sdb *store.DB
-
- bucket ibucket
-
- index uint8
-
- kvBatch *batch
- listBatch *batch
- hashBatch *batch
- zsetBatch *batch
- binBatch *batch
- setBatch *batch
-
- status uint8
-}
-
-func (l *Nodb) newDB(index uint8) *DB {
- d := new(DB)
-
- d.l = l
-
- d.sdb = l.ldb
-
- d.bucket = d.sdb
-
- d.status = DBAutoCommit
- d.index = index
-
- d.kvBatch = d.newBatch()
- d.listBatch = d.newBatch()
- d.hashBatch = d.newBatch()
- d.zsetBatch = d.newBatch()
- d.binBatch = d.newBatch()
- d.setBatch = d.newBatch()
-
- return d
-}
-
-func (db *DB) newBatch() *batch {
- return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}, nil)
-}
-
-func (db *DB) Index() int {
- return int(db.index)
-}
-
-func (db *DB) IsAutoCommit() bool {
- return db.status == DBAutoCommit
-}
-
-func (db *DB) FlushAll() (drop int64, err error) {
- all := [...](func() (int64, error)){
- db.flush,
- db.lFlush,
- db.hFlush,
- db.zFlush,
- db.bFlush,
- db.sFlush}
-
- for _, flush := range all {
- if n, e := flush(); e != nil {
- err = e
- return
- } else {
- drop += n
- }
- }
-
- return
-}
-
-func (db *DB) newEliminator() *elimination {
- eliminator := newEliminator(db)
-
- eliminator.regRetireContext(KVType, db.kvBatch, db.delete)
- eliminator.regRetireContext(ListType, db.listBatch, db.lDelete)
- eliminator.regRetireContext(HashType, db.hashBatch, db.hDelete)
- eliminator.regRetireContext(ZSetType, db.zsetBatch, db.zDelete)
- eliminator.regRetireContext(BitType, db.binBatch, db.bDelete)
- eliminator.regRetireContext(SetType, db.setBatch, db.sDelete)
-
- return eliminator
-}
-
-func (db *DB) flushRegion(t *batch, minKey []byte, maxKey []byte) (drop int64, err error) {
- it := db.bucket.RangeIterator(minKey, maxKey, store.RangeROpen)
- for ; it.Valid(); it.Next() {
- t.Delete(it.RawKey())
- drop++
- if drop&1023 == 0 {
- if err = t.Commit(); err != nil {
- return
- }
- }
- }
- it.Close()
- return
-}
-
-func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
- var deleteFunc func(t *batch, key []byte) int64
- var metaDataType byte
- switch dataType {
- case KVType:
- deleteFunc = db.delete
- metaDataType = KVType
- case ListType:
- deleteFunc = db.lDelete
- metaDataType = LMetaType
- case HashType:
- deleteFunc = db.hDelete
- metaDataType = HSizeType
- case ZSetType:
- deleteFunc = db.zDelete
- metaDataType = ZSizeType
- case BitType:
- deleteFunc = db.bDelete
- metaDataType = BitMetaType
- case SetType:
- deleteFunc = db.sDelete
- metaDataType = SSizeType
- default:
- return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType])
- }
-
- var keys [][]byte
- keys, err = db.scan(metaDataType, nil, 1024, false, "")
- for len(keys) != 0 || err != nil {
- for _, key := range keys {
- deleteFunc(t, key)
- db.rmExpire(t, dataType, key)
-
- }
-
- if err = t.Commit(); err != nil {
- return
- } else {
- drop += int64(len(keys))
- }
- keys, err = db.scan(metaDataType, nil, 1024, false, "")
- }
- return
-}
diff --git a/vendor/github.com/lunny/nodb/replication.go b/vendor/github.com/lunny/nodb/replication.go
deleted file mode 100644
index f9bc951085..0000000000
--- a/vendor/github.com/lunny/nodb/replication.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package nodb
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "os"
- "time"
-
- "github.com/lunny/log"
- "github.com/lunny/nodb/store/driver"
-)
-
-const (
- maxReplBatchNum = 100
- maxReplLogSize = 1 * 1024 * 1024
-)
-
-var (
- ErrSkipEvent = errors.New("skip to next event")
-)
-
-var (
-	errInvalidBinLogEvent = errors.New("invalid binlog event")
- errInvalidBinLogFile = errors.New("invalid binlog file")
-)
-
-type replBatch struct {
- wb driver.IWriteBatch
- events [][]byte
- l *Nodb
-
- lastHead *BinLogHead
-}
-
-func (b *replBatch) Commit() error {
- b.l.commitLock.Lock()
- defer b.l.commitLock.Unlock()
-
- err := b.wb.Commit()
- if err != nil {
- b.Rollback()
- return err
- }
-
- if b.l.binlog != nil {
- if err = b.l.binlog.Log(b.events...); err != nil {
- b.Rollback()
- return err
- }
- }
-
- b.events = [][]byte{}
- b.lastHead = nil
-
- return nil
-}
-
-func (b *replBatch) Rollback() error {
- b.wb.Rollback()
- b.events = [][]byte{}
- b.lastHead = nil
- return nil
-}
-
-func (l *Nodb) replicateEvent(b *replBatch, event []byte) error {
- if len(event) == 0 {
- return errInvalidBinLogEvent
- }
-
- b.events = append(b.events, event)
-
- logType := uint8(event[0])
- switch logType {
- case BinLogTypePut:
- return l.replicatePutEvent(b, event)
- case BinLogTypeDeletion:
- return l.replicateDeleteEvent(b, event)
- default:
- return errInvalidBinLogEvent
- }
-}
-
-func (l *Nodb) replicatePutEvent(b *replBatch, event []byte) error {
- key, value, err := decodeBinLogPut(event)
- if err != nil {
- return err
- }
-
- b.wb.Put(key, value)
-
- return nil
-}
-
-func (l *Nodb) replicateDeleteEvent(b *replBatch, event []byte) error {
- key, err := decodeBinLogDelete(event)
- if err != nil {
- return err
- }
-
- b.wb.Delete(key)
-
- return nil
-}
-
-func ReadEventFromReader(rb io.Reader, f func(head *BinLogHead, event []byte) error) error {
- head := &BinLogHead{}
- var err error
-
- for {
- if err = head.Read(rb); err != nil {
- if err == io.EOF {
- break
- } else {
- return err
- }
- }
-
- var dataBuf bytes.Buffer
-
- if _, err = io.CopyN(&dataBuf, rb, int64(head.PayloadLen)); err != nil {
- return err
- }
-
- err = f(head, dataBuf.Bytes())
- if err != nil && err != ErrSkipEvent {
- return err
- }
- }
-
- return nil
-}
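As a usage sketch, `ReadEventFromReader` streams heads and payloads to a callback; paired with `FormatBinLogEvent` it makes a simple binlog dumper. The file path here is hypothetical and follows the `ledis-bin.%07d` naming shown earlier:

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/lunny/nodb"
)

func main() {
	// Hypothetical binlog file written by nodb.
	f, err := os.Open("/tmp/nodb_example/binlog/ledis-bin.0000001")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	err = nodb.ReadEventFromReader(bufio.NewReaderSize(f, 4096),
		func(head *nodb.BinLogHead, event []byte) error {
			s, ferr := nodb.FormatBinLogEvent(event)
			if ferr != nil {
				return nodb.ErrSkipEvent // skip events we cannot decode
			}
			fmt.Printf("batch=%d %s\n", head.BatchId, s)
			return nil
		})
	if err != nil {
		panic(err)
	}
}
```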
-
-func (l *Nodb) ReplicateFromReader(rb io.Reader) error {
- b := new(replBatch)
-
- b.wb = l.ldb.NewWriteBatch()
- b.l = l
-
- f := func(head *BinLogHead, event []byte) error {
- if b.lastHead == nil {
- b.lastHead = head
- } else if !b.lastHead.InSameBatch(head) {
- if err := b.Commit(); err != nil {
- log.Fatal("replication error %s, skip to next", err.Error())
- return ErrSkipEvent
- }
- b.lastHead = head
- }
-
- err := l.replicateEvent(b, event)
- if err != nil {
- log.Fatal("replication error %s, skip to next", err.Error())
- return ErrSkipEvent
- }
- return nil
- }
-
- err := ReadEventFromReader(rb, f)
- if err != nil {
- b.Rollback()
- return err
- }
- return b.Commit()
-}
-
-func (l *Nodb) ReplicateFromData(data []byte) error {
- rb := bytes.NewReader(data)
-
- err := l.ReplicateFromReader(rb)
-
- return err
-}
-
-func (l *Nodb) ReplicateFromBinLog(filePath string) error {
- f, err := os.Open(filePath)
- if err != nil {
- return err
- }
-
- rb := bufio.NewReaderSize(f, 4096)
-
- err = l.ReplicateFromReader(rb)
-
- f.Close()
-
- return err
-}
-
-// try to read events; if none are read, wait for a new event signal for up to timeout seconds
-func (l *Nodb) ReadEventsToTimeout(info *BinLogAnchor, w io.Writer, timeout int) (n int, err error) {
- lastIndex := info.LogFileIndex
- lastPos := info.LogPos
-
- n = 0
- if l.binlog == nil {
- //binlog not supported
- info.LogFileIndex = 0
- info.LogPos = 0
- return
- }
-
- n, err = l.ReadEventsTo(info, w)
- if err == nil && info.LogFileIndex == lastIndex && info.LogPos == lastPos {
- //no events read
- select {
- case <-l.binlog.Wait():
- case <-time.After(time.Duration(timeout) * time.Second):
- }
- return l.ReadEventsTo(info, w)
- }
- return
-}
-
-func (l *Nodb) ReadEventsTo(info *BinLogAnchor, w io.Writer) (n int, err error) {
- n = 0
- if l.binlog == nil {
- //binlog not supported
- info.LogFileIndex = 0
- info.LogPos = 0
- return
- }
-
- index := info.LogFileIndex
- offset := info.LogPos
-
- filePath := l.binlog.FormatLogFilePath(index)
-
- var f *os.File
- f, err = os.Open(filePath)
- if os.IsNotExist(err) {
- lastIndex := l.binlog.LogFileIndex()
-
- if index == lastIndex {
- //no binlog at all
- info.LogPos = 0
- } else {
-			//slave binlog info has been lost
- info.LogFileIndex = -1
- }
- }
-
- if err != nil {
- if os.IsNotExist(err) {
- err = nil
- }
- return
- }
-
- defer f.Close()
-
- var fileSize int64
- st, _ := f.Stat()
- fileSize = st.Size()
-
- if fileSize == info.LogPos {
- return
- }
-
- if _, err = f.Seek(offset, os.SEEK_SET); err != nil {
-		//maybe an invalid seek offset
- return
- }
-
- var lastHead *BinLogHead = nil
-
- head := &BinLogHead{}
-
- batchNum := 0
-
- for {
- if err = head.Read(f); err != nil {
- if err == io.EOF {
- //we will try to use next binlog
- if index < l.binlog.LogFileIndex() {
- info.LogFileIndex += 1
- info.LogPos = 0
- }
- err = nil
- return
- } else {
- return
- }
-
- }
-
- if lastHead == nil {
- lastHead = head
- batchNum++
- } else if !lastHead.InSameBatch(head) {
- lastHead = head
- batchNum++
- if batchNum > maxReplBatchNum || n > maxReplLogSize {
- return
- }
- }
-
- if err = head.Write(w); err != nil {
- return
- }
-
- if _, err = io.CopyN(w, f, int64(head.PayloadLen)); err != nil {
- return
- }
-
- n += (head.Len() + int(head.PayloadLen))
- info.LogPos = info.LogPos + int64(head.Len()) + int64(head.PayloadLen)
- }
-
- return
-}
diff --git a/vendor/github.com/lunny/nodb/scan.go b/vendor/github.com/lunny/nodb/scan.go
deleted file mode 100644
index e989db3fed..0000000000
--- a/vendor/github.com/lunny/nodb/scan.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package nodb
-
-import (
- "bytes"
- "errors"
- "regexp"
-
- "github.com/lunny/nodb/store"
-)
-
-var errDataType = errors.New("error data type")
-var errMetaKey = errors.New("error meta key")
-
-// Seek searches for the given prefix key and returns an iterator positioned there.
-func (db *DB) Seek(key []byte) (*store.Iterator, error) {
- return db.seek(KVType, key)
-}
-
-func (db *DB) seek(dataType byte, key []byte) (*store.Iterator, error) {
- var minKey []byte
- var err error
-
- if len(key) > 0 {
- if err = checkKeySize(key); err != nil {
- return nil, err
- }
- if minKey, err = db.encodeMetaKey(dataType, key); err != nil {
- return nil, err
- }
-
- } else {
- if minKey, err = db.encodeMinKey(dataType); err != nil {
- return nil, err
- }
- }
-
- it := db.bucket.NewIterator()
- it.Seek(minKey)
- return it, nil
-}
-
-func (db *DB) MaxKey() ([]byte, error) {
- return db.encodeMaxKey(KVType)
-}
-
-func (db *DB) Key(it *store.Iterator) ([]byte, error) {
- return db.decodeMetaKey(KVType, it.Key())
-}
-
-func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- var minKey, maxKey []byte
- var err error
- var r *regexp.Regexp
-
- if len(match) > 0 {
- if r, err = regexp.Compile(match); err != nil {
- return nil, err
- }
- }
-
- if len(key) > 0 {
- if err = checkKeySize(key); err != nil {
- return nil, err
- }
- if minKey, err = db.encodeMetaKey(dataType, key); err != nil {
- return nil, err
- }
-
- } else {
- if minKey, err = db.encodeMinKey(dataType); err != nil {
- return nil, err
- }
- }
-
- if maxKey, err = db.encodeMaxKey(dataType); err != nil {
- return nil, err
- }
-
- if count <= 0 {
- count = defaultScanCount
- }
-
- v := make([][]byte, 0, count)
-
- it := db.bucket.NewIterator()
- it.Seek(minKey)
-
- if !inclusive {
- if it.Valid() && bytes.Equal(it.RawKey(), minKey) {
- it.Next()
- }
- }
-
- for i := 0; it.Valid() && i < count && bytes.Compare(it.RawKey(), maxKey) < 0; it.Next() {
- if k, err := db.decodeMetaKey(dataType, it.Key()); err != nil {
- continue
- } else if r != nil && !r.Match(k) {
- continue
- } else {
- v = append(v, k)
- i++
- }
- }
- it.Close()
- return v, nil
-}
-
-func (db *DB) encodeMinKey(dataType byte) ([]byte, error) {
- return db.encodeMetaKey(dataType, nil)
-}
-
-func (db *DB) encodeMaxKey(dataType byte) ([]byte, error) {
- k, err := db.encodeMetaKey(dataType, nil)
- if err != nil {
- return nil, err
- }
- k[len(k)-1] = dataType + 1
- return k, nil
-}
-
-func (db *DB) encodeMetaKey(dataType byte, key []byte) ([]byte, error) {
- switch dataType {
- case KVType:
- return db.encodeKVKey(key), nil
- case LMetaType:
- return db.lEncodeMetaKey(key), nil
- case HSizeType:
- return db.hEncodeSizeKey(key), nil
- case ZSizeType:
- return db.zEncodeSizeKey(key), nil
- case BitMetaType:
- return db.bEncodeMetaKey(key), nil
- case SSizeType:
- return db.sEncodeSizeKey(key), nil
- default:
- return nil, errDataType
- }
-}
-func (db *DB) decodeMetaKey(dataType byte, ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType {
- return nil, errMetaKey
- }
- return ek[2:], nil
-}
diff --git a/vendor/github.com/lunny/nodb/store/db.go b/vendor/github.com/lunny/nodb/store/db.go
deleted file mode 100644
index 00a8831a67..0000000000
--- a/vendor/github.com/lunny/nodb/store/db.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package store
-
-import (
- "github.com/lunny/nodb/store/driver"
-)
-
-type DB struct {
- driver.IDB
-}
-
-func (db *DB) NewIterator() *Iterator {
- it := new(Iterator)
- it.it = db.IDB.NewIterator()
-
- return it
-}
-
-func (db *DB) NewWriteBatch() WriteBatch {
- return db.IDB.NewWriteBatch()
-}
-
-func (db *DB) NewSnapshot() (*Snapshot, error) {
- var err error
- s := &Snapshot{}
- if s.ISnapshot, err = db.IDB.NewSnapshot(); err != nil {
- return nil, err
- }
-
- return s, nil
-}
-
-func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
- return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
-}
-
-func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
- return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
-}
-
-//count < 0 means unlimited.
-//
-//offset must be >= 0; if it is < 0, nothing is returned.
-func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
- return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
-}
-
-//count < 0 means unlimited.
-//
-//offset must be >= 0; if it is < 0, nothing is returned.
-func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
- return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
-}
-
-func (db *DB) Begin() (*Tx, error) {
- tx, err := db.IDB.Begin()
- if err != nil {
- return nil, err
- }
-
- return &Tx{tx}, nil
-}
diff --git a/vendor/github.com/lunny/nodb/store/driver/batch.go b/vendor/github.com/lunny/nodb/store/driver/batch.go
deleted file mode 100644
index 6b79c21c48..0000000000
--- a/vendor/github.com/lunny/nodb/store/driver/batch.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package driver
-
-type BatchPuter interface {
- BatchPut([]Write) error
-}
-
-type Write struct {
- Key []byte
- Value []byte
-}
-
-type WriteBatch struct {
- batch BatchPuter
- wb []Write
-}
-
-func (w *WriteBatch) Put(key, value []byte) {
- if value == nil {
- value = []byte{}
- }
- w.wb = append(w.wb, Write{key, value})
-}
-
-func (w *WriteBatch) Delete(key []byte) {
- w.wb = append(w.wb, Write{key, nil})
-}
-
-func (w *WriteBatch) Commit() error {
- return w.batch.BatchPut(w.wb)
-}
-
-func (w *WriteBatch) Rollback() error {
- w.wb = w.wb[0:0]
- return nil
-}
-
-func NewWriteBatch(puter BatchPuter) IWriteBatch {
- return &WriteBatch{puter, []Write{}}
-}
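To illustrate the BatchPuter contract, a toy in-memory implementation (hypothetical, for demonstration only). Note that Delete above queues a Write whose Value is nil, which is how a BatchPut implementation can distinguish deletions from puts of empty values:

```go
package main

import (
	"fmt"

	"github.com/lunny/nodb/store/driver"
)

// mapPuter is a toy BatchPuter backing writes with an in-memory map;
// a nil Value means delete, matching WriteBatch's convention.
type mapPuter struct {
	m map[string][]byte
}

func (p *mapPuter) BatchPut(ws []driver.Write) error {
	for _, w := range ws {
		if w.Value == nil {
			delete(p.m, string(w.Key))
		} else {
			p.m[string(w.Key)] = w.Value
		}
	}
	return nil
}

func main() {
	p := &mapPuter{m: map[string][]byte{}}
	wb := driver.NewWriteBatch(p)

	wb.Put([]byte("a"), []byte("1"))
	wb.Put([]byte("b"), []byte("2"))
	wb.Delete([]byte("a"))

	if err := wb.Commit(); err != nil {
		panic(err)
	}
	fmt.Println(p.m) // only "b" remains
}
```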
diff --git a/vendor/github.com/lunny/nodb/store/driver/driver.go b/vendor/github.com/lunny/nodb/store/driver/driver.go
deleted file mode 100644
index 6da67df083..0000000000
--- a/vendor/github.com/lunny/nodb/store/driver/driver.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package driver
-
-import (
- "errors"
-)
-
-var (
- ErrTxSupport = errors.New("transaction is not supported")
-)
-
-type IDB interface {
- Close() error
-
- Get(key []byte) ([]byte, error)
-
- Put(key []byte, value []byte) error
- Delete(key []byte) error
-
- NewIterator() IIterator
-
- NewWriteBatch() IWriteBatch
-
- NewSnapshot() (ISnapshot, error)
-
- Begin() (Tx, error)
-}
-
-type ISnapshot interface {
- Get(key []byte) ([]byte, error)
- NewIterator() IIterator
- Close()
-}
-
-type IIterator interface {
- Close() error
-
- First()
- Last()
- Seek(key []byte)
-
- Next()
- Prev()
-
- Valid() bool
-
- Key() []byte
- Value() []byte
-}
-
-type IWriteBatch interface {
- Put(key []byte, value []byte)
- Delete(key []byte)
- Commit() error
- Rollback() error
-}
-
-type Tx interface {
- Get(key []byte) ([]byte, error)
- Put(key []byte, value []byte) error
- Delete(key []byte) error
-
- NewIterator() IIterator
- NewWriteBatch() IWriteBatch
-
- Commit() error
- Rollback() error
-}
diff --git a/vendor/github.com/lunny/nodb/store/driver/store.go b/vendor/github.com/lunny/nodb/store/driver/store.go
deleted file mode 100644
index 173431d4c1..0000000000
--- a/vendor/github.com/lunny/nodb/store/driver/store.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package driver
-
-import (
- "fmt"
-
- "github.com/lunny/nodb/config"
-)
-
-type Store interface {
- String() string
- Open(path string, cfg *config.Config) (IDB, error)
- Repair(path string, cfg *config.Config) error
-}
-
-var dbs = map[string]Store{}
-
-func Register(s Store) {
- name := s.String()
- if _, ok := dbs[name]; ok {
-		panic(fmt.Errorf("store %s is already registered", s))
- }
-
- dbs[name] = s
-}
-
-func ListStores() []string {
- s := []string{}
-	for k := range dbs {
- s = append(s, k)
- }
-
- return s
-}
-
-func GetStore(cfg *config.Config) (Store, error) {
- if len(cfg.DBName) == 0 {
- cfg.DBName = config.DefaultDBName
- }
-
- s, ok := dbs[cfg.DBName]
- if !ok {
- return nil, fmt.Errorf("store %s is not registered", cfg.DBName)
- }
-
- return s, nil
-}
diff --git a/vendor/github.com/lunny/nodb/store/goleveldb/batch.go b/vendor/github.com/lunny/nodb/store/goleveldb/batch.go
deleted file mode 100644
index b17e85e750..0000000000
--- a/vendor/github.com/lunny/nodb/store/goleveldb/batch.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package goleveldb
-
-import (
- "github.com/syndtr/goleveldb/leveldb"
-)
-
-type WriteBatch struct {
- db *DB
- wbatch *leveldb.Batch
-}
-
-func (w *WriteBatch) Put(key, value []byte) {
- w.wbatch.Put(key, value)
-}
-
-func (w *WriteBatch) Delete(key []byte) {
- w.wbatch.Delete(key)
-}
-
-func (w *WriteBatch) Commit() error {
- return w.db.db.Write(w.wbatch, nil)
-}
-
-func (w *WriteBatch) Rollback() error {
- w.wbatch.Reset()
- return nil
-}
diff --git a/vendor/github.com/lunny/nodb/store/goleveldb/const.go b/vendor/github.com/lunny/nodb/store/goleveldb/const.go
deleted file mode 100644
index 2fffa7c82b..0000000000
--- a/vendor/github.com/lunny/nodb/store/goleveldb/const.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package goleveldb
-
-const DBName = "goleveldb"
-const MemDBName = "memory"
diff --git a/vendor/github.com/lunny/nodb/store/goleveldb/db.go b/vendor/github.com/lunny/nodb/store/goleveldb/db.go
deleted file mode 100644
index a36e87f628..0000000000
--- a/vendor/github.com/lunny/nodb/store/goleveldb/db.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package goleveldb
-
-import (
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/cache"
- "github.com/syndtr/goleveldb/leveldb/filter"
- "github.com/syndtr/goleveldb/leveldb/opt"
- "github.com/syndtr/goleveldb/leveldb/storage"
-
- "github.com/lunny/nodb/config"
- "github.com/lunny/nodb/store/driver"
-
- "os"
-)
-
-const defaultFilterBits int = 10
-
-type Store struct {
-}
-
-func (s Store) String() string {
- return DBName
-}
-
-type MemStore struct {
-}
-
-func (s MemStore) String() string {
- return MemDBName
-}
-
-type DB struct {
- path string
-
- cfg *config.LevelDBConfig
-
- db *leveldb.DB
-
- opts *opt.Options
-
- iteratorOpts *opt.ReadOptions
-
- cache cache.Cache
-
- filter filter.Filter
-}
-
-func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
- if err := os.MkdirAll(path, os.ModePerm); err != nil {
- return nil, err
- }
-
- db := new(DB)
- db.path = path
- db.cfg = &cfg.LevelDB
-
- db.initOpts()
-
- var err error
- db.db, err = leveldb.OpenFile(db.path, db.opts)
-
- if err != nil {
- return nil, err
- }
-
- return db, nil
-}
-
-func (s Store) Repair(path string, cfg *config.Config) error {
- db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB))
- if err != nil {
- return err
- }
-
- db.Close()
- return nil
-}
-
-func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) {
- db := new(DB)
- db.path = path
- db.cfg = &cfg.LevelDB
-
- db.initOpts()
-
- var err error
- db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts)
- if err != nil {
- return nil, err
- }
-
- return db, nil
-}
-
-func (s MemStore) Repair(path string, cfg *config.Config) error {
- return nil
-}
-
-func (db *DB) initOpts() {
- db.opts = newOptions(db.cfg)
-
- db.iteratorOpts = &opt.ReadOptions{}
- db.iteratorOpts.DontFillCache = true
-}
-
-func newOptions(cfg *config.LevelDBConfig) *opt.Options {
- opts := &opt.Options{}
- opts.ErrorIfMissing = false
-
- cfg.Adjust()
-
- //opts.BlockCacher = cache.NewLRU(cfg.CacheSize)
- opts.BlockCacheCapacity = cfg.CacheSize
-
-	//we must use a bloom filter
- opts.Filter = filter.NewBloomFilter(defaultFilterBits)
-
- if !cfg.Compression {
- opts.Compression = opt.NoCompression
- } else {
- opts.Compression = opt.SnappyCompression
- }
-
- opts.BlockSize = cfg.BlockSize
- opts.WriteBuffer = cfg.WriteBufferSize
-
- return opts
-}
-
-func (db *DB) Close() error {
- return db.db.Close()
-}
-
-func (db *DB) Put(key, value []byte) error {
- return db.db.Put(key, value, nil)
-}
-
-func (db *DB) Get(key []byte) ([]byte, error) {
-	v, err := db.db.Get(key, nil)
-	if err == leveldb.ErrNotFound {
-		return nil, nil
-	}
-	// propagate real errors instead of silently dropping them
-	return v, err
-}
-
-func (db *DB) Delete(key []byte) error {
- return db.db.Delete(key, nil)
-}
-
-func (db *DB) NewWriteBatch() driver.IWriteBatch {
- wb := &WriteBatch{
- db: db,
- wbatch: new(leveldb.Batch),
- }
- return wb
-}
-
-func (db *DB) NewIterator() driver.IIterator {
- it := &Iterator{
- db.db.NewIterator(nil, db.iteratorOpts),
- }
-
- return it
-}
-
-func (db *DB) Begin() (driver.Tx, error) {
- return nil, driver.ErrTxSupport
-}
-
-func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
- snapshot, err := db.db.GetSnapshot()
- if err != nil {
- return nil, err
- }
-
- s := &Snapshot{
- db: db,
- snp: snapshot,
- }
-
- return s, nil
-}
-
-func init() {
- driver.Register(Store{})
- driver.Register(MemStore{})
-}
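Because both stores register themselves in `init`, a caller only needs a blank import to make them visible to `driver.GetStore`. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/lunny/nodb/config"
	"github.com/lunny/nodb/store/driver"
	// Blank import runs init(), registering the goleveldb and memory stores.
	_ "github.com/lunny/nodb/store/goleveldb"
)

func main() {
	cfg := config.NewConfigDefault() // DBName defaults to "goleveldb"

	s, err := driver.GetStore(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(driver.ListStores(), "using:", s.String())
}
```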
diff --git a/vendor/github.com/lunny/nodb/store/goleveldb/iterator.go b/vendor/github.com/lunny/nodb/store/goleveldb/iterator.go
deleted file mode 100644
index c1fd8b5573..0000000000
--- a/vendor/github.com/lunny/nodb/store/goleveldb/iterator.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package goleveldb
-
-import (
- "github.com/syndtr/goleveldb/leveldb/iterator"
-)
-
-type Iterator struct {
- it iterator.Iterator
-}
-
-func (it *Iterator) Key() []byte {
- return it.it.Key()
-}
-
-func (it *Iterator) Value() []byte {
- return it.it.Value()
-}
-
-func (it *Iterator) Close() error {
- if it.it != nil {
- it.it.Release()
- it.it = nil
- }
- return nil
-}
-
-func (it *Iterator) Valid() bool {
- return it.it.Valid()
-}
-
-func (it *Iterator) Next() {
- it.it.Next()
-}
-
-func (it *Iterator) Prev() {
- it.it.Prev()
-}
-
-func (it *Iterator) First() {
- it.it.First()
-}
-
-func (it *Iterator) Last() {
- it.it.Last()
-}
-
-func (it *Iterator) Seek(key []byte) {
- it.it.Seek(key)
-}
diff --git a/vendor/github.com/lunny/nodb/store/goleveldb/snapshot.go b/vendor/github.com/lunny/nodb/store/goleveldb/snapshot.go
deleted file mode 100644
index fe2b409c3f..0000000000
--- a/vendor/github.com/lunny/nodb/store/goleveldb/snapshot.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package goleveldb
-
-import (
- "github.com/lunny/nodb/store/driver"
- "github.com/syndtr/goleveldb/leveldb"
-)
-
-type Snapshot struct {
- db *DB
- snp *leveldb.Snapshot
-}
-
-func (s *Snapshot) Get(key []byte) ([]byte, error) {
- return s.snp.Get(key, s.db.iteratorOpts)
-}
-
-func (s *Snapshot) NewIterator() driver.IIterator {
- it := &Iterator{
- s.snp.NewIterator(nil, s.db.iteratorOpts),
- }
- return it
-}
-
-func (s *Snapshot) Close() {
- s.snp.Release()
-}
diff --git a/vendor/github.com/lunny/nodb/store/iterator.go b/vendor/github.com/lunny/nodb/store/iterator.go
deleted file mode 100644
index 27bf689da2..0000000000
--- a/vendor/github.com/lunny/nodb/store/iterator.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package store
-
-import (
- "bytes"
-
- "github.com/lunny/nodb/store/driver"
-)
-
-const (
- IteratorForward uint8 = 0
- IteratorBackward uint8 = 1
-)
-
-const (
- RangeClose uint8 = 0x00
- RangeLOpen uint8 = 0x01
- RangeROpen uint8 = 0x10
- RangeOpen uint8 = 0x11
-)
-
-// Min must be less than or equal to Max.
-//
-// range type:
-//
-// close: [min, max]
-// open: (min, max)
-// lopen: (min, max]
-// ropen: [min, max)
-//
-type Range struct {
- Min []byte
- Max []byte
-
- Type uint8
-}
-
-type Limit struct {
- Offset int
- Count int
-}
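-
-// Illustrative usage (not part of the original file; dbIter is a
-// hypothetical *Iterator): a half-open scan over [min, max) that skips
-// the first 10 keys and yields at most 100 entries:
-//
-//	r := &Range{Min: min, Max: max, Type: RangeROpen}
-//	l := &Limit{Offset: 10, Count: 100}
-//	rit := NewRangeLimitIterator(dbIter, r, l)
-//	for ; rit.Valid(); rit.Next() {
-//		process(rit.Key(), rit.Value()) // process is hypothetical
-//	}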
-
-type Iterator struct {
- it driver.IIterator
-}
-
-// Returns a copy of the key.
-func (it *Iterator) Key() []byte {
- k := it.it.Key()
- if k == nil {
- return nil
- }
-
- return append([]byte{}, k...)
-}
-
-// Returns a copy of the value.
-func (it *Iterator) Value() []byte {
- v := it.it.Value()
- if v == nil {
- return nil
- }
-
- return append([]byte{}, v...)
-}
-
-// Returns a reference to the key.
-// Be careful: the returned slice is invalidated by the next iteration step.
-func (it *Iterator) RawKey() []byte {
- return it.it.Key()
-}
-
-// Returns a reference to the value.
-// Be careful: the returned slice is invalidated by the next iteration step.
-func (it *Iterator) RawValue() []byte {
- return it.it.Value()
-}
-
-// Copies the key into b; if b is nil or too small, a new slice is returned.
-func (it *Iterator) BufKey(b []byte) []byte {
- k := it.RawKey()
- if k == nil {
- return nil
- }
- if b == nil {
- b = []byte{}
- }
-
- b = b[0:0]
- return append(b, k...)
-}
-
-// Copies the value into b; if b is nil or too small, a new slice is returned.
-func (it *Iterator) BufValue(b []byte) []byte {
- v := it.RawValue()
- if v == nil {
- return nil
- }
-
- if b == nil {
- b = []byte{}
- }
-
- b = b[0:0]
- return append(b, v...)
-}
-
-func (it *Iterator) Close() {
- if it.it != nil {
- it.it.Close()
- it.it = nil
- }
-}
-
-func (it *Iterator) Valid() bool {
- return it.it.Valid()
-}
-
-func (it *Iterator) Next() {
- it.it.Next()
-}
-
-func (it *Iterator) Prev() {
- it.it.Prev()
-}
-
-func (it *Iterator) SeekToFirst() {
- it.it.First()
-}
-
-func (it *Iterator) SeekToLast() {
- it.it.Last()
-}
-
-func (it *Iterator) Seek(key []byte) {
- it.it.Seek(key)
-}
-
-// Finds the value by key; returns nil if not found.
-func (it *Iterator) Find(key []byte) []byte {
- it.Seek(key)
- if it.Valid() {
- k := it.RawKey()
- if k == nil {
- return nil
- } else if bytes.Equal(k, key) {
- return it.Value()
- }
- }
-
- return nil
-}
-
-// Finds the value by key; returns nil if not found, otherwise a reference
-// to the value. Be careful: it is invalidated by the next iteration step.
-func (it *Iterator) RawFind(key []byte) []byte {
- it.Seek(key)
- if it.Valid() {
- k := it.RawKey()
- if k == nil {
- return nil
- } else if bytes.Equal(k, key) {
- return it.RawValue()
- }
- }
-
- return nil
-}
-
-type RangeLimitIterator struct {
- it *Iterator
-
- r *Range
- l *Limit
-
- step int
-
- // 0 for IteratorForward, 1 for IteratorBackward
- direction uint8
-}
-
-func (it *RangeLimitIterator) Key() []byte {
- return it.it.Key()
-}
-
-func (it *RangeLimitIterator) Value() []byte {
- return it.it.Value()
-}
-
-func (it *RangeLimitIterator) RawKey() []byte {
- return it.it.RawKey()
-}
-
-func (it *RangeLimitIterator) RawValue() []byte {
- return it.it.RawValue()
-}
-
-func (it *RangeLimitIterator) BufKey(b []byte) []byte {
- return it.it.BufKey(b)
-}
-
-func (it *RangeLimitIterator) BufValue(b []byte) []byte {
- return it.it.BufValue(b)
-}
-
-func (it *RangeLimitIterator) Valid() bool {
- if it.l.Offset < 0 {
- return false
- } else if !it.it.Valid() {
- return false
- } else if it.l.Count >= 0 && it.step >= it.l.Count {
- return false
- }
-
- if it.direction == IteratorForward {
- if it.r.Max != nil {
- r := bytes.Compare(it.it.RawKey(), it.r.Max)
- if it.r.Type&RangeROpen > 0 {
- return !(r >= 0)
- } else {
- return !(r > 0)
- }
- }
- } else {
- if it.r.Min != nil {
- r := bytes.Compare(it.it.RawKey(), it.r.Min)
- if it.r.Type&RangeLOpen > 0 {
- return !(r <= 0)
- } else {
- return !(r < 0)
- }
- }
- }
-
- return true
-}
-
-func (it *RangeLimitIterator) Next() {
- it.step++
-
- if it.direction == IteratorForward {
- it.it.Next()
- } else {
- it.it.Prev()
- }
-}
-
-func (it *RangeLimitIterator) Close() {
- it.it.Close()
-}
-
-func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
- return rangeLimitIterator(i, r, l, IteratorForward)
-}
-
-func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
- return rangeLimitIterator(i, r, l, IteratorBackward)
-}
-
-func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
- return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward)
-}
-
-func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
- return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward)
-}
-
-func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator {
- it := new(RangeLimitIterator)
-
- it.it = i
-
- it.r = r
- it.l = l
- it.direction = direction
-
- it.step = 0
-
- if l.Offset < 0 {
- return it
- }
-
- if direction == IteratorForward {
- if r.Min == nil {
- it.it.SeekToFirst()
- } else {
- it.it.Seek(r.Min)
-
- if r.Type&RangeLOpen > 0 {
- if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) {
- it.it.Next()
- }
- }
- }
- } else {
- if r.Max == nil {
- it.it.SeekToLast()
- } else {
- it.it.Seek(r.Max)
-
- if !it.it.Valid() {
- it.it.SeekToLast()
- } else {
- if !bytes.Equal(it.it.RawKey(), r.Max) {
- it.it.Prev()
- }
- }
-
- if r.Type&RangeROpen > 0 {
- if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) {
- it.it.Prev()
- }
- }
- }
- }
-
- for i := 0; i < l.Offset; i++ {
- if it.it.Valid() {
- if it.direction == IteratorForward {
- it.it.Next()
- } else {
- it.it.Prev()
- }
- }
- }
-
- return it
-}
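-
-// Descriptive note (not in the original file): rangeLimitIterator first
-// seeks to the range boundary, stepping once more when that side of the
-// range is open, then pre-advances l.Offset entries; Valid() above then
-// enforces the opposite bound and the Count limit.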
diff --git a/vendor/github.com/lunny/nodb/store/snapshot.go b/vendor/github.com/lunny/nodb/store/snapshot.go
deleted file mode 100644
index 75ba0497db..0000000000
--- a/vendor/github.com/lunny/nodb/store/snapshot.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package store
-
-import (
- "github.com/lunny/nodb/store/driver"
-)
-
-type Snapshot struct {
- driver.ISnapshot
-}
-
-func (s *Snapshot) NewIterator() *Iterator {
- it := new(Iterator)
- it.it = s.ISnapshot.NewIterator()
-
- return it
-}
diff --git a/vendor/github.com/lunny/nodb/store/store.go b/vendor/github.com/lunny/nodb/store/store.go
deleted file mode 100644
index 5d0ade1bf0..0000000000
--- a/vendor/github.com/lunny/nodb/store/store.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package store
-
-import (
- "fmt"
- "os"
- "path"
- "github.com/lunny/nodb/config"
- "github.com/lunny/nodb/store/driver"
-
- _ "github.com/lunny/nodb/store/goleveldb"
-)
-
-func getStorePath(cfg *config.Config) string {
- return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
-}
-
-func Open(cfg *config.Config) (*DB, error) {
- s, err := driver.GetStore(cfg)
- if err != nil {
- return nil, err
- }
-
- path := getStorePath(cfg)
-
- if err := os.MkdirAll(path, os.ModePerm); err != nil {
- return nil, err
- }
-
- idb, err := s.Open(path, cfg)
- if err != nil {
- return nil, err
- }
-
- db := &DB{idb}
-
- return db, nil
-}
-
-func Repair(cfg *config.Config) error {
- s, err := driver.GetStore(cfg)
- if err != nil {
- return err
- }
-
- path := getStorePath(cfg)
-
- return s.Repair(path, cfg)
-}
-
-func init() {
-}
diff --git a/vendor/github.com/lunny/nodb/store/tx.go b/vendor/github.com/lunny/nodb/store/tx.go
deleted file mode 100644
index 32bcbcda4b..0000000000
--- a/vendor/github.com/lunny/nodb/store/tx.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package store
-
-import (
- "github.com/lunny/nodb/store/driver"
-)
-
-type Tx struct {
- driver.Tx
-}
-
-func (tx *Tx) NewIterator() *Iterator {
- it := new(Iterator)
- it.it = tx.Tx.NewIterator()
-
- return it
-}
-
-func (tx *Tx) NewWriteBatch() WriteBatch {
- return tx.Tx.NewWriteBatch()
-}
-
-func (tx *Tx) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
- return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
-}
-
-func (tx *Tx) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
- return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
-}
-
-//count < 0, unlimit.
-//
-//offset must >= 0, if < 0, will get nothing.
-func (tx *Tx) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
- return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
-}
-
-//count < 0, unlimit.
-//
-//offset must >= 0, if < 0, will get nothing.
-func (tx *Tx) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
- return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
-}
diff --git a/vendor/github.com/lunny/nodb/store/writebatch.go b/vendor/github.com/lunny/nodb/store/writebatch.go
deleted file mode 100644
index 23e079eba6..0000000000
--- a/vendor/github.com/lunny/nodb/store/writebatch.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package store
-
-import (
- "github.com/lunny/nodb/store/driver"
-)
-
-type WriteBatch interface {
- driver.IWriteBatch
-}
diff --git a/vendor/github.com/lunny/nodb/t_bit.go b/vendor/github.com/lunny/nodb/t_bit.go
deleted file mode 100644
index 930d4ba568..0000000000
--- a/vendor/github.com/lunny/nodb/t_bit.go
+++ /dev/null
@@ -1,922 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "sort"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-const (
- OPand uint8 = iota + 1
- OPor
- OPxor
- OPnot
-)
-
-type BitPair struct {
- Pos int32
- Val uint8
-}
-
-type segBitInfo struct {
- Seq uint32
- Off uint32
- Val uint8
-}
-
-type segBitInfoArray []segBitInfo
-
-const (
- // byte
- segByteWidth uint32 = 9
- segByteSize uint32 = 1 << segByteWidth
-
- // bit
- segBitWidth uint32 = segByteWidth + 3
- segBitSize uint32 = segByteSize << 3
-
- maxByteSize uint32 = 8 << 20
- maxSegCount uint32 = maxByteSize / segByteSize
-
- minSeq uint32 = 0
- maxSeq uint32 = uint32((maxByteSize << 3) - 1)
-)
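-
-// Derived values (illustrative note, not in the original file):
-// segByteSize = 1<<9 = 512 bytes per segment, segBitSize = 512<<3 = 4096
-// bits, maxSegCount = (8<<20)/512 = 16384 segments, and
-// maxSeq = 8MiB*8 - 1 = 67108863, the highest addressable bit.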
-
-var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3,
- 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3,
- 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4,
- 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
- 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
- 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2,
- 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3,
- 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4,
- 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
- 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
- 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}
-
-var fillBits = [...]uint8{1, 3, 7, 15, 31, 63, 127, 255}
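-
-// Descriptive note (not in the original file): bitsInByte is a popcount
-// lookup table, so bitsInByte[b] matches math/bits.OnesCount8(b), and
-// fillBits[i] is a mask with the low i+1 bits set (fillBits[2] == 0b111).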
-
-var emptySegment []byte = make([]byte, segByteSize, segByteSize)
-
-var fillSegment []byte = func() []byte {
- data := make([]byte, segByteSize, segByteSize)
- for i := uint32(0); i < segByteSize; i++ {
- data[i] = 0xff
- }
- return data
-}()
-
-var errBinKey = errors.New("invalid bin key")
-var errOffset = errors.New("invalid offset")
-var errDuplicatePos = errors.New("duplicate bit pos")
-
-func getBit(sz []byte, offset uint32) uint8 {
- index := offset >> 3
- if index >= uint32(len(sz)) {
- return 0 // error("overflow")
- }
-
- offset -= index << 3
- return sz[index] >> offset & 1
-}
-
-func setBit(sz []byte, offset uint32, val uint8) bool {
- if val != 1 && val != 0 {
- return false // error("invalid val")
- }
-
- index := offset >> 3
- if index >= uint32(len(sz)) {
- return false // error("overflow")
- }
-
- offset -= index << 3
- if sz[index]>>offset&1 != val {
- sz[index] ^= (1 << offset)
- }
- return true
-}
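-
-// Worked example (illustrative, not in the original file): for offset 11,
-// index = 11 >> 3 = 1 and the in-byte offset is 11 - (1 << 3) = 3, so
-// getBit inspects (sz[1] >> 3) & 1 and setBit flips that same bit.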
-
-func (datas segBitInfoArray) Len() int {
- return len(datas)
-}
-
-func (datas segBitInfoArray) Less(i, j int) bool {
- res := (datas)[i].Seq < (datas)[j].Seq
- if !res && (datas)[i].Seq == (datas)[j].Seq {
- res = (datas)[i].Off < (datas)[j].Off
- }
- return res
-}
-
-func (datas segBitInfoArray) Swap(i, j int) {
- datas[i], datas[j] = datas[j], datas[i]
-}
-
-func (db *DB) bEncodeMetaKey(key []byte) []byte {
- mk := make([]byte, len(key)+2)
- mk[0] = db.index
- mk[1] = BitMetaType
-
- copy(mk[2:], key)
- return mk
-}
-
-func (db *DB) bDecodeMetaKey(bkey []byte) ([]byte, error) {
- if len(bkey) < 2 || bkey[0] != db.index || bkey[1] != BitMetaType {
- return nil, errBinKey
- }
-
- return bkey[2:], nil
-}
-
-func (db *DB) bEncodeBinKey(key []byte, seq uint32) []byte {
- bk := make([]byte, len(key)+8)
-
- pos := 0
- bk[pos] = db.index
- pos++
- bk[pos] = BitType
- pos++
-
- binary.BigEndian.PutUint16(bk[pos:], uint16(len(key)))
- pos += 2
-
- copy(bk[pos:], key)
- pos += len(key)
-
- binary.BigEndian.PutUint32(bk[pos:], seq)
-
- return bk
-}
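-
-// Key layout produced above (descriptive note, not in the original file):
-//
-//	| index (1) | BitType (1) | keyLen (2, BE) | key | seq (4, BE) |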
-
-func (db *DB) bDecodeBinKey(bkey []byte) (key []byte, seq uint32, err error) {
- if len(bkey) < 8 || bkey[0] != db.index {
- err = errBinKey
- return
- }
-
- keyLen := binary.BigEndian.Uint16(bkey[2:4])
- if int(keyLen+8) != len(bkey) {
- err = errBinKey
- return
- }
-
- key = bkey[4 : 4+keyLen]
- seq = uint32(binary.BigEndian.Uint32(bkey[4+keyLen:]))
- return
-}
-
-func (db *DB) bCapByteSize(seq uint32, off uint32) uint32 {
- var offByteSize uint32 = (off >> 3) + 1
- if offByteSize > segByteSize {
- offByteSize = segByteSize
- }
-
- return seq<<segByteWidth + offByteSize
-}
-
-func (db *DB) bParseOffset(key []byte, offset int32) (seq uint32, off uint32, err error) {
- if offset < 0 {
- if tailSeq, tailOff, e := db.bGetMeta(key); e != nil {
- err = e
- return
- } else if tailSeq >= 0 {
- offset += int32((uint32(tailSeq)<<segBitWidth | uint32(tailOff)) + 1)
- if offset < 0 {
- err = errOffset
- return
- }
- }
- }
-
- off = uint32(offset)
-
- seq = off >> segBitWidth
- off &= (segBitSize - 1)
- return
-}
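-
-// Worked example (illustrative, not in the original file): for offset 5000,
-// seq = 5000 >> 12 = 1 and off = 5000 & 4095 = 904, i.e. bit 904 of the
-// second 4096-bit segment.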
-
-func (db *DB) bGetMeta(key []byte) (tailSeq int32, tailOff int32, err error) {
- var v []byte
-
- mk := db.bEncodeMetaKey(key)
- v, err = db.bucket.Get(mk)
- if err != nil {
- return
- }
-
- if v != nil {
- tailSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
- tailOff = int32(binary.LittleEndian.Uint32(v[4:8]))
- } else {
- tailSeq = -1
- tailOff = -1
- }
- return
-}
-
-func (db *DB) bSetMeta(t *batch, key []byte, tailSeq uint32, tailOff uint32) {
- ek := db.bEncodeMetaKey(key)
-
- buf := make([]byte, 8)
- binary.LittleEndian.PutUint32(buf[0:4], tailSeq)
- binary.LittleEndian.PutUint32(buf[4:8], tailOff)
-
- t.Put(ek, buf)
- return
-}
-
-func (db *DB) bUpdateMeta(t *batch, key []byte, seq uint32, off uint32) (tailSeq uint32, tailOff uint32, err error) {
- var tseq, toff int32
- var update bool = false
-
- if tseq, toff, err = db.bGetMeta(key); err != nil {
- return
- } else if tseq < 0 {
- update = true
- } else {
- tailSeq = uint32(MaxInt32(tseq, 0))
- tailOff = uint32(MaxInt32(toff, 0))
- update = (seq > tailSeq || (seq == tailSeq && off > tailOff))
- }
-
- if update {
- db.bSetMeta(t, key, seq, off)
- tailSeq = seq
- tailOff = off
- }
- return
-}
-
-func (db *DB) bDelete(t *batch, key []byte) (drop int64) {
- mk := db.bEncodeMetaKey(key)
- t.Delete(mk)
-
- minKey := db.bEncodeBinKey(key, minSeq)
- maxKey := db.bEncodeBinKey(key, maxSeq)
- it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose)
- for ; it.Valid(); it.Next() {
- t.Delete(it.RawKey())
- drop++
- }
- it.Close()
-
- return drop
-}
-
-func (db *DB) bGetSegment(key []byte, seq uint32) ([]byte, []byte, error) {
- bk := db.bEncodeBinKey(key, seq)
- segment, err := db.bucket.Get(bk)
- if err != nil {
- return bk, nil, err
- }
- return bk, segment, nil
-}
-
-func (db *DB) bAllocateSegment(key []byte, seq uint32) ([]byte, []byte, error) {
- bk, segment, err := db.bGetSegment(key, seq)
- if err == nil && segment == nil {
- segment = make([]byte, segByteSize, segByteSize)
- }
- return bk, segment, err
-}
-
-func (db *DB) bIterator(key []byte) *store.RangeLimitIterator {
- sk := db.bEncodeBinKey(key, minSeq)
- ek := db.bEncodeBinKey(key, maxSeq)
- return db.bucket.RangeIterator(sk, ek, store.RangeClose)
-}
-
-func (db *DB) bSegAnd(a []byte, b []byte, res *[]byte) {
- if a == nil || b == nil {
- *res = nil
- return
- }
-
- data := *res
- if data == nil {
- data = make([]byte, segByteSize, segByteSize)
- *res = data
- }
-
- for i := uint32(0); i < segByteSize; i++ {
- data[i] = a[i] & b[i]
- }
- return
-}
-
-func (db *DB) bSegOr(a []byte, b []byte, res *[]byte) {
- if a == nil || b == nil {
- if a == nil && b == nil {
- *res = nil
- } else if a == nil {
- *res = b
- } else {
- *res = a
- }
- return
- }
-
- data := *res
- if data == nil {
- data = make([]byte, segByteSize, segByteSize)
- *res = data
- }
-
- for i := uint32(0); i < segByteSize; i++ {
- data[i] = a[i] | b[i]
- }
- return
-}
-
-func (db *DB) bSegXor(a []byte, b []byte, res *[]byte) {
- if a == nil && b == nil {
- *res = fillSegment
- return
- }
-
- if a == nil {
- a = emptySegment
- }
-
- if b == nil {
- b = emptySegment
- }
-
- data := *res
- if data == nil {
- data = make([]byte, segByteSize, segByteSize)
- *res = data
- }
-
- for i := uint32(0); i < segByteSize; i++ {
- data[i] = a[i] ^ b[i]
- }
-
- return
-}
-
-func (db *DB) bExpireAt(key []byte, when int64) (int64, error) {
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- if seq, _, err := db.bGetMeta(key); err != nil || seq < 0 {
- return 0, err
- } else {
- db.expireAt(t, BitType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
- }
- return 1, nil
-}
-
-func (db *DB) bCountByte(val byte, soff uint32, eoff uint32) int32 {
- if soff > eoff {
- soff, eoff = eoff, soff
- }
-
- mask := uint8(0)
- if soff > 0 {
- mask |= fillBits[soff-1]
- }
- if eoff < 7 {
- mask |= (fillBits[7] ^ fillBits[eoff])
- }
- mask = fillBits[7] ^ mask
-
- return bitsInByte[val&mask]
-}
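-
-// Worked example (illustrative, not in the original file): for soff = 2 and
-// eoff = 5 the computed mask is 0b00111100, so only bits 2..5 of val are
-// counted.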
-
-func (db *DB) bCountSeg(key []byte, seq uint32, soff uint32, eoff uint32) (cnt int32, err error) {
- // soff and eoff are unsigned, so only the upper bound needs checking
- if soff >= segBitSize || eoff >= segBitSize {
- return
- }
-
- var segment []byte
- if _, segment, err = db.bGetSegment(key, seq); err != nil {
- return
- }
-
- if segment == nil {
- return
- }
-
- if soff > eoff {
- soff, eoff = eoff, soff
- }
-
- headIdx := int(soff >> 3)
- endIdx := int(eoff >> 3)
- sByteOff := soff - ((soff >> 3) << 3)
- eByteOff := eoff - ((eoff >> 3) << 3)
-
- if headIdx == endIdx {
- cnt = db.bCountByte(segment[headIdx], sByteOff, eByteOff)
- } else {
- cnt = db.bCountByte(segment[headIdx], sByteOff, 7) +
- db.bCountByte(segment[endIdx], 0, eByteOff)
- }
-
- // sum up the bytes in between
- for idx, end := headIdx+1, endIdx-1; idx <= end; idx += 1 {
- cnt += bitsInByte[segment[idx]]
- if idx == end {
- break
- }
- }
-
- return
-}
-
-func (db *DB) BGet(key []byte) (data []byte, err error) {
- if err = checkKeySize(key); err != nil {
- return
- }
-
- var ts, to int32
- if ts, to, err = db.bGetMeta(key); err != nil || ts < 0 {
- return
- }
-
- var tailSeq, tailOff = uint32(ts), uint32(to)
- var capByteSize uint32 = db.bCapByteSize(tailSeq, tailOff)
- data = make([]byte, capByteSize, capByteSize)
-
- minKey := db.bEncodeBinKey(key, minSeq)
- maxKey := db.bEncodeBinKey(key, tailSeq)
- it := db.bucket.RangeIterator(minKey, maxKey, store.RangeClose)
-
- var seq, s, e uint32
- for ; it.Valid(); it.Next() {
- if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
- data = nil
- break
- }
-
- s = seq << segByteWidth
- e = MinUInt32(s+segByteSize, capByteSize)
- copy(data[s:e], it.RawValue())
- }
- it.Close()
-
- return
-}
-
-func (db *DB) BDelete(key []byte) (drop int64, err error) {
- if err = checkKeySize(key); err != nil {
- return
- }
-
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- drop = db.bDelete(t, key)
- db.rmExpire(t, BitType, key)
-
- err = t.Commit()
- return
-}
-
-func (db *DB) BSetBit(key []byte, offset int32, val uint8) (ori uint8, err error) {
- if err = checkKeySize(key); err != nil {
- return
- }
-
- // TODO: check offset
- var seq, off uint32
- if seq, off, err = db.bParseOffset(key, offset); err != nil {
- return 0, err
- }
-
- var bk, segment []byte
- if bk, segment, err = db.bAllocateSegment(key, seq); err != nil {
- return 0, err
- }
-
- if segment != nil {
- ori = getBit(segment, off)
- if setBit(segment, off, val) {
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- t.Put(bk, segment)
- if _, _, e := db.bUpdateMeta(t, key, seq, off); e != nil {
- err = e
- return
- }
-
- err = t.Commit()
- }
- }
-
- return
-}
-
-func (db *DB) BMSetBit(key []byte, args ...BitPair) (place int64, err error) {
- if err = checkKeySize(key); err != nil {
- return
- }
-
- // (Note: to avoid wasted memory copies in db.Get() and batch.Put(), we
- // order the requests by position so that writes targeting the same
- // segment can be merged and executed together.)
-
- // #1 : sequence request data
- var argCnt = len(args)
- var bitInfos segBitInfoArray = make(segBitInfoArray, argCnt)
- var seq, off uint32
-
- for i, info := range args {
- if seq, off, err = db.bParseOffset(key, info.Pos); err != nil {
- return
- }
-
- bitInfos[i].Seq = seq
- bitInfos[i].Off = off
- bitInfos[i].Val = info.Val
- }
-
- sort.Sort(bitInfos)
-
- for i := 1; i < argCnt; i++ {
- if bitInfos[i].Seq == bitInfos[i-1].Seq && bitInfos[i].Off == bitInfos[i-1].Off {
- return 0, errDuplicatePos
- }
- }
-
- // #2 : execute bit set in order
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- var curBinKey, curSeg []byte
- var curSeq, maxSeq, maxOff uint32
-
- for _, info := range bitInfos {
- if curSeg != nil && info.Seq != curSeq {
- t.Put(curBinKey, curSeg)
- curSeg = nil
- }
-
- if curSeg == nil {
- curSeq = info.Seq
- if curBinKey, curSeg, err = db.bAllocateSegment(key, info.Seq); err != nil {
- return
- }
-
- if curSeg == nil {
- continue
- }
- }
-
- if setBit(curSeg, info.Off, info.Val) {
- maxSeq = info.Seq
- maxOff = info.Off
- place++
- }
- }
-
- if curSeg != nil {
- t.Put(curBinKey, curSeg)
- }
-
- // finally, update meta
- if place > 0 {
- if _, _, err = db.bUpdateMeta(t, key, maxSeq, maxOff); err != nil {
- return
- }
-
- err = t.Commit()
- }
-
- return
-}
-
-func (db *DB) BGetBit(key []byte, offset int32) (uint8, error) {
- if seq, off, err := db.bParseOffset(key, offset); err != nil {
- return 0, err
- } else {
- _, segment, err := db.bGetSegment(key, seq)
- if err != nil {
- return 0, err
- }
-
- if segment == nil {
- return 0, nil
- } else {
- return getBit(segment, off), nil
- }
- }
-}
-
-// func (db *DB) BGetRange(key []byte, start int32, end int32) ([]byte, error) {
-// section := make([]byte)
-
-// return
-// }
-
-func (db *DB) BCount(key []byte, start int32, end int32) (cnt int32, err error) {
- var sseq, soff uint32
- if sseq, soff, err = db.bParseOffset(key, start); err != nil {
- return
- }
-
- var eseq, eoff uint32
- if eseq, eoff, err = db.bParseOffset(key, end); err != nil {
- return
- }
-
- if sseq > eseq || (sseq == eseq && soff > eoff) {
- sseq, eseq = eseq, sseq
- soff, eoff = eoff, soff
- }
-
- var segCnt int32
- if eseq == sseq {
- if segCnt, err = db.bCountSeg(key, sseq, soff, eoff); err != nil {
- return 0, err
- }
-
- cnt = segCnt
-
- } else {
- if segCnt, err = db.bCountSeg(key, sseq, soff, segBitSize-1); err != nil {
- return 0, err
- } else {
- cnt += segCnt
- }
-
- if segCnt, err = db.bCountSeg(key, eseq, 0, eoff); err != nil {
- return 0, err
- } else {
- cnt += segCnt
- }
- }
-
- // middle segs
- var segment []byte
- skey := db.bEncodeBinKey(key, sseq)
- ekey := db.bEncodeBinKey(key, eseq)
-
- it := db.bucket.RangeIterator(skey, ekey, store.RangeOpen)
- for ; it.Valid(); it.Next() {
- segment = it.RawValue()
- for _, bt := range segment {
- cnt += bitsInByte[bt]
- }
- }
- it.Close()
-
- return
-}
-
-func (db *DB) BTail(key []byte) (int32, error) {
- // effective data length: the highest bit position ever set
- tailSeq, tailOff, err := db.bGetMeta(key)
- if err != nil {
- return 0, err
- }
-
- tail := int32(-1)
- if tailSeq >= 0 {
- tail = int32(uint32(tailSeq)<<segBitWidth | uint32(tailOff))
- }
-
- return tail, nil
-}
-
-func (db *DB) BOperation(op uint8, dstkey []byte, srckeys ...[]byte) (blen int32, err error) {
- // blen is the total bit size of the data stored at the destination key,
- // which equals the size of the longest input string.
-
- var exeOp func([]byte, []byte, *[]byte)
- switch op {
- case OPand:
- exeOp = db.bSegAnd
- case OPor:
- exeOp = db.bSegOr
- case OPxor, OPnot:
- exeOp = db.bSegXor
- default:
- return
- }
-
- if dstkey == nil || srckeys == nil {
- return
- }
-
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- var srcKseq, srcKoff int32
- var seq, off, maxDstSeq, maxDstOff uint32
-
- var keyNum int = len(srckeys)
- var validKeyNum int
- for i := 0; i < keyNum; i++ {
- if srcKseq, srcKoff, err = db.bGetMeta(srckeys[i]); err != nil {
- return
- } else if srcKseq < 0 {
- srckeys[i] = nil
- continue
- }
-
- validKeyNum++
-
- seq = uint32(srcKseq)
- off = uint32(srcKoff)
- if seq > maxDstSeq || (seq == maxDstSeq && off > maxDstOff) {
- maxDstSeq = seq
- maxDstOff = off
- }
- }
-
- if (op == OPnot && validKeyNum != 1) ||
- (op != OPnot && validKeyNum < 2) {
- return // not enough existing source keys
- }
-
- var srcIdx int
- for srcIdx = 0; srcIdx < keyNum; srcIdx++ {
- if srckeys[srcIdx] != nil {
- break
- }
- }
-
- // initialize segment data
- var segments = make([][]byte, maxDstSeq+1)
-
- if op == OPnot {
- // Note: ~num == num ^ 0b11111111 for each byte, so we initialize the
- // result segments with all bits set and then compute NOT via XOR.
-
- // leading segments, bit pattern: 1111 ... 1111
- for i := uint32(0); i < maxDstSeq; i++ {
- segments[i] = fillSegment
- }
-
- // last segment, bit pattern: 1111..1100..0000
- var tailSeg = make([]byte, segByteSize, segByteSize)
- var fillByte = fillBits[7]
- var tailSegLen = db.bCapByteSize(uint32(0), maxDstOff)
- for i := uint32(0); i < tailSegLen-1; i++ {
- tailSeg[i] = fillByte
- }
- tailSeg[tailSegLen-1] = fillBits[maxDstOff-(tailSegLen-1)<<3]
- segments[maxDstSeq] = tailSeg
-
- } else {
- // Note: initialize segments with the data of the first valid source key
- it := db.bIterator(srckeys[srcIdx])
- for ; it.Valid(); it.Next() {
- if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
- // TODO ...
- it.Close()
- return
- }
- segments[seq] = it.Value()
- }
- it.Close()
- srcIdx++
- }
-
- // operation with following keys
- var res []byte
- for i := srcIdx; i < keyNum; i++ {
- if srckeys[i] == nil {
- continue
- }
-
- it := db.bIterator(srckeys[i])
- for idx, end := uint32(0), false; !end; it.Next() {
- end = !it.Valid()
- if !end {
- if _, seq, err = db.bDecodeBinKey(it.RawKey()); err != nil {
- // TODO ...
- it.Close()
- return
- }
- } else {
- seq = maxDstSeq + 1
- }
-
- // TODO: the 'and' operation could be optimized here: once seq exceeds
- // the largest segment index this loop can break early, avoiding the
- // cost of Key() and bDecodeBinKey().
-
- for ; idx < seq; idx++ {
- res = nil
- exeOp(segments[idx], nil, &res)
- segments[idx] = res
- }
-
- if !end {
- res = it.Value()
- exeOp(segments[seq], res, &res)
- segments[seq] = res
- idx++
- }
- }
- it.Close()
- }
-
- // clear any old destination data first
- db.bDelete(t, dstkey)
- db.rmExpire(t, BitType, dstkey)
-
- // set data
- db.bSetMeta(t, dstkey, maxDstSeq, maxDstOff)
-
- var bk []byte
- for seq, segt := range segments {
- if segt != nil {
- bk = db.bEncodeBinKey(dstkey, uint32(seq))
- t.Put(bk, segt)
- }
- }
-
- err = t.Commit()
- if err == nil {
- // blen = int32(db.bCapByteSize(maxDstOff, maxDstOff))
- blen = int32(maxDstSeq<<segBitWidth | maxDstOff + 1)
- }
-
- return
-}
-
-func (db *DB) BExpire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.bExpireAt(key, time.Now().Unix()+duration)
-}
-
-func (db *DB) BExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.bExpireAt(key, when)
-}
-
-func (db *DB) BTTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(BitType, key)
-}
-
-func (db *DB) BPersist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.rmExpire(t, BitType, key)
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return n, err
-}
-
-func (db *DB) BScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(BitMetaType, key, count, inclusive, match)
-}
-
-func (db *DB) bFlush() (drop int64, err error) {
- t := db.binBatch
- t.Lock()
- defer t.Unlock()
-
- return db.flushType(t, BitType)
-}
diff --git a/vendor/github.com/lunny/nodb/t_hash.go b/vendor/github.com/lunny/nodb/t_hash.go
deleted file mode 100644
index bedfbf7c3e..0000000000
--- a/vendor/github.com/lunny/nodb/t_hash.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-type FVPair struct {
- Field []byte
- Value []byte
-}
-
-var errHashKey = errors.New("invalid hash key")
-var errHSizeKey = errors.New("invalid hsize key")
-
-const (
- hashStartSep byte = ':'
- hashStopSep byte = hashStartSep + 1
-)
-
-func checkHashKFSize(key []byte, field []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(field) > MaxHashFieldSize || len(field) == 0 {
- return errHashFieldSize
- }
- return nil
-}
-
-func (db *DB) hEncodeSizeKey(key []byte) []byte {
- buf := make([]byte, len(key)+2)
-
- buf[0] = db.index
- buf[1] = HSizeType
-
- copy(buf[2:], key)
- return buf
-}
-
-func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != HSizeType {
- return nil, errHSizeKey
- }
-
- return ek[2:], nil
-}
-
-func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
- buf := make([]byte, len(key)+len(field)+1+1+2+1)
-
- pos := 0
- buf[pos] = db.index
- pos++
- buf[pos] = HashType
- pos++
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = hashStartSep
- pos++
- copy(buf[pos:], field)
-
- return buf
-}
-
-func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
- if len(ek) < 5 || ek[0] != db.index || ek[1] != HashType {
- return nil, nil, errHashKey
- }
-
- pos := 2
- keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
- pos += 2
-
- if keyLen+5 > len(ek) {
- return nil, nil, errHashKey
- }
-
- key := ek[pos : pos+keyLen]
- pos += keyLen
-
- if ek[pos] != hashStartSep {
- return nil, nil, errHashKey
- }
-
- pos++
- field := ek[pos:]
- return key, field, nil
-}
-
-func (db *DB) hEncodeStartKey(key []byte) []byte {
- return db.hEncodeHashKey(key, nil)
-}
-
-func (db *DB) hEncodeStopKey(key []byte) []byte {
- k := db.hEncodeHashKey(key, nil)
-
- k[len(k)-1] = hashStopSep
-
- return k
-}
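-
-// Key layout (descriptive note, not in the original file):
-//
-//	| index (1) | HashType (1) | keyLen (2, BE) | key | ':' | field |
-//
-// Because hashStopSep is hashStartSep+1, the stop key sorts just after
-// every field of the same hash, bounding range scans.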
-
-func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) {
- t := db.hashBatch
-
- ek := db.hEncodeHashKey(key, field)
-
- var n int64 = 1
- if v, _ := db.bucket.Get(ek); v != nil {
- n = 0
- } else {
- if _, err := db.hIncrSize(key, 1); err != nil {
- return 0, err
- }
- }
-
- t.Put(ek, value)
- return n, nil
-}
-
-// Note: this only deletes the hash data itself;
-// related state such as expiration is left untouched.
-func (db *DB) hDelete(t *batch, key []byte) int64 {
- sk := db.hEncodeSizeKey(key)
- start := db.hEncodeStartKey(key)
- stop := db.hEncodeStopKey(key)
-
- var num int64 = 0
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- t.Delete(it.Key())
- num++
- }
- it.Close()
-
- t.Delete(sk)
- return num
-}
-
-func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- if hlen, err := db.HLen(key); err != nil || hlen == 0 {
- return 0, err
- } else {
- db.expireAt(t, HashType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
- }
- return 1, nil
-}
-
-func (db *DB) HLen(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
-}
-
-func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
- if err := checkHashKFSize(key, field); err != nil {
- return 0, err
- } else if err := checkValueSize(value); err != nil {
- return 0, err
- }
-
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.hSetItem(key, field, value)
- if err != nil {
- return 0, err
- }
-
- // TODO: add binlog
-
- err = t.Commit()
- return n, err
-}
-
-func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
- if err := checkHashKFSize(key, field); err != nil {
- return nil, err
- }
-
- return db.bucket.Get(db.hEncodeHashKey(key, field))
-}
-
-func (db *DB) HMset(key []byte, args ...FVPair) error {
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- var err error
- var ek []byte
- var num int64 = 0
- for i := 0; i < len(args); i++ {
- if err := checkHashKFSize(key, args[i].Field); err != nil {
- return err
- } else if err := checkValueSize(args[i].Value); err != nil {
- return err
- }
-
- ek = db.hEncodeHashKey(key, args[i].Field)
-
- if v, err := db.bucket.Get(ek); err != nil {
- return err
- } else if v == nil {
- num++
- }
-
- t.Put(ek, args[i].Value)
- }
-
- if _, err = db.hIncrSize(key, num); err != nil {
- return err
- }
-
- // TODO: add binlog
- err = t.Commit()
- return err
-}
-
-func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
- var ek []byte
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- r := make([][]byte, len(args))
- for i := 0; i < len(args); i++ {
- if err := checkHashKFSize(key, args[i]); err != nil {
- return nil, err
- }
-
- ek = db.hEncodeHashKey(key, args[i])
-
- r[i] = it.Find(ek)
- }
-
- return r, nil
-}
-
-func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
- t := db.hashBatch
-
- var ek []byte
- var v []byte
- var err error
-
- t.Lock()
- defer t.Unlock()
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- var num int64 = 0
- for i := 0; i < len(args); i++ {
- if err := checkHashKFSize(key, args[i]); err != nil {
- return 0, err
- }
-
- ek = db.hEncodeHashKey(key, args[i])
-
- v = it.RawFind(ek)
- if v == nil {
- continue
- } else {
- num++
- t.Delete(ek)
- }
- }
-
- if _, err = db.hIncrSize(key, -num); err != nil {
- return 0, err
- }
-
- err = t.Commit()
-
- return num, err
-}
-
-func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
- t := db.hashBatch
- sk := db.hEncodeSizeKey(key)
-
- var err error
- var size int64 = 0
- if size, err = Int64(db.bucket.Get(sk)); err != nil {
- return 0, err
- } else {
- size += delta
- if size <= 0 {
- size = 0
- t.Delete(sk)
- db.rmExpire(t, HashType, key)
- } else {
- t.Put(sk, PutInt64(size))
- }
- }
-
- return size, nil
-}
-
-func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
- if err := checkHashKFSize(key, field); err != nil {
- return 0, err
- }
-
- t := db.hashBatch
- var ek []byte
- var err error
-
- t.Lock()
- defer t.Unlock()
-
- ek = db.hEncodeHashKey(key, field)
-
- var n int64 = 0
- if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
- return 0, err
- }
-
- n += delta
-
- _, err = db.hSetItem(key, field, StrPutInt64(n))
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
-
- return n, err
-}
-
-func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- start := db.hEncodeStartKey(key)
- stop := db.hEncodeStopKey(key)
-
- v := make([]FVPair, 0, 16)
-
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- _, f, err := db.hDecodeHashKey(it.Key())
- if err != nil {
- return nil, err
- }
-
- v = append(v, FVPair{Field: f, Value: it.Value()})
- }
-
- it.Close()
-
- return v, nil
-}
-
-func (db *DB) HKeys(key []byte) ([][]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- start := db.hEncodeStartKey(key)
- stop := db.hEncodeStopKey(key)
-
- v := make([][]byte, 0, 16)
-
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- _, f, err := db.hDecodeHashKey(it.Key())
- if err != nil {
- return nil, err
- }
- v = append(v, f)
- }
-
- it.Close()
-
- return v, nil
-}
-
-func (db *DB) HValues(key []byte) ([][]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- start := db.hEncodeStartKey(key)
- stop := db.hEncodeStopKey(key)
-
- v := make([][]byte, 0, 16)
-
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- _, _, err := db.hDecodeHashKey(it.Key())
- if err != nil {
- return nil, err
- }
-
- v = append(v, it.Value())
- }
-
- it.Close()
-
- return v, nil
-}
-
-func (db *DB) HClear(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- num := db.hDelete(t, key)
- db.rmExpire(t, HashType, key)
-
- err := t.Commit()
- return num, err
-}
-
-func (db *DB) HMclear(keys ...[]byte) (int64, error) {
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- for _, key := range keys {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- db.hDelete(t, key)
- db.rmExpire(t, HashType, key)
- }
-
- err := t.Commit()
- return int64(len(keys)), err
-}
-
-func (db *DB) hFlush() (drop int64, err error) {
- t := db.hashBatch
-
- t.Lock()
- defer t.Unlock()
-
- return db.flushType(t, HashType)
-}
-
-func (db *DB) HScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(HSizeType, key, count, inclusive, match)
-}
-
-func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- return db.hExpireAt(key, time.Now().Unix()+duration)
-}
-
-func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- return db.hExpireAt(key, when)
-}
-
-func (db *DB) HTTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(HashType, key)
-}
-
-func (db *DB) HPersist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.hashBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.rmExpire(t, HashType, key)
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return n, err
-}
diff --git a/vendor/github.com/lunny/nodb/t_kv.go b/vendor/github.com/lunny/nodb/t_kv.go
deleted file mode 100644
index 82a12f7027..0000000000
--- a/vendor/github.com/lunny/nodb/t_kv.go
+++ /dev/null
@@ -1,387 +0,0 @@
-package nodb
-
-import (
- "errors"
- "time"
-)
-
-type KVPair struct {
- Key []byte
- Value []byte
-}
-
-var errKVKey = errors.New("invalid encode kv key")
-
-func checkKeySize(key []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- }
- return nil
-}
-
-func checkValueSize(value []byte) error {
- if len(value) > MaxValueSize {
- return errValueSize
- }
-
- return nil
-}
-
-func (db *DB) encodeKVKey(key []byte) []byte {
- ek := make([]byte, len(key)+2)
- ek[0] = db.index
- ek[1] = KVType
- copy(ek[2:], key)
- return ek
-}
-
-func (db *DB) decodeKVKey(ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != KVType {
- return nil, errKVKey
- }
-
- return ek[2:], nil
-}
-
-func (db *DB) encodeKVMinKey() []byte {
- ek := db.encodeKVKey(nil)
- return ek
-}
-
-func (db *DB) encodeKVMaxKey() []byte {
- ek := db.encodeKVKey(nil)
- ek[len(ek)-1] = KVType + 1
- return ek
-}
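-
-// Descriptive note (not in the original file): KV keys are encoded as
-// | index (1) | KVType (1) | key |, so the min/max keys above bound the
-// whole KV keyspace of this DB index by bumping the type byte.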
-
-func (db *DB) incr(key []byte, delta int64) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- var err error
- key = db.encodeKVKey(key)
-
- t := db.kvBatch
-
- t.Lock()
- defer t.Unlock()
-
- var n int64
- n, err = StrInt64(db.bucket.Get(key))
- if err != nil {
- return 0, err
- }
-
- n += delta
-
- t.Put(key, StrPutInt64(n))
-
- // TODO: binlog
-
- err = t.Commit()
- return n, err
-}
-
-// Note: this only deletes the key-value data itself;
-// related state such as expiration is left untouched.
-func (db *DB) delete(t *batch, key []byte) int64 {
- key = db.encodeKVKey(key)
- t.Delete(key)
- return 1
-}
-
-func (db *DB) setExpireAt(key []byte, when int64) (int64, error) {
- t := db.kvBatch
- t.Lock()
- defer t.Unlock()
-
- if exist, err := db.Exists(key); err != nil || exist == 0 {
- return 0, err
- } else {
- db.expireAt(t, KVType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
- }
- return 1, nil
-}
-
-func (db *DB) Decr(key []byte) (int64, error) {
- return db.incr(key, -1)
-}
-
-func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) {
- return db.incr(key, -decrement)
-}
-
-func (db *DB) Del(keys ...[]byte) (int64, error) {
- if len(keys) == 0 {
- return 0, nil
- }
-
- codedKeys := make([][]byte, len(keys))
- for i, k := range keys {
- codedKeys[i] = db.encodeKVKey(k)
- }
-
- t := db.kvBatch
- t.Lock()
- defer t.Unlock()
-
- for i, k := range keys {
- t.Delete(codedKeys[i])
- db.rmExpire(t, KVType, k)
- }
-
- err := t.Commit()
- return int64(len(keys)), err
-}
-
-func (db *DB) Exists(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- var err error
- key = db.encodeKVKey(key)
-
- var v []byte
- v, err = db.bucket.Get(key)
- if v != nil && err == nil {
- return 1, nil
- }
-
- return 0, err
-}
-
-func (db *DB) Get(key []byte) ([]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- key = db.encodeKVKey(key)
-
- return db.bucket.Get(key)
-}
-
-func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- } else if err := checkValueSize(value); err != nil {
- return nil, err
- }
-
- key = db.encodeKVKey(key)
-
- t := db.kvBatch
-
- t.Lock()
- defer t.Unlock()
-
- oldValue, err := db.bucket.Get(key)
- if err != nil {
- return nil, err
- }
-
- t.Put(key, value)
- // TODO: binlog
-
- err = t.Commit()
-
- return oldValue, err
-}
-
-func (db *DB) Incr(key []byte) (int64, error) {
- return db.incr(key, 1)
-}
-
-func (db *DB) IncrBy(key []byte, increment int64) (int64, error) {
- return db.incr(key, increment)
-}
-
-func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
- values := make([][]byte, len(keys))
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- for i := range keys {
- if err := checkKeySize(keys[i]); err != nil {
- return nil, err
- }
-
- values[i] = it.Find(db.encodeKVKey(keys[i]))
- }
-
- return values, nil
-}
-
-func (db *DB) MSet(args ...KVPair) error {
- if len(args) == 0 {
- return nil
- }
-
- t := db.kvBatch
-
- var err error
- var key []byte
- var value []byte
-
- t.Lock()
- defer t.Unlock()
-
- for i := 0; i < len(args); i++ {
- if err := checkKeySize(args[i].Key); err != nil {
- return err
- } else if err := checkValueSize(args[i].Value); err != nil {
- return err
- }
-
- key = db.encodeKVKey(args[i].Key)
-
- value = args[i].Value
-
- t.Put(key, value)
-
- // TODO: binlog
- }
-
- err = t.Commit()
- return err
-}
-
-func (db *DB) Set(key []byte, value []byte) error {
- if err := checkKeySize(key); err != nil {
- return err
- } else if err := checkValueSize(value); err != nil {
- return err
- }
-
- var err error
- key = db.encodeKVKey(key)
-
- t := db.kvBatch
-
- t.Lock()
- defer t.Unlock()
-
- t.Put(key, value)
-
- err = t.Commit()
-
- return err
-}
-
-func (db *DB) SetNX(key []byte, value []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- } else if err := checkValueSize(value); err != nil {
- return 0, err
- }
-
- var err error
- key = db.encodeKVKey(key)
-
- var n int64 = 1
-
- t := db.kvBatch
-
- t.Lock()
- defer t.Unlock()
-
- if v, err := db.bucket.Get(key); err != nil {
- return 0, err
- } else if v != nil {
- n = 0
- } else {
- t.Put(key, value)
-
- // TODO: binlog
-
- err = t.Commit()
- }
-
- return n, err
-}
-
-func (db *DB) flush() (drop int64, err error) {
- t := db.kvBatch
- t.Lock()
- defer t.Unlock()
- return db.flushType(t, KVType)
-}
-
-// If inclusive is true, scans the range [key, inf); otherwise (key, inf).
-func (db *DB) Scan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(KVType, key, count, inclusive, match)
-}
-
-func (db *DB) Expire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- return db.setExpireAt(key, time.Now().Unix()+duration)
-}
-
-func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- return db.setExpireAt(key, when)
-}
-
-func (db *DB) TTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(KVType, key)
-}
-
-func (db *DB) Persist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.kvBatch
- t.Lock()
- defer t.Unlock()
- n, err := db.rmExpire(t, KVType, key)
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return n, err
-}
-
-func (db *DB) Lock() {
- t := db.kvBatch
- t.Lock()
-}
-
-func (db *DB) Remove(key []byte) bool {
- if len(key) == 0 {
- return false
- }
- t := db.kvBatch
- t.Delete(db.encodeKVKey(key))
- _, err := db.rmExpire(t, KVType, key)
- if err != nil {
- return false
- }
- return true
-}
-
-func (db *DB) Commit() error {
- t := db.kvBatch
- return t.Commit()
-}
-
-func (db *DB) Unlock() {
- t := db.kvBatch
- t.Unlock()
-}
diff --git a/vendor/github.com/lunny/nodb/t_list.go b/vendor/github.com/lunny/nodb/t_list.go
deleted file mode 100644
index 5b9d9d9c21..0000000000
--- a/vendor/github.com/lunny/nodb/t_list.go
+++ /dev/null
@@ -1,492 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-const (
- listHeadSeq int32 = 1
- listTailSeq int32 = 2
-
- listMinSeq int32 = 1000
- listMaxSeq int32 = 1<<31 - 1000
- listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2
-)
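-
-// Illustrative note (not in the original file): a fresh list starts at
-// listInitialSeq, the midpoint of [listMinSeq, listMaxSeq]; head pushes
-// grow downwards and tail pushes grow upwards, leaving roughly 2^30
-// sequence numbers of headroom on either side.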
-
-var errLMetaKey = errors.New("invalid lmeta key")
-var errListKey = errors.New("invalid list key")
-var errListSeq = errors.New("invalid list sequence, overflow")
-
-func (db *DB) lEncodeMetaKey(key []byte) []byte {
- buf := make([]byte, len(key)+2)
- buf[0] = db.index
- buf[1] = LMetaType
-
- copy(buf[2:], key)
- return buf
-}
-
-func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != LMetaType {
- return nil, errLMetaKey
- }
-
- return ek[2:], nil
-}
-
-func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
- buf := make([]byte, len(key)+8)
-
- pos := 0
- buf[pos] = db.index
- pos++
- buf[pos] = ListType
- pos++
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- binary.BigEndian.PutUint32(buf[pos:], uint32(seq))
-
- return buf
-}
-
-func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) {
- if len(ek) < 8 || ek[0] != db.index || ek[1] != ListType {
- err = errListKey
- return
- }
-
- keyLen := int(binary.BigEndian.Uint16(ek[2:]))
- if keyLen+8 != len(ek) {
- err = errListKey
- return
- }
-
- key = ek[4 : 4+keyLen]
- seq = int32(binary.BigEndian.Uint32(ek[4+keyLen:]))
- return
-}
-
-func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- var headSeq int32
- var tailSeq int32
- var size int32
- var err error
-
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- metaKey := db.lEncodeMetaKey(key)
- headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
- if err != nil {
- return 0, err
- }
-
- var pushCnt int = len(args)
- if pushCnt == 0 {
- return int64(size), nil
- }
-
- var seq int32 = headSeq
- var delta int32 = -1
- if whereSeq == listTailSeq {
- seq = tailSeq
- delta = 1
- }
-
- // append elements
- if size > 0 {
- seq += delta
- }
-
- for i := 0; i < pushCnt; i++ {
- ek := db.lEncodeListKey(key, seq+int32(i)*delta)
- t.Put(ek, args[i])
- }
-
- seq += int32(pushCnt-1) * delta
- if seq <= listMinSeq || seq >= listMaxSeq {
- return 0, errListSeq
- }
-
- // set meta info
- if whereSeq == listHeadSeq {
- headSeq = seq
- } else {
- tailSeq = seq
- }
-
- db.lSetMeta(metaKey, headSeq, tailSeq)
-
- err = t.Commit()
- return int64(size) + int64(pushCnt), err
-}
-
-func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- var headSeq int32
- var tailSeq int32
- var err error
-
- metaKey := db.lEncodeMetaKey(key)
- headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey)
- if err != nil {
- return nil, err
- }
-
- var value []byte
-
- var seq int32 = headSeq
- if whereSeq == listTailSeq {
- seq = tailSeq
- }
-
- itemKey := db.lEncodeListKey(key, seq)
- value, err = db.bucket.Get(itemKey)
- if err != nil {
- return nil, err
- }
-
- if whereSeq == listHeadSeq {
- headSeq += 1
- } else {
- tailSeq -= 1
- }
-
- t.Delete(itemKey)
- size := db.lSetMeta(metaKey, headSeq, tailSeq)
- if size == 0 {
- // this is a list, so remove the ListType expiration (HashType was a slip)
- db.rmExpire(t, ListType, key)
- }
-
- err = t.Commit()
- return value, err
-}
-
-// Note: this only deletes the list data itself;
-// related state such as expiration is left untouched.
-func (db *DB) lDelete(t *batch, key []byte) int64 {
- mk := db.lEncodeMetaKey(key)
-
- var headSeq int32
- var tailSeq int32
- var err error
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- headSeq, tailSeq, _, err = db.lGetMeta(it, mk)
- if err != nil {
- return 0
- }
-
- var num int64 = 0
- startKey := db.lEncodeListKey(key, headSeq)
- stopKey := db.lEncodeListKey(key, tailSeq)
-
- rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose})
- for ; rit.Valid(); rit.Next() {
- t.Delete(rit.RawKey())
- num++
- }
-
- t.Delete(mk)
-
- return num
-}
-
-func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) {
- var v []byte
- if it != nil {
- v = it.Find(ek)
- } else {
- v, err = db.bucket.Get(ek)
- }
- if err != nil {
- return
- } else if v == nil {
- headSeq = listInitialSeq
- tailSeq = listInitialSeq
- size = 0
- return
- } else {
- headSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
- tailSeq = int32(binary.LittleEndian.Uint32(v[4:8]))
- size = tailSeq - headSeq + 1
- }
- return
-}
-
-func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
- t := db.listBatch
-
- var size int32 = tailSeq - headSeq + 1
- if size < 0 {
- // TODO: log the error and panic
- } else if size == 0 {
- t.Delete(ek)
- } else {
- buf := make([]byte, 8)
-
- binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq))
- binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq))
-
- t.Put(ek, buf)
- }
-
- return size
-}
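-
-// Worked example (illustrative, not in the original file): after three
-// RPushes onto an empty list, headSeq = listInitialSeq and
-// tailSeq = listInitialSeq+2, so lSetMeta stores both and reports
-// size = tailSeq - headSeq + 1 = 3.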
-
-func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- if llen, err := db.LLen(key); err != nil || llen == 0 {
- return 0, err
- } else {
- db.expireAt(t, ListType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
- }
- return 1, nil
-}
-
-func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- var seq int32
- var headSeq int32
- var tailSeq int32
- var err error
-
- metaKey := db.lEncodeMetaKey(key)
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey)
- if err != nil {
- return nil, err
- }
-
- if index >= 0 {
- seq = headSeq + index
- } else {
- seq = tailSeq + index + 1
- }
-
- sk := db.lEncodeListKey(key, seq)
- v := it.Find(sk)
-
- return v, nil
-}
-
-func (db *DB) LLen(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- ek := db.lEncodeMetaKey(key)
- _, _, size, err := db.lGetMeta(nil, ek)
- return int64(size), err
-}
-
-func (db *DB) LPop(key []byte) ([]byte, error) {
- return db.lpop(key, listHeadSeq)
-}
-
-func (db *DB) LPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
- var argss = [][]byte{arg1}
- argss = append(argss, args...)
- return db.lpush(key, listHeadSeq, argss...)
-}
-
-func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- var headSeq int32
- var llen int32
- var err error
-
- metaKey := db.lEncodeMetaKey(key)
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil {
- return nil, err
- }
-
- if start < 0 {
- start = llen + start
- }
- if stop < 0 {
- stop = llen + stop
- }
- if start < 0 {
- start = 0
- }
-
- if start > stop || start >= llen {
- return [][]byte{}, nil
- }
-
- if stop >= llen {
- stop = llen - 1
- }
-
- limit := (stop - start) + 1
- headSeq += start
-
- v := make([][]byte, 0, limit)
-
- startKey := db.lEncodeListKey(key, headSeq)
- rit := store.NewRangeLimitIterator(it,
- &store.Range{
- Min: startKey,
- Max: nil,
- Type: store.RangeClose},
- &store.Limit{
- Offset: 0,
- Count: int(limit)})
-
- for ; rit.Valid(); rit.Next() {
- v = append(v, rit.Value())
- }
-
- return v, nil
-}
-
-func (db *DB) RPop(key []byte) ([]byte, error) {
- return db.lpop(key, listTailSeq)
-}
-
-func (db *DB) RPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
- var argss = [][]byte{arg1}
- argss = append(argss, args...)
- return db.lpush(key, listTailSeq, argss...)
-}
-
-func (db *DB) LClear(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- num := db.lDelete(t, key)
- db.rmExpire(t, ListType, key)
-
- err := t.Commit()
- return num, err
-}
-
-func (db *DB) LMclear(keys ...[]byte) (int64, error) {
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- for _, key := range keys {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- db.lDelete(t, key)
- db.rmExpire(t, ListType, key)
-
- }
-
- err := t.Commit()
- return int64(len(keys)), err
-}
-
-func (db *DB) lFlush() (drop int64, err error) {
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
- return db.flushType(t, ListType)
-}
-
-func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- return db.lExpireAt(key, time.Now().Unix()+duration)
-}
-
-func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- return db.lExpireAt(key, when)
-}
-
-func (db *DB) LTTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(ListType, key)
-}
-
-func (db *DB) LPersist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.listBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.rmExpire(t, ListType, key)
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return n, err
-}
-
-func (db *DB) LScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(LMetaType, key, count, inclusive, match)
-}
-
-func (db *DB) lEncodeMinKey() []byte {
- return db.lEncodeMetaKey(nil)
-}
-
-func (db *DB) lEncodeMaxKey() []byte {
- ek := db.lEncodeMetaKey(nil)
- ek[len(ek)-1] = LMetaType + 1
- return ek
-}
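
The LRange implementation above clamps Redis-style negative indices before iterating. A minimal standalone sketch of that normalization (normalizeRange is an illustrative helper, not part of nodb):

package main

import "fmt"

// normalizeRange mirrors the clamping done in LRange: negative indices count
// from the tail, start is clamped to 0, and an empty range is reported via
// ok == false.
func normalizeRange(start, stop, llen int32) (s, e int32, ok bool) {
	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start < 0 {
		start = 0
	}
	if start > stop || start >= llen {
		return 0, 0, false
	}
	if stop >= llen {
		stop = llen - 1
	}
	return start, stop, true
}

func main() {
	// For a 5-element list, (-3, -1) selects the last three elements.
	fmt.Println(normalizeRange(-3, -1, 5)) // 2 4 true
}
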
diff --git a/vendor/github.com/lunny/nodb/t_set.go b/vendor/github.com/lunny/nodb/t_set.go
deleted file mode 100644
index 41ce30e8ce..0000000000
--- a/vendor/github.com/lunny/nodb/t_set.go
+++ /dev/null
@@ -1,601 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-var errSetKey = errors.New("invalid set key")
-var errSSizeKey = errors.New("invalid ssize key")
-
-const (
- setStartSep byte = ':'
- setStopSep byte = setStartSep + 1
- UnionType byte = 51
- DiffType byte = 52
- InterType byte = 53
-)
-
-func checkSetKMSize(key []byte, member []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(member) > MaxSetMemberSize || len(member) == 0 {
- return errSetMemberSize
- }
- return nil
-}
-
-func (db *DB) sEncodeSizeKey(key []byte) []byte {
- buf := make([]byte, len(key)+2)
-
- buf[0] = db.index
- buf[1] = SSizeType
-
- copy(buf[2:], key)
- return buf
-}
-
-func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != SSizeType {
- return nil, errSSizeKey
- }
-
- return ek[2:], nil
-}
-
-// sEncodeSetKey encodes a member key as |index|SetType|keyLen(2B BE)|key|':'|member|.
-func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
- buf := make([]byte, len(key)+len(member)+1+1+2+1)
-
- pos := 0
- buf[pos] = db.index
- pos++
- buf[pos] = SetType
- pos++
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = setStartSep
- pos++
- copy(buf[pos:], member)
-
- return buf
-}
-
-func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) {
- if len(ek) < 5 || ek[0] != db.index || ek[1] != SetType {
- return nil, nil, errSetKey
- }
-
- pos := 2
- keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
- pos += 2
-
- if keyLen+5 > len(ek) {
- return nil, nil, errSetKey
- }
-
- key := ek[pos : pos+keyLen]
- pos += keyLen
-
-	if ek[pos] != setStartSep {
- return nil, nil, errSetKey
- }
-
- pos++
- member := ek[pos:]
- return key, member, nil
-}
-
-func (db *DB) sEncodeStartKey(key []byte) []byte {
- return db.sEncodeSetKey(key, nil)
-}
-
-func (db *DB) sEncodeStopKey(key []byte) []byte {
- k := db.sEncodeSetKey(key, nil)
-
- k[len(k)-1] = setStopSep
-
- return k
-}
-
-func (db *DB) sFlush() (drop int64, err error) {
-
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- return db.flushType(t, SetType)
-}
-
-func (db *DB) sDelete(t *batch, key []byte) int64 {
- sk := db.sEncodeSizeKey(key)
- start := db.sEncodeStartKey(key)
- stop := db.sEncodeStopKey(key)
-
- var num int64 = 0
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- t.Delete(it.RawKey())
- num++
- }
-
- it.Close()
-
- t.Delete(sk)
- return num
-}
-
-func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) {
- t := db.setBatch
- sk := db.sEncodeSizeKey(key)
-
- var err error
- var size int64 = 0
- if size, err = Int64(db.bucket.Get(sk)); err != nil {
- return 0, err
- } else {
- size += delta
- if size <= 0 {
- size = 0
- t.Delete(sk)
- db.rmExpire(t, SetType, key)
- } else {
- t.Put(sk, PutInt64(size))
- }
- }
-
- return size, nil
-}
-
-func (db *DB) sExpireAt(key []byte, when int64) (int64, error) {
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- if scnt, err := db.SCard(key); err != nil || scnt == 0 {
- return 0, err
- } else {
- db.expireAt(t, SetType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
-
- }
-
- return 1, nil
-}
-
-func (db *DB) sSetItem(key []byte, member []byte) (int64, error) {
- t := db.setBatch
- ek := db.sEncodeSetKey(key, member)
-
- var n int64 = 1
- if v, _ := db.bucket.Get(ek); v != nil {
- n = 0
- } else {
- if _, err := db.sIncrSize(key, 1); err != nil {
- return 0, err
- }
- }
-
- t.Put(ek, nil)
- return n, nil
-}
-
-func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- var err error
- var ek []byte
- var num int64 = 0
- for i := 0; i < len(args); i++ {
- if err := checkSetKMSize(key, args[i]); err != nil {
- return 0, err
- }
-
- ek = db.sEncodeSetKey(key, args[i])
-
- if v, err := db.bucket.Get(ek); err != nil {
- return 0, err
- } else if v == nil {
- num++
- }
-
- t.Put(ek, nil)
- }
-
- if _, err = db.sIncrSize(key, num); err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return num, err
-
-}
-
-func (db *DB) SCard(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- sk := db.sEncodeSizeKey(key)
-
- return Int64(db.bucket.Get(sk))
-}
-
-func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) {
- destMap := make(map[string]bool)
-
- members, err := db.SMembers(keys[0])
- if err != nil {
- return nil, err
- }
-
- for _, m := range members {
- destMap[String(m)] = true
- }
-
- for _, k := range keys[1:] {
- members, err := db.SMembers(k)
- if err != nil {
- return nil, err
- }
-
- for _, m := range members {
-			if _, ok := destMap[String(m)]; ok {
-				delete(destMap, String(m))
-			}
- }
-		// the empty set minus anything is still empty, so stop early
- if len(destMap) == 0 {
- return nil, nil
- }
- }
-
- slice := make([][]byte, len(destMap))
- idx := 0
- for k, v := range destMap {
- if !v {
- continue
- }
- slice[idx] = []byte(k)
- idx++
- }
-
- return slice, nil
-}
-
-func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) {
- v, err := db.sDiffGeneric(keys...)
- return v, err
-}
-
-func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) {
- n, err := db.sStoreGeneric(dstKey, DiffType, keys...)
- return n, err
-}
-
-func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) {
- destMap := make(map[string]bool)
-
- members, err := db.SMembers(keys[0])
- if err != nil {
- return nil, err
- }
-
- for _, m := range members {
- destMap[String(m)] = true
- }
-
- for _, key := range keys[1:] {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- members, err := db.SMembers(key)
- if err != nil {
- return nil, err
-		} else if len(members) == 0 {
-			// intersecting with an empty set yields an empty result
-			return nil, nil
-		}
-
- tempMap := make(map[string]bool)
- for _, member := range members {
- if err := checkKeySize(member); err != nil {
- return nil, err
- }
- if _, ok := destMap[String(member)]; ok {
- tempMap[String(member)] = true //mark this item as selected
- }
- }
- destMap = tempMap //reduce the size of the result set
- if len(destMap) == 0 {
- return nil, nil
- }
- }
-
- slice := make([][]byte, len(destMap))
- idx := 0
- for k, v := range destMap {
- if !v {
- continue
- }
-
- slice[idx] = []byte(k)
- idx++
- }
-
- return slice, nil
-
-}
-
-func (db *DB) SInter(keys ...[]byte) ([][]byte, error) {
- v, err := db.sInterGeneric(keys...)
- return v, err
-
-}
-
-func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) {
- n, err := db.sStoreGeneric(dstKey, InterType, keys...)
- return n, err
-}
-
-func (db *DB) SIsMember(key []byte, member []byte) (int64, error) {
- ek := db.sEncodeSetKey(key, member)
-
- var n int64 = 1
- if v, err := db.bucket.Get(ek); err != nil {
- return 0, err
- } else if v == nil {
- n = 0
- }
- return n, nil
-}
-
-func (db *DB) SMembers(key []byte) ([][]byte, error) {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- start := db.sEncodeStartKey(key)
- stop := db.sEncodeStopKey(key)
-
- v := make([][]byte, 0, 16)
-
- it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- _, m, err := db.sDecodeSetKey(it.Key())
- if err != nil {
- return nil, err
- }
-
- v = append(v, m)
- }
-
- it.Close()
-
- return v, nil
-}
-
-func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) {
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- var ek []byte
- var v []byte
- var err error
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- var num int64 = 0
- for i := 0; i < len(args); i++ {
- if err := checkSetKMSize(key, args[i]); err != nil {
- return 0, err
- }
-
- ek = db.sEncodeSetKey(key, args[i])
-
- v = it.RawFind(ek)
- if v == nil {
- continue
- } else {
- num++
- t.Delete(ek)
- }
- }
-
- if _, err = db.sIncrSize(key, -num); err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return num, err
-
-}
-
-func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) {
- dstMap := make(map[string]bool)
-
- for _, key := range keys {
- if err := checkKeySize(key); err != nil {
- return nil, err
- }
-
- members, err := db.SMembers(key)
- if err != nil {
- return nil, err
- }
-
- for _, member := range members {
- dstMap[String(member)] = true
- }
- }
-
- slice := make([][]byte, len(dstMap))
- idx := 0
- for k, v := range dstMap {
- if !v {
- continue
- }
- slice[idx] = []byte(k)
- idx++
- }
-
- return slice, nil
-}
-
-func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) {
- v, err := db.sUnionGeneric(keys...)
- return v, err
-}
-
-func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) {
- n, err := db.sStoreGeneric(dstKey, UnionType, keys...)
- return n, err
-}
-
-func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) {
- if err := checkKeySize(dstKey); err != nil {
- return 0, err
- }
-
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- db.sDelete(t, dstKey)
-
- var err error
- var ek []byte
- var v [][]byte
-
- switch optType {
- case UnionType:
- v, err = db.sUnionGeneric(keys...)
- case DiffType:
- v, err = db.sDiffGeneric(keys...)
- case InterType:
- v, err = db.sInterGeneric(keys...)
- }
-
- if err != nil {
- return 0, err
- }
-
- for _, m := range v {
- if err := checkSetKMSize(dstKey, m); err != nil {
- return 0, err
- }
-
- ek = db.sEncodeSetKey(dstKey, m)
-
- if _, err := db.bucket.Get(ek); err != nil {
- return 0, err
- }
-
- t.Put(ek, nil)
- }
-
- var num = int64(len(v))
- sk := db.sEncodeSizeKey(dstKey)
- t.Put(sk, PutInt64(num))
-
- if err = t.Commit(); err != nil {
- return 0, err
- }
- return num, nil
-}
-
-func (db *DB) SClear(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- num := db.sDelete(t, key)
- db.rmExpire(t, SetType, key)
-
- err := t.Commit()
- return num, err
-}
-
-func (db *DB) SMclear(keys ...[]byte) (int64, error) {
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- for _, key := range keys {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- db.sDelete(t, key)
- db.rmExpire(t, SetType, key)
- }
-
- err := t.Commit()
- return int64(len(keys)), err
-}
-
-func (db *DB) SExpire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- return db.sExpireAt(key, time.Now().Unix()+duration)
-
-}
-
-func (db *DB) SExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- return db.sExpireAt(key, when)
-
-}
-
-func (db *DB) STTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(SetType, key)
-}
-
-func (db *DB) SPersist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.setBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.rmExpire(t, SetType, key)
- if err != nil {
- return 0, err
- }
- err = t.Commit()
- return n, err
-}
-
-func (db *DB) SScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(SSizeType, key, count, inclusive, match)
-}
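
For reference, sEncodeSetKey above lays member keys out as |index|SetType|keyLen(2B BE)|key|':'|member|, which keeps all members of one set in a contiguous key range. A small sketch of the same layout, with the type byte as an assumed placeholder value:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeSetKey mirrors the layout of sEncodeSetKey for a single db index.
// The SetType byte (42 here) is a placeholder; nodb defines its own constant.
func encodeSetKey(index, setType byte, key, member []byte) []byte {
	buf := make([]byte, len(key)+len(member)+5)
	buf[0] = index
	buf[1] = setType
	binary.BigEndian.PutUint16(buf[2:], uint16(len(key)))
	copy(buf[4:], key)
	buf[4+len(key)] = ':'
	copy(buf[5+len(key):], member)
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeSetKey(0, 42, []byte("k"), []byte("m")))
	// prints: 00 2a 00 01 6b 3a 6d
}
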
diff --git a/vendor/github.com/lunny/nodb/t_ttl.go b/vendor/github.com/lunny/nodb/t_ttl.go
deleted file mode 100644
index 5c3638891c..0000000000
--- a/vendor/github.com/lunny/nodb/t_ttl.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-var (
- errExpMetaKey = errors.New("invalid expire meta key")
- errExpTimeKey = errors.New("invalid expire time key")
-)
-
-type retireCallback func(*batch, []byte) int64
-
-type elimination struct {
- db *DB
- exp2Tx []*batch
- exp2Retire []retireCallback
-}
-
-var errExpType = errors.New("invalid expire type")
-
-// expEncodeTimeKey encodes an expiry index entry as |index|ExpTimeType|dataType|when(8B BE)|key|,
-// so entries sort by expiry time within each data type.
-func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
- buf := make([]byte, len(key)+11)
-
- buf[0] = db.index
- buf[1] = ExpTimeType
- buf[2] = dataType
- pos := 3
-
- binary.BigEndian.PutUint64(buf[pos:], uint64(when))
- pos += 8
-
- copy(buf[pos:], key)
-
- return buf
-}
-
-func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte {
- buf := make([]byte, len(key)+3)
-
- buf[0] = db.index
- buf[1] = ExpMetaType
- buf[2] = dataType
- pos := 3
-
- copy(buf[pos:], key)
-
- return buf
-}
-
-func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) {
- if len(mk) <= 3 || mk[0] != db.index || mk[1] != ExpMetaType {
- return 0, nil, errExpMetaKey
- }
-
- return mk[2], mk[3:], nil
-}
-
-func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {
- if len(tk) < 11 || tk[0] != db.index || tk[1] != ExpTimeType {
- return 0, nil, 0, errExpTimeKey
- }
-
- return tk[2], tk[11:], int64(binary.BigEndian.Uint64(tk[3:])), nil
-}
-
-func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) {
- db.expireAt(t, dataType, key, time.Now().Unix()+duration)
-}
-
-func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) {
- mk := db.expEncodeMetaKey(dataType, key)
- tk := db.expEncodeTimeKey(dataType, key, when)
-
- t.Put(tk, mk)
- t.Put(mk, PutInt64(when))
-}
-
-func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) {
- mk := db.expEncodeMetaKey(dataType, key)
-
- if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 {
- t = -1
- } else {
- t -= time.Now().Unix()
- if t <= 0 {
- t = -1
- }
-		// TODO: when t == -1, should the stale expiry meta be removed?
- }
-
- return t, err
-}
-
-func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) {
- mk := db.expEncodeMetaKey(dataType, key)
- if v, err := db.bucket.Get(mk); err != nil {
- return 0, err
- } else if v == nil {
- return 0, nil
- } else if when, err2 := Int64(v, nil); err2 != nil {
- return 0, err2
- } else {
- tk := db.expEncodeTimeKey(dataType, key, when)
- t.Delete(mk)
- t.Delete(tk)
- return 1, nil
- }
-}
-
-func (db *DB) expFlush(t *batch, dataType byte) (err error) {
- minKey := make([]byte, 3)
- minKey[0] = db.index
- minKey[1] = ExpTimeType
- minKey[2] = dataType
-
- maxKey := make([]byte, 3)
- maxKey[0] = db.index
- maxKey[1] = ExpMetaType
- maxKey[2] = dataType + 1
-
-	if _, err = db.flushRegion(t, minKey, maxKey); err != nil {
-		return
-	}
-	err = t.Commit()
-	return
-}
-
-//////////////////////////////////////////////////////////
-// expired-key eliminator
-//////////////////////////////////////////////////////////
-
-func newEliminator(db *DB) *elimination {
- eli := new(elimination)
- eli.db = db
- eli.exp2Tx = make([]*batch, maxDataType)
- eli.exp2Retire = make([]retireCallback, maxDataType)
- return eli
-}
-
-func (eli *elimination) regRetireContext(dataType byte, t *batch, onRetire retireCallback) {
-
-	// TODO: ensure the entry exists in mapExpMetaType[expType]
-
- eli.exp2Tx[dataType] = t
- eli.exp2Retire[dataType] = onRetire
-}
-
-// active is called from outside to sweep keys whose expiry time has passed.
-func (eli *elimination) active() {
- now := time.Now().Unix()
- db := eli.db
- dbGet := db.bucket.Get
-
- minKey := db.expEncodeTimeKey(NoneType, nil, 0)
- maxKey := db.expEncodeTimeKey(maxDataType, nil, now)
-
- it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1)
- for ; it.Valid(); it.Next() {
- tk := it.RawKey()
- mk := it.RawValue()
-
- dt, k, _, err := db.expDecodeTimeKey(tk)
- if err != nil {
- continue
- }
-
- t := eli.exp2Tx[dt]
- onRetire := eli.exp2Retire[dt]
-		// skip data types with no registered batch/callback
-		if t == nil || onRetire == nil {
- continue
- }
-
- t.Lock()
-
- if exp, err := Int64(dbGet(mk)); err == nil {
- // check expire again
- if exp <= now {
- onRetire(t, k)
- t.Delete(tk)
- t.Delete(mk)
-
- t.Commit()
- }
-
- }
-
- t.Unlock()
- }
- it.Close()
-
- return
-}
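
The eliminator above works because expEncodeTimeKey places the big-endian expiry timestamp ahead of the key: one range scan from timestamp 0 up to now visits exactly the expired entries. A sketch of that bound construction (the type byte values here are placeholders, not nodb's real constants):

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

// expTimeKey mirrors expEncodeTimeKey: |index|ExpTimeType|dataType|when(8B BE)|key|.
func expTimeKey(index, expTimeType, dataType byte, key []byte, when int64) []byte {
	buf := make([]byte, len(key)+11)
	buf[0] = index
	buf[1] = expTimeType
	buf[2] = dataType
	binary.BigEndian.PutUint64(buf[3:], uint64(when))
	copy(buf[11:], key)
	return buf
}

func main() {
	// Everything expiring in [0, now) falls inside this single key range.
	now := time.Now().Unix()
	lo := expTimeKey(0, 102, 0, nil, 0)
	hi := expTimeKey(0, 102, 255, nil, now)
	fmt.Printf("scan [% x, % x)\n", lo, hi)
}
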
diff --git a/vendor/github.com/lunny/nodb/t_zset.go b/vendor/github.com/lunny/nodb/t_zset.go
deleted file mode 100644
index d0ffb7ccf3..0000000000
--- a/vendor/github.com/lunny/nodb/t_zset.go
+++ /dev/null
@@ -1,943 +0,0 @@
-package nodb
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "time"
-
- "github.com/lunny/nodb/store"
-)
-
-const (
- MinScore int64 = -1<<63 + 1
- MaxScore int64 = 1<<63 - 1
- InvalidScore int64 = -1 << 63
-
- AggregateSum byte = 0
- AggregateMin byte = 1
- AggregateMax byte = 2
-)
-
-type ScorePair struct {
- Score int64
- Member []byte
-}
-
-var errZSizeKey = errors.New("invalid zsize key")
-var errZSetKey = errors.New("invalid zset key")
-var errZScoreKey = errors.New("invalid zscore key")
-var errScoreOverflow = errors.New("zset score overflow")
-var errInvalidAggregate = errors.New("invalid aggregate")
-var errInvalidWeightNum = errors.New("invalid weight number")
-var errInvalidSrcKeyNum = errors.New("invalid src key number")
-
-const (
- zsetNScoreSep byte = '<'
- zsetPScoreSep byte = zsetNScoreSep + 1
- zsetStopScoreSep byte = zsetPScoreSep + 1
-
- zsetStartMemSep byte = ':'
- zsetStopMemSep byte = zsetStartMemSep + 1
-)
-
-func checkZSetKMSize(key []byte, member []byte) error {
- if len(key) > MaxKeySize || len(key) == 0 {
- return errKeySize
- } else if len(member) > MaxZSetMemberSize || len(member) == 0 {
- return errZSetMemberSize
- }
- return nil
-}
-
-func (db *DB) zEncodeSizeKey(key []byte) []byte {
- buf := make([]byte, len(key)+2)
- buf[0] = db.index
- buf[1] = ZSizeType
-
- copy(buf[2:], key)
- return buf
-}
-
-func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) {
- if len(ek) < 2 || ek[0] != db.index || ek[1] != ZSizeType {
- return nil, errZSizeKey
- }
-
- return ek[2:], nil
-}
-
-func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte {
- buf := make([]byte, len(key)+len(member)+5)
-
- pos := 0
- buf[pos] = db.index
- pos++
-
- buf[pos] = ZSetType
- pos++
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- buf[pos] = zsetStartMemSep
- pos++
-
- copy(buf[pos:], member)
-
- return buf
-}
-
-func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) {
- if len(ek) < 5 || ek[0] != db.index || ek[1] != ZSetType {
- return nil, nil, errZSetKey
- }
-
- keyLen := int(binary.BigEndian.Uint16(ek[2:]))
- if keyLen+5 > len(ek) {
- return nil, nil, errZSetKey
- }
-
- key := ek[4 : 4+keyLen]
-
- if ek[4+keyLen] != zsetStartMemSep {
- return nil, nil, errZSetKey
- }
-
- member := ek[5+keyLen:]
- return key, member, nil
-}
-
-func (db *DB) zEncodeStartSetKey(key []byte) []byte {
- k := db.zEncodeSetKey(key, nil)
- return k
-}
-
-func (db *DB) zEncodeStopSetKey(key []byte) []byte {
- k := db.zEncodeSetKey(key, nil)
- k[len(k)-1] = zsetStartMemSep + 1
- return k
-}
-
-// zEncodeScoreKey encodes |index|ZScoreType|keyLen(2B BE)|key|sign|score(8B BE)|':'|member|,
-// where sign is zsetNScoreSep ('<') for negative scores and zsetPScoreSep ('=') otherwise,
-// so raw byte order matches numeric score order.
-func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte {
- buf := make([]byte, len(key)+len(member)+14)
-
- pos := 0
- buf[pos] = db.index
- pos++
-
- buf[pos] = ZScoreType
- pos++
-
- binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
- pos += 2
-
- copy(buf[pos:], key)
- pos += len(key)
-
- if score < 0 {
- buf[pos] = zsetNScoreSep
- } else {
- buf[pos] = zsetPScoreSep
- }
-
- pos++
- binary.BigEndian.PutUint64(buf[pos:], uint64(score))
- pos += 8
-
- buf[pos] = zsetStartMemSep
- pos++
-
- copy(buf[pos:], member)
- return buf
-}
-
-func (db *DB) zEncodeStartScoreKey(key []byte, score int64) []byte {
- return db.zEncodeScoreKey(key, nil, score)
-}
-
-func (db *DB) zEncodeStopScoreKey(key []byte, score int64) []byte {
- k := db.zEncodeScoreKey(key, nil, score)
- k[len(k)-1] = zsetStopMemSep
- return k
-}
-
-func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) {
- if len(ek) < 14 || ek[0] != db.index || ek[1] != ZScoreType {
- err = errZScoreKey
- return
- }
-
- keyLen := int(binary.BigEndian.Uint16(ek[2:]))
- if keyLen+14 > len(ek) {
- err = errZScoreKey
- return
- }
-
- key = ek[4 : 4+keyLen]
- pos := 4 + keyLen
-
- if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) {
- err = errZScoreKey
- return
- }
- pos++
-
- score = int64(binary.BigEndian.Uint64(ek[pos:]))
- pos += 8
-
- if ek[pos] != zsetStartMemSep {
- err = errZScoreKey
- return
- }
-
- pos++
-
- member = ek[pos:]
- return
-}
-
-func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, error) {
- if score <= MinScore || score >= MaxScore {
- return 0, errScoreOverflow
- }
-
- var exists int64 = 0
- ek := db.zEncodeSetKey(key, member)
-
- if v, err := db.bucket.Get(ek); err != nil {
- return 0, err
- } else if v != nil {
- exists = 1
-
- if s, err := Int64(v, err); err != nil {
- return 0, err
- } else {
- sk := db.zEncodeScoreKey(key, member, s)
- t.Delete(sk)
- }
- }
-
- t.Put(ek, PutInt64(score))
-
- sk := db.zEncodeScoreKey(key, member, score)
- t.Put(sk, []byte{})
-
- return exists, nil
-}
-
-func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) (int64, error) {
- ek := db.zEncodeSetKey(key, member)
- if v, err := db.bucket.Get(ek); err != nil {
- return 0, err
- } else if v == nil {
-		// member does not exist
-		return 0, nil
-	} else {
-		// member exists
-		if !skipDelScore {
-			// the score key must be deleted as well
- if s, err := Int64(v, err); err != nil {
- return 0, err
- } else {
- sk := db.zEncodeScoreKey(key, member, s)
- t.Delete(sk)
- }
- }
- }
-
- t.Delete(ek)
-
- return 1, nil
-}
-
-func (db *DB) zDelete(t *batch, key []byte) int64 {
- delMembCnt, _ := db.zRemRange(t, key, MinScore, MaxScore, 0, -1)
-	// TODO: log the error
- return delMembCnt
-}
-
-func (db *DB) zExpireAt(key []byte, when int64) (int64, error) {
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 {
- return 0, err
- } else {
- db.expireAt(t, ZSetType, key, when)
- if err := t.Commit(); err != nil {
- return 0, err
- }
- }
- return 1, nil
-}
-
-func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) {
- if len(args) == 0 {
- return 0, nil
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- var num int64 = 0
- for i := 0; i < len(args); i++ {
- score := args[i].Score
- member := args[i].Member
-
- if err := checkZSetKMSize(key, member); err != nil {
- return 0, err
- }
-
- if n, err := db.zSetItem(t, key, score, member); err != nil {
- return 0, err
- } else if n == 0 {
-			// added a new member
- num++
- }
- }
-
- if _, err := db.zIncrSize(t, key, num); err != nil {
- return 0, err
- }
-
-	// TODO: add binlog
- err := t.Commit()
- return num, err
-}
-
-func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) {
- sk := db.zEncodeSizeKey(key)
-
- size, err := Int64(db.bucket.Get(sk))
- if err != nil {
- return 0, err
- } else {
- size += delta
- if size <= 0 {
- size = 0
- t.Delete(sk)
- db.rmExpire(t, ZSetType, key)
- } else {
- t.Put(sk, PutInt64(size))
- }
- }
-
- return size, nil
-}
-
-func (db *DB) ZCard(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- sk := db.zEncodeSizeKey(key)
- return Int64(db.bucket.Get(sk))
-}
-
-func (db *DB) ZScore(key []byte, member []byte) (int64, error) {
- if err := checkZSetKMSize(key, member); err != nil {
- return InvalidScore, err
- }
-
- var score int64 = InvalidScore
-
- k := db.zEncodeSetKey(key, member)
- if v, err := db.bucket.Get(k); err != nil {
- return InvalidScore, err
- } else if v == nil {
- return InvalidScore, ErrScoreMiss
- } else {
- if score, err = Int64(v, nil); err != nil {
- return InvalidScore, err
- }
- }
-
- return score, nil
-}
-
-func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) {
- if len(members) == 0 {
- return 0, nil
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- var num int64 = 0
- for i := 0; i < len(members); i++ {
- if err := checkZSetKMSize(key, members[i]); err != nil {
- return 0, err
- }
-
- if n, err := db.zDelItem(t, key, members[i], false); err != nil {
- return 0, err
- } else if n == 1 {
- num++
- }
- }
-
- if _, err := db.zIncrSize(t, key, -num); err != nil {
- return 0, err
- }
-
- err := t.Commit()
- return num, err
-}
-
-func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) {
- if err := checkZSetKMSize(key, member); err != nil {
- return InvalidScore, err
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- ek := db.zEncodeSetKey(key, member)
-
- var oldScore int64 = 0
- v, err := db.bucket.Get(ek)
- if err != nil {
- return InvalidScore, err
- } else if v == nil {
- db.zIncrSize(t, key, 1)
- } else {
- if oldScore, err = Int64(v, err); err != nil {
- return InvalidScore, err
- }
- }
-
- newScore := oldScore + delta
- if newScore >= MaxScore || newScore <= MinScore {
- return InvalidScore, errScoreOverflow
- }
-
- sk := db.zEncodeScoreKey(key, member, newScore)
- t.Put(sk, []byte{})
- t.Put(ek, PutInt64(newScore))
-
- if v != nil {
-		// to update the score we must first delete the old score key
- oldSk := db.zEncodeScoreKey(key, member, oldScore)
- t.Delete(oldSk)
- }
-
- err = t.Commit()
- return newScore, err
-}
-
-func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
- minKey := db.zEncodeStartScoreKey(key, min)
- maxKey := db.zEncodeStopScoreKey(key, max)
-
- rangeType := store.RangeROpen
-
- it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1)
- var n int64 = 0
- for ; it.Valid(); it.Next() {
- n++
- }
- it.Close()
-
- return n, nil
-}
-
-func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) {
- if err := checkZSetKMSize(key, member); err != nil {
- return 0, err
- }
-
- k := db.zEncodeSetKey(key, member)
-
- it := db.bucket.NewIterator()
- defer it.Close()
-
- if v := it.Find(k); v == nil {
- return -1, nil
- } else {
- if s, err := Int64(v, nil); err != nil {
- return 0, err
- } else {
- var rit *store.RangeLimitIterator
-
- sk := db.zEncodeScoreKey(key, member, s)
-
- if !reverse {
- minKey := db.zEncodeStartScoreKey(key, MinScore)
-
- rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose})
- } else {
- maxKey := db.zEncodeStopScoreKey(key, MaxScore)
- rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose})
- }
-
-			var lastKey []byte
- var n int64 = 0
-
- for ; rit.Valid(); rit.Next() {
- n++
-
- lastKey = rit.BufKey(lastKey)
- }
-
- if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) {
- n--
- return n, nil
- }
- }
- }
-
- return -1, nil
-}
-
-func (db *DB) zIterator(key []byte, min int64, max int64, offset int, count int, reverse bool) *store.RangeLimitIterator {
- minKey := db.zEncodeStartScoreKey(key, min)
- maxKey := db.zEncodeStopScoreKey(key, max)
-
- if !reverse {
- return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
- } else {
- return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
- }
-}
-
-func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) {
- if len(key) > MaxKeySize {
- return 0, errKeySize
- }
-
- it := db.zIterator(key, min, max, offset, count, false)
- var num int64 = 0
- for ; it.Valid(); it.Next() {
- sk := it.RawKey()
- _, m, _, err := db.zDecodeScoreKey(sk)
- if err != nil {
- continue
- }
-
- if n, err := db.zDelItem(t, key, m, true); err != nil {
- return 0, err
- } else if n == 1 {
- num++
- }
-
- t.Delete(sk)
- }
- it.Close()
-
- if _, err := db.zIncrSize(t, key, -num); err != nil {
- return 0, err
- }
-
- return num, nil
-}
-
-func (db *DB) zRange(key []byte, min int64, max int64, offset int, count int, reverse bool) ([]ScorePair, error) {
- if len(key) > MaxKeySize {
- return nil, errKeySize
- }
-
- if offset < 0 {
- return []ScorePair{}, nil
- }
-
- nv := 64
- if count > 0 {
- nv = count
- }
-
- v := make([]ScorePair, 0, nv)
-
- var it *store.RangeLimitIterator
-
-	// If reversed with offset == 0 and count < 0, we may use a forward iterator and then
-	// reverse the result, because the store iterator's prev is slower than its next.
- if !reverse || (offset == 0 && count < 0) {
- it = db.zIterator(key, min, max, offset, count, false)
- } else {
- it = db.zIterator(key, min, max, offset, count, true)
- }
-
- for ; it.Valid(); it.Next() {
- _, m, s, err := db.zDecodeScoreKey(it.Key())
-		// TODO: perhaps verify that the decoded key matches?
- if err != nil {
- continue
- }
-
- v = append(v, ScorePair{Member: m, Score: s})
- }
- it.Close()
-
- if reverse && (offset == 0 && count < 0) {
- for i, j := 0, len(v)-1; i < j; i, j = i+1, j-1 {
- v[i], v[j] = v[j], v[i]
- }
- }
-
- return v, nil
-}
-
-func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count int, err error) {
- if start < 0 || stop < 0 {
-		// negative indices follow the redis convention
- var size int64
- size, err = db.ZCard(key)
- if err != nil {
- return
- }
-
- llen := int(size)
-
- if start < 0 {
- start = llen + start
- }
- if stop < 0 {
- stop = llen + stop
- }
-
- if start < 0 {
- start = 0
- }
-
- if start >= llen {
- offset = -1
- return
- }
- }
-
- if start > stop {
- offset = -1
- return
- }
-
- offset = start
- count = (stop - start) + 1
- return
-}
-
-func (db *DB) ZClear(key []byte) (int64, error) {
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- rmCnt, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1)
- if err == nil {
- err = t.Commit()
- }
-
- return rmCnt, err
-}
-
-func (db *DB) ZMclear(keys ...[]byte) (int64, error) {
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- for _, key := range keys {
- if _, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1); err != nil {
- return 0, err
- }
- }
-
- err := t.Commit()
-
- return int64(len(keys)), err
-}
-
-func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) {
- return db.ZRangeGeneric(key, start, stop, false)
-}
-
-// min and max must be inclusive.
-// If there is no limit, pass offset = 0 and count = -1.
-func (db *DB) ZRangeByScore(key []byte, min int64, max int64,
- offset int, count int) ([]ScorePair, error) {
- return db.ZRangeByScoreGeneric(key, min, max, offset, count, false)
-}
-
-func (db *DB) ZRank(key []byte, member []byte) (int64, error) {
- return db.zrank(key, member, false)
-}
-
-func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) {
- offset, count, err := db.zParseLimit(key, start, stop)
- if err != nil {
- return 0, err
- }
-
- var rmCnt int64
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- rmCnt, err = db.zRemRange(t, key, MinScore, MaxScore, offset, count)
- if err == nil {
- err = t.Commit()
- }
-
- return rmCnt, err
-}
-
-// min and max must be inclusive.
-func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) {
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- rmCnt, err := db.zRemRange(t, key, min, max, 0, -1)
- if err == nil {
- err = t.Commit()
- }
-
- return rmCnt, err
-}
-
-func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) {
- return db.ZRangeGeneric(key, start, stop, true)
-}
-
-func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) {
- return db.zrank(key, member, true)
-}
-
-// min and max must be inclusive.
-// If there is no limit, pass offset = 0 and count = -1.
-func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) {
- return db.ZRangeByScoreGeneric(key, min, max, offset, count, true)
-}
-
-func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) {
- offset, count, err := db.zParseLimit(key, start, stop)
- if err != nil {
- return nil, err
- }
-
- return db.zRange(key, MinScore, MaxScore, offset, count, reverse)
-}
-
-// min and max must be inclusive.
-// If there is no limit, pass offset = 0 and count = -1.
-func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64,
- offset int, count int, reverse bool) ([]ScorePair, error) {
-
- return db.zRange(key, min, max, offset, count, reverse)
-}
-
-func (db *DB) zFlush() (drop int64, err error) {
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
- return db.flushType(t, ZSetType)
-}
-
-func (db *DB) ZExpire(key []byte, duration int64) (int64, error) {
- if duration <= 0 {
- return 0, errExpireValue
- }
-
- return db.zExpireAt(key, time.Now().Unix()+duration)
-}
-
-func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) {
- if when <= time.Now().Unix() {
- return 0, errExpireValue
- }
-
- return db.zExpireAt(key, when)
-}
-
-func (db *DB) ZTTL(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return -1, err
- }
-
- return db.ttl(ZSetType, key)
-}
-
-func (db *DB) ZPersist(key []byte) (int64, error) {
- if err := checkKeySize(key); err != nil {
- return 0, err
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- n, err := db.rmExpire(t, ZSetType, key)
- if err != nil {
- return 0, err
- }
-
- err = t.Commit()
- return n, err
-}
-
-func getAggregateFunc(aggregate byte) func(int64, int64) int64 {
- switch aggregate {
- case AggregateSum:
- return func(a int64, b int64) int64 {
- return a + b
- }
- case AggregateMax:
- return func(a int64, b int64) int64 {
- if a > b {
- return a
- }
- return b
- }
- case AggregateMin:
- return func(a int64, b int64) int64 {
- if a > b {
- return b
- }
- return a
- }
- }
- return nil
-}
-
-func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) {
-
- var destMap = map[string]int64{}
- aggregateFunc := getAggregateFunc(aggregate)
- if aggregateFunc == nil {
- return 0, errInvalidAggregate
- }
- if len(srcKeys) < 1 {
- return 0, errInvalidSrcKeyNum
- }
- if weights != nil {
- if len(srcKeys) != len(weights) {
- return 0, errInvalidWeightNum
- }
- } else {
- weights = make([]int64, len(srcKeys))
- for i := 0; i < len(weights); i++ {
- weights[i] = 1
- }
- }
-
- for i, key := range srcKeys {
- scorePairs, err := db.ZRange(key, 0, -1)
- if err != nil {
- return 0, err
- }
- for _, pair := range scorePairs {
- if score, ok := destMap[String(pair.Member)]; !ok {
- destMap[String(pair.Member)] = pair.Score * weights[i]
- } else {
- destMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i])
- }
- }
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- db.zDelete(t, destKey)
-
- for member, score := range destMap {
- if err := checkZSetKMSize(destKey, []byte(member)); err != nil {
- return 0, err
- }
-
- if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil {
- return 0, err
- }
- }
-
- var num = int64(len(destMap))
- sk := db.zEncodeSizeKey(destKey)
- t.Put(sk, PutInt64(num))
-
-	// TODO: add binlog
- if err := t.Commit(); err != nil {
- return 0, err
- }
- return num, nil
-}
-
-func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) {
-
- aggregateFunc := getAggregateFunc(aggregate)
- if aggregateFunc == nil {
- return 0, errInvalidAggregate
- }
- if len(srcKeys) < 1 {
- return 0, errInvalidSrcKeyNum
- }
- if weights != nil {
- if len(srcKeys) != len(weights) {
- return 0, errInvalidWeightNum
- }
- } else {
- weights = make([]int64, len(srcKeys))
- for i := 0; i < len(weights); i++ {
- weights[i] = 1
- }
- }
-
- var destMap = map[string]int64{}
- scorePairs, err := db.ZRange(srcKeys[0], 0, -1)
- if err != nil {
- return 0, err
- }
- for _, pair := range scorePairs {
- destMap[String(pair.Member)] = pair.Score * weights[0]
- }
-
- for i, key := range srcKeys[1:] {
- scorePairs, err := db.ZRange(key, 0, -1)
- if err != nil {
- return 0, err
- }
- tmpMap := map[string]int64{}
- for _, pair := range scorePairs {
- if score, ok := destMap[String(pair.Member)]; ok {
- tmpMap[String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1])
- }
- }
- destMap = tmpMap
- }
-
- t := db.zsetBatch
- t.Lock()
- defer t.Unlock()
-
- db.zDelete(t, destKey)
-
- for member, score := range destMap {
- if err := checkZSetKMSize(destKey, []byte(member)); err != nil {
- return 0, err
- }
- if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil {
- return 0, err
- }
- }
-
-	var num = int64(len(destMap))
- sk := db.zEncodeSizeKey(destKey)
- t.Put(sk, PutInt64(num))
-	// TODO: add binlog
- if err := t.Commit(); err != nil {
- return 0, err
- }
- return num, nil
-}
-
-func (db *DB) ZScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) {
- return db.scan(ZSizeType, key, count, inclusive, match)
-}
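
The score keys above stay sorted numerically because a sign separator ('<' for negative, '=' for non-negative) precedes the big-endian score. A short sketch verifying that byte order matches score order:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// scorePart reproduces the sign-separator trick from zEncodeScoreKey:
// '<' sorts before '=', so every negative score precedes every non-negative
// one, and big-endian encoding orders scores correctly within each sign.
func scorePart(score int64) []byte {
	buf := make([]byte, 9)
	if score < 0 {
		buf[0] = '<'
	} else {
		buf[0] = '='
	}
	binary.BigEndian.PutUint64(buf[1:], uint64(score))
	return buf
}

func main() {
	scores := []int64{-5, -1, 0, 3}
	for i := 0; i+1 < len(scores); i++ {
		ordered := bytes.Compare(scorePart(scores[i]), scorePart(scores[i+1])) < 0
		fmt.Println(scores[i], "<", scores[i+1], "in byte order:", ordered) // true
	}
}
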
diff --git a/vendor/github.com/lunny/nodb/tx.go b/vendor/github.com/lunny/nodb/tx.go
deleted file mode 100644
index 5ce99db57a..0000000000
--- a/vendor/github.com/lunny/nodb/tx.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package nodb
-
-import (
- "errors"
- "fmt"
-
- "github.com/lunny/nodb/store"
-)
-
-var (
- ErrNestTx = errors.New("nest transaction not supported")
-	ErrTxDone = errors.New("transaction has already been committed or rolled back")
-)
-
-type Tx struct {
- *DB
-
- tx *store.Tx
-
- logs [][]byte
-}
-
-func (db *DB) IsTransaction() bool {
- return db.status == DBInTransaction
-}
-
-// Begin starts a transaction. It blocks all other write operations until Commit or
-// Rollback is called, so be careful to avoid long-running transactions.
-func (db *DB) Begin() (*Tx, error) {
- if db.IsTransaction() {
- return nil, ErrNestTx
- }
-
- tx := new(Tx)
-
- tx.DB = new(DB)
- tx.DB.l = db.l
-
- tx.l.wLock.Lock()
-
- tx.DB.sdb = db.sdb
-
- var err error
- tx.tx, err = db.sdb.Begin()
- if err != nil {
- tx.l.wLock.Unlock()
- return nil, err
- }
-
- tx.DB.bucket = tx.tx
-
- tx.DB.status = DBInTransaction
-
- tx.DB.index = db.index
-
- tx.DB.kvBatch = tx.newBatch()
- tx.DB.listBatch = tx.newBatch()
- tx.DB.hashBatch = tx.newBatch()
- tx.DB.zsetBatch = tx.newBatch()
- tx.DB.binBatch = tx.newBatch()
- tx.DB.setBatch = tx.newBatch()
-
- return tx, nil
-}
-
-func (tx *Tx) Commit() error {
- if tx.tx == nil {
- return ErrTxDone
- }
-
- tx.l.commitLock.Lock()
- err := tx.tx.Commit()
- tx.tx = nil
-
- if len(tx.logs) > 0 {
- tx.l.binlog.Log(tx.logs...)
- }
-
- tx.l.commitLock.Unlock()
-
- tx.l.wLock.Unlock()
-
- tx.DB.bucket = nil
-
- return err
-}
-
-func (tx *Tx) Rollback() error {
- if tx.tx == nil {
- return ErrTxDone
- }
-
- err := tx.tx.Rollback()
- tx.tx = nil
-
- tx.l.wLock.Unlock()
- tx.DB.bucket = nil
-
- return err
-}
-
-func (tx *Tx) newBatch() *batch {
- return tx.l.newBatch(tx.tx.NewWriteBatch(), &txBatchLocker{}, tx)
-}
-
-func (tx *Tx) Select(index int) error {
- if index < 0 || index >= int(MaxDBNumber) {
- return fmt.Errorf("invalid db index %d", index)
- }
-
- tx.DB.index = uint8(index)
- return nil
-}
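
A minimal usage sketch of the transaction API above, assuming a *nodb.DB obtained through the library's public API; since Tx embeds *DB, the usual commands (SAdd here) are available on it:

package example

import "github.com/lunny/nodb"

// addMember sketches the Begin/Commit/Rollback pattern of the Tx type above.
// Writers are blocked for the whole transaction, so keep it short.
func addMember(db *nodb.DB, key, member []byte) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.SAdd(key, member); err != nil {
		tx.Rollback() // releases the write lock
		return err
	}
	return tx.Commit()
}
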
diff --git a/vendor/github.com/lunny/nodb/util.go b/vendor/github.com/lunny/nodb/util.go
deleted file mode 100644
index d5949a96e6..0000000000
--- a/vendor/github.com/lunny/nodb/util.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package nodb
-
-import (
- "encoding/binary"
- "errors"
- "reflect"
- "strconv"
- "unsafe"
-)
-
-var errIntNumber = errors.New("invalid integer")
-
-// String converts a byte slice to a string without copying.
-// Use at your own risk: the result shares the slice's underlying memory.
-func String(b []byte) (s string) {
- pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
- pstring.Data = pbytes.Data
- pstring.Len = pbytes.Len
- return
-}
-
-// Slice converts a string to a byte slice without copying.
-// Use at your own risk: writing to the slice mutates data the runtime assumes immutable.
-func Slice(s string) (b []byte) {
- pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
- pbytes.Data = pstring.Data
- pbytes.Len = pstring.Len
- pbytes.Cap = pstring.Len
- return
-}
-
-// Int64 decodes v as an 8-byte little-endian integer; nil or empty input yields 0.
-func Int64(v []byte, err error) (int64, error) {
-	if err != nil {
-		return 0, err
-	} else if len(v) == 0 {
- return 0, nil
- } else if len(v) != 8 {
- return 0, errIntNumber
- }
-
- return int64(binary.LittleEndian.Uint64(v)), nil
-}
-
-// PutInt64 returns the raw in-memory bytes of v without copying; note this is the
-// host's native byte order, not the little-endian form that Int64 decodes.
-func PutInt64(v int64) []byte {
- var b []byte
- pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- pbytes.Data = uintptr(unsafe.Pointer(&v))
- pbytes.Len = 8
- pbytes.Cap = 8
- return b
-}
-
-func StrInt64(v []byte, err error) (int64, error) {
- if err != nil {
- return 0, err
- } else if v == nil {
- return 0, nil
- } else {
- return strconv.ParseInt(String(v), 10, 64)
- }
-}
-
-func StrInt32(v []byte, err error) (int32, error) {
- if err != nil {
- return 0, err
- } else if v == nil {
- return 0, nil
- } else {
- res, err := strconv.ParseInt(String(v), 10, 32)
- return int32(res), err
- }
-}
-
-func StrInt8(v []byte, err error) (int8, error) {
- if err != nil {
- return 0, err
- } else if v == nil {
- return 0, nil
- } else {
- res, err := strconv.ParseInt(String(v), 10, 8)
- return int8(res), err
- }
-}
-
-func StrPutInt64(v int64) []byte {
- return strconv.AppendInt(nil, v, 10)
-}
-
-func MinUInt32(a uint32, b uint32) uint32 {
- if a > b {
- return b
- } else {
- return a
- }
-}
-
-func MaxUInt32(a uint32, b uint32) uint32 {
- if a > b {
- return a
- } else {
- return b
- }
-}
-
-func MaxInt32(a int32, b int32) int32 {
- if a > b {
- return a
- } else {
- return b
- }
-}
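
One caveat in the helpers above: PutInt64 returns the integer's raw in-memory bytes in host byte order, while Int64 always decodes little-endian, so the pair round-trips only on little-endian hosts. A portable little-endian sketch of the same pair:

package main

import (
	"encoding/binary"
	"fmt"
)

// putInt64LE is an explicitly little-endian variant of PutInt64 that matches
// what Int64 expects regardless of the host's byte order.
func putInt64LE(v int64) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(v))
	return b
}

func int64LE(b []byte) int64 {
	return int64(binary.LittleEndian.Uint64(b))
}

func main() {
	fmt.Println(int64LE(putInt64LE(-42))) // -42
}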