author     Thomas Boerger <thomas@webhippie.de>  2016-11-03 23:16:01 +0100
committer  Thomas Boerger <thomas@webhippie.de>  2016-11-04 08:43:11 +0100
commit     1ebb35b98889ff77299f24d82da426b434b0cca0 (patch)
tree       6dcb814d6df4d11c7e7a0ba6da8a6945628e2c5d /vendor/gopkg.in
parent     78f86abba45cb35018c58b8bd5f4c48a86cc8634 (diff)
Added all required dependencies
Diffstat (limited to 'vendor/gopkg.in')
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/LICENSE | 27
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/README.md | 24
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/ber.go | 504
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/content_int.go | 25
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/header.go | 29
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/identifier.go | 103
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/length.go | 71
-rw-r--r--  vendor/gopkg.in/asn1-ber.v1/util.go | 24
-rw-r--r--  vendor/gopkg.in/bufio.v1/LICENSE | 27
-rw-r--r--  vendor/gopkg.in/bufio.v1/Makefile | 2
-rw-r--r--  vendor/gopkg.in/bufio.v1/README.md | 4
-rw-r--r--  vendor/gopkg.in/bufio.v1/buffer.go | 413
-rw-r--r--  vendor/gopkg.in/bufio.v1/bufio.go | 728
-rw-r--r--  vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/LICENSE | 8
-rw-r--r--  vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/README.md | 121
-rw-r--r--  vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/editorconfig.go | 277
-rw-r--r--  vendor/gopkg.in/gomail.v2/CHANGELOG.md | 20
-rw-r--r--  vendor/gopkg.in/gomail.v2/CONTRIBUTING.md | 20
-rw-r--r--  vendor/gopkg.in/gomail.v2/LICENSE | 20
-rw-r--r--  vendor/gopkg.in/gomail.v2/README.md | 92
-rw-r--r--  vendor/gopkg.in/gomail.v2/auth.go | 49
-rw-r--r--  vendor/gopkg.in/gomail.v2/doc.go | 5
-rw-r--r--  vendor/gopkg.in/gomail.v2/message.go | 322
-rw-r--r--  vendor/gopkg.in/gomail.v2/mime.go | 21
-rw-r--r--  vendor/gopkg.in/gomail.v2/mime_go14.go | 25
-rw-r--r--  vendor/gopkg.in/gomail.v2/send.go | 116
-rw-r--r--  vendor/gopkg.in/gomail.v2/smtp.go | 202
-rw-r--r--  vendor/gopkg.in/gomail.v2/writeto.go | 306
-rw-r--r--  vendor/gopkg.in/ini.v1/LICENSE | 191
-rw-r--r--  vendor/gopkg.in/ini.v1/Makefile | 12
-rw-r--r--  vendor/gopkg.in/ini.v1/README.md | 703
-rw-r--r--  vendor/gopkg.in/ini.v1/README_ZH.md | 690
-rw-r--r--  vendor/gopkg.in/ini.v1/error.go | 32
-rw-r--r--  vendor/gopkg.in/ini.v1/ini.go | 501
-rw-r--r--  vendor/gopkg.in/ini.v1/key.go | 633
-rw-r--r--  vendor/gopkg.in/ini.v1/parser.go | 325
-rw-r--r--  vendor/gopkg.in/ini.v1/section.go | 206
-rw-r--r--  vendor/gopkg.in/ini.v1/struct.go | 431
-rw-r--r--  vendor/gopkg.in/ldap.v2/LICENSE | 27
-rw-r--r--  vendor/gopkg.in/ldap.v2/Makefile | 42
-rw-r--r--  vendor/gopkg.in/ldap.v2/README.md | 53
-rw-r--r--  vendor/gopkg.in/ldap.v2/add.go | 113
-rw-r--r--  vendor/gopkg.in/ldap.v2/bind.go | 143
-rw-r--r--  vendor/gopkg.in/ldap.v2/client.go | 27
-rw-r--r--  vendor/gopkg.in/ldap.v2/compare.go | 85
-rw-r--r--  vendor/gopkg.in/ldap.v2/conn.go | 467
-rw-r--r--  vendor/gopkg.in/ldap.v2/control.go | 420
-rw-r--r--  vendor/gopkg.in/ldap.v2/debug.go | 24
-rw-r--r--  vendor/gopkg.in/ldap.v2/del.go | 84
-rw-r--r--  vendor/gopkg.in/ldap.v2/dn.go | 158
-rw-r--r--  vendor/gopkg.in/ldap.v2/doc.go | 4
-rw-r--r--  vendor/gopkg.in/ldap.v2/error.go | 148
-rw-r--r--  vendor/gopkg.in/ldap.v2/filter.go | 466
-rw-r--r--  vendor/gopkg.in/ldap.v2/ldap.go | 289
-rw-r--r--  vendor/gopkg.in/ldap.v2/modify.go | 170
-rw-r--r--  vendor/gopkg.in/ldap.v2/passwdmodify.go | 148
-rw-r--r--  vendor/gopkg.in/ldap.v2/search.go | 450
-rw-r--r--  vendor/gopkg.in/macaron.v1/LICENSE | 191
-rw-r--r--  vendor/gopkg.in/macaron.v1/README.md | 92
-rw-r--r--  vendor/gopkg.in/macaron.v1/context.go | 520
-rw-r--r--  vendor/gopkg.in/macaron.v1/logger.go | 64
-rw-r--r--  vendor/gopkg.in/macaron.v1/macaron.go | 291
-rw-r--r--  vendor/gopkg.in/macaron.v1/macaronlogo.png | bin 0 -> 88924 bytes
-rw-r--r--  vendor/gopkg.in/macaron.v1/recovery.go | 163
-rw-r--r--  vendor/gopkg.in/macaron.v1/render.go | 714
-rw-r--r--  vendor/gopkg.in/macaron.v1/response_writer.go | 111
-rw-r--r--  vendor/gopkg.in/macaron.v1/return_handler.go | 76
-rw-r--r--  vendor/gopkg.in/macaron.v1/router.go | 360
-rw-r--r--  vendor/gopkg.in/macaron.v1/static.go | 205
-rw-r--r--  vendor/gopkg.in/macaron.v1/tree.go | 379
-rw-r--r--  vendor/gopkg.in/redis.v2/LICENSE | 27
-rw-r--r--  vendor/gopkg.in/redis.v2/Makefile | 3
-rw-r--r--  vendor/gopkg.in/redis.v2/README.md | 46
-rw-r--r--  vendor/gopkg.in/redis.v2/command.go | 597
-rw-r--r--  vendor/gopkg.in/redis.v2/commands.go | 1246
-rw-r--r--  vendor/gopkg.in/redis.v2/doc.go | 4
-rw-r--r--  vendor/gopkg.in/redis.v2/error.go | 23
-rw-r--r--  vendor/gopkg.in/redis.v2/multi.go | 138
-rw-r--r--  vendor/gopkg.in/redis.v2/parser.go | 262
-rw-r--r--  vendor/gopkg.in/redis.v2/pipeline.go | 91
-rw-r--r--  vendor/gopkg.in/redis.v2/pool.go | 405
-rw-r--r--  vendor/gopkg.in/redis.v2/pubsub.go | 134
-rw-r--r--  vendor/gopkg.in/redis.v2/rate_limit.go | 53
-rw-r--r--  vendor/gopkg.in/redis.v2/redis.go | 231
-rw-r--r--  vendor/gopkg.in/redis.v2/script.go | 52
-rw-r--r--  vendor/gopkg.in/redis.v2/sentinel.go | 291
86 files changed, 17396 insertions, 0 deletions
diff --git a/vendor/gopkg.in/asn1-ber.v1/LICENSE b/vendor/gopkg.in/asn1-ber.v1/LICENSE
new file mode 100644
index 0000000000..7448756763
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/asn1-ber.v1/README.md b/vendor/gopkg.in/asn1-ber.v1/README.md
new file mode 100644
index 0000000000..e3a9560d68
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/README.md
@@ -0,0 +1,24 @@
+[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
+
+
+ASN1 BER Encoding / Decoding Library for the GO programming language.
+---------------------------------------------------------------------
+
+Required libraries:
+ None
+
+Working:
+ Very basic encoding / decoding needed for LDAP protocol
+
+Tests Implemented:
+ A few
+
+TODO:
+ Fix all encoding / decoding to conform to ASN1 BER spec
+ Implement Tests / Benchmarks
+
+---
+
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher
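
Editor's note: the README above only lists features. As orientation, here is a minimal, hypothetical usage sketch (not part of the vendored files) showing how a consumer builds and serializes a packet with this package, using only identifiers defined in ber.go below:

```go
package main

import (
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	// Build a SEQUENCE holding an OCTET STRING and an INTEGER.
	seq := ber.NewSequence("example sequence")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))
	seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(42), "answer"))

	// Bytes() serializes the identifier, length, and accumulated content.
	encoded := seq.Bytes()
	fmt.Printf("% x\n", encoded)

	// PrintPacket dumps the decoded structure to stdout for debugging.
	ber.PrintPacket(ber.DecodePacket(encoded))
}
```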
diff --git a/vendor/gopkg.in/asn1-ber.v1/ber.go b/vendor/gopkg.in/asn1-ber.v1/ber.go
new file mode 100644
index 0000000000..25cc921be9
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/ber.go
@@ -0,0 +1,504 @@
+package ber
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+type Packet struct {
+ Identifier
+ Value interface{}
+ ByteValue []byte
+ Data *bytes.Buffer
+ Children []*Packet
+ Description string
+}
+
+type Identifier struct {
+ ClassType Class
+ TagType Type
+ Tag Tag
+}
+
+type Tag uint64
+
+const (
+ TagEOC Tag = 0x00
+ TagBoolean Tag = 0x01
+ TagInteger Tag = 0x02
+ TagBitString Tag = 0x03
+ TagOctetString Tag = 0x04
+ TagNULL Tag = 0x05
+ TagObjectIdentifier Tag = 0x06
+ TagObjectDescriptor Tag = 0x07
+ TagExternal Tag = 0x08
+ TagRealFloat Tag = 0x09
+ TagEnumerated Tag = 0x0a
+ TagEmbeddedPDV Tag = 0x0b
+ TagUTF8String Tag = 0x0c
+ TagRelativeOID Tag = 0x0d
+ TagSequence Tag = 0x10
+ TagSet Tag = 0x11
+ TagNumericString Tag = 0x12
+ TagPrintableString Tag = 0x13
+ TagT61String Tag = 0x14
+ TagVideotexString Tag = 0x15
+ TagIA5String Tag = 0x16
+ TagUTCTime Tag = 0x17
+ TagGeneralizedTime Tag = 0x18
+ TagGraphicString Tag = 0x19
+ TagVisibleString Tag = 0x1a
+ TagGeneralString Tag = 0x1b
+ TagUniversalString Tag = 0x1c
+ TagCharacterString Tag = 0x1d
+ TagBMPString Tag = 0x1e
+ TagBitmask Tag = 0x1f // xxx11111b
+
+ // HighTag indicates the start of a high-tag byte sequence
+ HighTag Tag = 0x1f // xxx11111b
+ // HighTagContinueBitmask indicates the high-tag byte sequence should continue
+ HighTagContinueBitmask Tag = 0x80 // 10000000b
+ // HighTagValueBitmask obtains the tag value from a high-tag byte sequence byte
+ HighTagValueBitmask Tag = 0x7f // 01111111b
+)
+
+const (
+ // LengthLongFormBitmask is the mask to apply to the length byte to see if a long-form byte sequence is used
+ LengthLongFormBitmask = 0x80
+ // LengthValueBitmask is the mask to apply to the length byte to get the number of bytes in the long-form byte sequence
+ LengthValueBitmask = 0x7f
+
+ // LengthIndefinite is returned from readLength to indicate an indefinite length
+ LengthIndefinite = -1
+)
+
+var tagMap = map[Tag]string{
+ TagEOC: "EOC (End-of-Content)",
+ TagBoolean: "Boolean",
+ TagInteger: "Integer",
+ TagBitString: "Bit String",
+ TagOctetString: "Octet String",
+ TagNULL: "NULL",
+ TagObjectIdentifier: "Object Identifier",
+ TagObjectDescriptor: "Object Descriptor",
+ TagExternal: "External",
+ TagRealFloat: "Real (float)",
+ TagEnumerated: "Enumerated",
+ TagEmbeddedPDV: "Embedded PDV",
+ TagUTF8String: "UTF8 String",
+ TagRelativeOID: "Relative-OID",
+ TagSequence: "Sequence and Sequence of",
+ TagSet: "Set and Set OF",
+ TagNumericString: "Numeric String",
+ TagPrintableString: "Printable String",
+ TagT61String: "T61 String",
+ TagVideotexString: "Videotex String",
+ TagIA5String: "IA5 String",
+ TagUTCTime: "UTC Time",
+ TagGeneralizedTime: "Generalized Time",
+ TagGraphicString: "Graphic String",
+ TagVisibleString: "Visible String",
+ TagGeneralString: "General String",
+ TagUniversalString: "Universal String",
+ TagCharacterString: "Character String",
+ TagBMPString: "BMP String",
+}
+
+type Class uint8
+
+const (
+ ClassUniversal Class = 0 // 00xxxxxxb
+ ClassApplication Class = 64 // 01xxxxxxb
+ ClassContext Class = 128 // 10xxxxxxb
+ ClassPrivate Class = 192 // 11xxxxxxb
+ ClassBitmask Class = 192 // 11xxxxxxb
+)
+
+var ClassMap = map[Class]string{
+ ClassUniversal: "Universal",
+ ClassApplication: "Application",
+ ClassContext: "Context",
+ ClassPrivate: "Private",
+}
+
+type Type uint8
+
+const (
+ TypePrimitive Type = 0 // xx0xxxxxb
+ TypeConstructed Type = 32 // xx1xxxxxb
+ TypeBitmask Type = 32 // xx1xxxxxb
+)
+
+var TypeMap = map[Type]string{
+ TypePrimitive: "Primitive",
+ TypeConstructed: "Constructed",
+}
+
+var Debug bool = false
+
+func PrintBytes(out io.Writer, buf []byte, indent string) {
+ data_lines := make([]string, (len(buf)/30)+1)
+ num_lines := make([]string, (len(buf)/30)+1)
+
+ for i, b := range buf {
+ data_lines[i/30] += fmt.Sprintf("%02x ", b)
+ num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
+ }
+
+ for i := 0; i < len(data_lines); i++ {
+ out.Write([]byte(indent + data_lines[i] + "\n"))
+ out.Write([]byte(indent + num_lines[i] + "\n\n"))
+ }
+}
+
+func PrintPacket(p *Packet) {
+ printPacket(os.Stdout, p, 0, false)
+}
+
+func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
+ indent_str := ""
+
+ for len(indent_str) != indent {
+ indent_str += " "
+ }
+
+ class_str := ClassMap[p.ClassType]
+
+ tagtype_str := TypeMap[p.TagType]
+
+ tag_str := fmt.Sprintf("0x%02X", p.Tag)
+
+ if p.ClassType == ClassUniversal {
+ tag_str = tagMap[p.Tag]
+ }
+
+ value := fmt.Sprint(p.Value)
+ description := ""
+
+ if p.Description != "" {
+ description = p.Description + ": "
+ }
+
+ fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
+
+ if printBytes {
+ PrintBytes(out, p.Bytes(), indent_str)
+ }
+
+ for _, child := range p.Children {
+ printPacket(out, child, indent+1, printBytes)
+ }
+}
+
+// ReadPacket reads a single Packet from the reader
+func ReadPacket(reader io.Reader) (*Packet, error) {
+ p, _, err := readPacket(reader)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func DecodeString(data []byte) string {
+ return string(data)
+}
+
+func parseInt64(bytes []byte) (ret int64, err error) {
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = fmt.Errorf("integer too large")
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+func encodeInteger(i int64) []byte {
+ n := int64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = (byte(i >> uint((n-1)*8)))
+ j++
+ }
+
+ return out
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
+
+// DecodePacket decodes the given bytes into a single Packet
+// If a decode error is encountered, nil is returned.
+func DecodePacket(data []byte) *Packet {
+ p, _, _ := readPacket(bytes.NewBuffer(data))
+
+ return p
+}
+
+// DecodePacketErr decodes the given bytes into a single Packet
+// If a decode error is encountered, nil is returned
+func DecodePacketErr(data []byte) (*Packet, error) {
+ p, _, err := readPacket(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// readPacket reads a single Packet from the reader, returning the number of bytes read
+func readPacket(reader io.Reader) (*Packet, int, error) {
+ identifier, length, read, err := readHeader(reader)
+ if err != nil {
+ return nil, read, err
+ }
+
+ p := &Packet{
+ Identifier: identifier,
+ }
+
+ p.Data = new(bytes.Buffer)
+ p.Children = make([]*Packet, 0, 2)
+ p.Value = nil
+
+ if p.TagType == TypeConstructed {
+ // TODO: if universal, ensure tag type is allowed to be constructed
+
+ // Track how much content we've read
+ contentRead := 0
+ for {
+ if length != LengthIndefinite {
+ // End if we've read what we've been told to
+ if contentRead == length {
+ break
+ }
+ // Detect if a packet boundary didn't fall on the expected length
+ if contentRead > length {
+ return nil, read, fmt.Errorf("expected to read %d bytes, read %d", length, contentRead)
+ }
+ }
+
+ // Read the next packet
+ child, r, err := readPacket(reader)
+ if err != nil {
+ return nil, read, err
+ }
+ contentRead += r
+ read += r
+
+ // Test is this is the EOC marker for our packet
+ if isEOCPacket(child) {
+ if length == LengthIndefinite {
+ break
+ }
+ return nil, read, errors.New("eoc child not allowed with definite length")
+ }
+
+ // Append and continue
+ p.AppendChild(child)
+ }
+ return p, read, nil
+ }
+
+ if length == LengthIndefinite {
+ return nil, read, errors.New("indefinite length used with primitive type")
+ }
+
+ // Read definite-length content
+ content := make([]byte, length, length)
+ if length > 0 {
+ _, err := io.ReadFull(reader, content)
+ if err != nil {
+ if err == io.EOF {
+ return nil, read, io.ErrUnexpectedEOF
+ }
+ return nil, read, err
+ }
+ read += length
+ }
+
+ if p.ClassType == ClassUniversal {
+ p.Data.Write(content)
+ p.ByteValue = content
+
+ switch p.Tag {
+ case TagEOC:
+ case TagBoolean:
+ val, _ := parseInt64(content)
+
+ p.Value = val != 0
+ case TagInteger:
+ p.Value, _ = parseInt64(content)
+ case TagBitString:
+ case TagOctetString:
+ // the actual string encoding is not known here
+ // (e.g. for LDAP content is already an UTF8-encoded
+ // string). Return the data without further processing
+ p.Value = DecodeString(content)
+ case TagNULL:
+ case TagObjectIdentifier:
+ case TagObjectDescriptor:
+ case TagExternal:
+ case TagRealFloat:
+ case TagEnumerated:
+ p.Value, _ = parseInt64(content)
+ case TagEmbeddedPDV:
+ case TagUTF8String:
+ p.Value = DecodeString(content)
+ case TagRelativeOID:
+ case TagSequence:
+ case TagSet:
+ case TagNumericString:
+ case TagPrintableString:
+ p.Value = DecodeString(content)
+ case TagT61String:
+ case TagVideotexString:
+ case TagIA5String:
+ case TagUTCTime:
+ case TagGeneralizedTime:
+ case TagGraphicString:
+ case TagVisibleString:
+ case TagGeneralString:
+ case TagUniversalString:
+ case TagCharacterString:
+ case TagBMPString:
+ }
+ } else {
+ p.Data.Write(content)
+ }
+
+ return p, read, nil
+}
+
+func (p *Packet) Bytes() []byte {
+ var out bytes.Buffer
+
+ out.Write(encodeIdentifier(p.Identifier))
+ out.Write(encodeLength(p.Data.Len()))
+ out.Write(p.Data.Bytes())
+
+ return out.Bytes()
+}
+
+func (p *Packet) AppendChild(child *Packet) {
+ p.Data.Write(child.Bytes())
+ p.Children = append(p.Children, child)
+}
+
+func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+ p := new(Packet)
+
+ p.ClassType = ClassType
+ p.TagType = TagType
+ p.Tag = Tag
+ p.Data = new(bytes.Buffer)
+
+ p.Children = make([]*Packet, 0, 2)
+
+ p.Value = Value
+ p.Description = Description
+
+ if Value != nil {
+ v := reflect.ValueOf(Value)
+
+ if ClassType == ClassUniversal {
+ switch Tag {
+ case TagOctetString:
+ sv, ok := v.Interface().(string)
+
+ if ok {
+ p.Data.Write([]byte(sv))
+ }
+ }
+ }
+ }
+
+ return p
+}
+
+func NewSequence(Description string) *Packet {
+ return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
+}
+
+func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
+ intValue := int64(0)
+
+ if Value {
+ intValue = 1
+ }
+
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ p.Data.Write(encodeInteger(intValue))
+
+ return p
+}
+
+func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ switch v := Value.(type) {
+ case int:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int64:
+ p.Data.Write(encodeInteger(v))
+ case uint64:
+ // TODO : check range or add encodeUInt...
+ p.Data.Write(encodeInteger(int64(v)))
+ case int32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint32:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint16:
+ p.Data.Write(encodeInteger(int64(v)))
+ case int8:
+ p.Data.Write(encodeInteger(int64(v)))
+ case uint8:
+ p.Data.Write(encodeInteger(int64(v)))
+ default:
+ // TODO : add support for big.Int ?
+ panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
+ }
+
+ return p
+}
+
+func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
+ p := Encode(ClassType, TagType, Tag, nil, Description)
+
+ p.Value = Value
+ p.Data.Write([]byte(Value))
+
+ return p
+}
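
Editor's note: ber.go distinguishes DecodePacket, which returns nil on malformed input, from DecodePacketErr and ReadPacket, which report the error. A short illustrative sketch of the decoding side (not part of this commit; the sample payload is invented):

```go
package main

import (
	"bytes"
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	raw := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "stream me", "payload").Bytes()

	// DecodePacketErr surfaces malformed input instead of silently returning nil.
	p, err := ber.DecodePacketErr(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Value) // stream me

	// ReadPacket consumes exactly one packet from any io.Reader,
	// e.g. a net.Conn in an LDAP client that depends on this package.
	p2, err := ber.ReadPacket(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}
	fmt.Println(p2.Value)
}
```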
diff --git a/vendor/gopkg.in/asn1-ber.v1/content_int.go b/vendor/gopkg.in/asn1-ber.v1/content_int.go
new file mode 100644
index 0000000000..1858b74b6d
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/content_int.go
@@ -0,0 +1,25 @@
+package ber
+
+func encodeUnsignedInteger(i uint64) []byte {
+ n := uint64Length(i)
+ out := make([]byte, n)
+
+ var j int
+ for ; n > 0; n-- {
+ out[j] = (byte(i >> uint((n-1)*8)))
+ j++
+ }
+
+ return out
+}
+
+func uint64Length(i uint64) (numBytes int) {
+ numBytes = 1
+
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/header.go b/vendor/gopkg.in/asn1-ber.v1/header.go
new file mode 100644
index 0000000000..123744e9b8
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/header.go
@@ -0,0 +1,29 @@
+package ber
+
+import (
+ "errors"
+ "io"
+)
+
+func readHeader(reader io.Reader) (identifier Identifier, length int, read int, err error) {
+ if i, c, err := readIdentifier(reader); err != nil {
+ return Identifier{}, 0, read, err
+ } else {
+ identifier = i
+ read += c
+ }
+
+ if l, c, err := readLength(reader); err != nil {
+ return Identifier{}, 0, read, err
+ } else {
+ length = l
+ read += c
+ }
+
+ // Validate length type with identifier (x.600, 8.1.3.2.a)
+ if length == LengthIndefinite && identifier.TagType == TypePrimitive {
+ return Identifier{}, 0, read, errors.New("indefinite length used with primitive type")
+ }
+
+ return identifier, length, read, nil
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/identifier.go b/vendor/gopkg.in/asn1-ber.v1/identifier.go
new file mode 100644
index 0000000000..f7672a8447
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/identifier.go
@@ -0,0 +1,103 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+func readIdentifier(reader io.Reader) (Identifier, int, error) {
+ identifier := Identifier{}
+ read := 0
+
+ // identifier byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading identifier byte: %v\n", err)
+ }
+ return Identifier{}, read, err
+ }
+ read++
+
+ identifier.ClassType = Class(b) & ClassBitmask
+ identifier.TagType = Type(b) & TypeBitmask
+
+ if tag := Tag(b) & TagBitmask; tag != HighTag {
+ // short-form tag
+ identifier.Tag = tag
+ return identifier, read, nil
+ }
+
+ // high-tag-number tag
+ tagBytes := 0
+ for {
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading high-tag-number tag byte %d: %v\n", tagBytes, err)
+ }
+ return Identifier{}, read, err
+ }
+ tagBytes++
+ read++
+
+ // Lowest 7 bits get appended to the tag value (x.690, 8.1.2.4.2.b)
+ identifier.Tag <<= 7
+ identifier.Tag |= Tag(b) & HighTagValueBitmask
+
+ // First byte may not be all zeros (x.690, 8.1.2.4.2.c)
+ if tagBytes == 1 && identifier.Tag == 0 {
+ return Identifier{}, read, errors.New("invalid first high-tag-number tag byte")
+ }
+ // Overflow of int64
+ // TODO: support big int tags?
+ if tagBytes > 9 {
+ return Identifier{}, read, errors.New("high-tag-number tag overflow")
+ }
+
+ // Top bit of 0 means this is the last byte in the high-tag-number tag (x.690, 8.1.2.4.2.a)
+ if Tag(b)&HighTagContinueBitmask == 0 {
+ break
+ }
+ }
+
+ return identifier, read, nil
+}
+
+func encodeIdentifier(identifier Identifier) []byte {
+ b := []byte{0x0}
+ b[0] |= byte(identifier.ClassType)
+ b[0] |= byte(identifier.TagType)
+
+ if identifier.Tag < HighTag {
+ // Short-form
+ b[0] |= byte(identifier.Tag)
+ } else {
+ // high-tag-number
+ b[0] |= byte(HighTag)
+
+ tag := identifier.Tag
+
+ highBit := uint(63)
+ for {
+ if tag&(1<<highBit) != 0 {
+ break
+ }
+ highBit--
+ }
+
+ tagBytes := int(math.Ceil(float64(highBit) / 7.0))
+ for i := tagBytes - 1; i >= 0; i-- {
+ offset := uint(i) * 7
+ mask := Tag(0x7f) << offset
+ tagByte := (tag & mask) >> offset
+ if i != 0 {
+ tagByte |= 0x80
+ }
+ b = append(b, byte(tagByte))
+ }
+ }
+ return b
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/length.go b/vendor/gopkg.in/asn1-ber.v1/length.go
new file mode 100644
index 0000000000..8e2ae4dddd
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/length.go
@@ -0,0 +1,71 @@
+package ber
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+func readLength(reader io.Reader) (length int, read int, err error) {
+ // length byte
+ b, err := readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading length byte: %v\n", err)
+ }
+ return 0, 0, err
+ }
+ read++
+
+ switch {
+ case b == 0xFF:
+ // Invalid 0xFF (x.600, 8.1.3.5.c)
+ return 0, read, errors.New("invalid length byte 0xff")
+
+ case b == LengthLongFormBitmask:
+ // Indefinite form, we have to decode packets until we encounter an EOC packet (x.600, 8.1.3.6)
+ length = LengthIndefinite
+
+ case b&LengthLongFormBitmask == 0:
+ // Short definite form, extract the length from the bottom 7 bits (x.600, 8.1.3.4)
+ length = int(b) & LengthValueBitmask
+
+ case b&LengthLongFormBitmask != 0:
+ // Long definite form, extract the number of length bytes to follow from the bottom 7 bits (x.600, 8.1.3.5.b)
+ lengthBytes := int(b) & LengthValueBitmask
+ // Protect against overflow
+ // TODO: support big int length?
+ if lengthBytes > 8 {
+ return 0, read, errors.New("long-form length overflow")
+ }
+ for i := 0; i < lengthBytes; i++ {
+ b, err = readByte(reader)
+ if err != nil {
+ if Debug {
+ fmt.Printf("error reading long-form length byte %d: %v\n", i, err)
+ }
+ return 0, read, err
+ }
+ read++
+
+ // x.600, 8.1.3.5
+ length <<= 8
+ length |= int(b)
+ }
+
+ default:
+ return 0, read, errors.New("invalid length byte")
+ }
+
+ return length, read, nil
+}
+
+func encodeLength(length int) []byte {
+ length_bytes := encodeUnsignedInteger(uint64(length))
+ if length > 127 || len(length_bytes) > 1 {
+ longFormBytes := []byte{(LengthLongFormBitmask | byte(len(length_bytes)))}
+ longFormBytes = append(longFormBytes, length_bytes...)
+ length_bytes = longFormBytes
+ }
+ return length_bytes
+}
diff --git a/vendor/gopkg.in/asn1-ber.v1/util.go b/vendor/gopkg.in/asn1-ber.v1/util.go
new file mode 100644
index 0000000000..3e56b66c8c
--- /dev/null
+++ b/vendor/gopkg.in/asn1-ber.v1/util.go
@@ -0,0 +1,24 @@
+package ber
+
+import "io"
+
+func readByte(reader io.Reader) (byte, error) {
+ bytes := make([]byte, 1, 1)
+ _, err := io.ReadFull(reader, bytes)
+ if err != nil {
+ if err == io.EOF {
+ return 0, io.ErrUnexpectedEOF
+ }
+ return 0, err
+ }
+ return bytes[0], nil
+}
+
+func isEOCPacket(p *Packet) bool {
+ return p != nil &&
+ p.Tag == TagEOC &&
+ p.ClassType == ClassUniversal &&
+ p.TagType == TypePrimitive &&
+ len(p.ByteValue) == 0 &&
+ len(p.Children) == 0
+}
diff --git a/vendor/gopkg.in/bufio.v1/LICENSE b/vendor/gopkg.in/bufio.v1/LICENSE
new file mode 100644
index 0000000000..07a316cbf4
--- /dev/null
+++ b/vendor/gopkg.in/bufio.v1/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The bufio Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/bufio.v1/Makefile b/vendor/gopkg.in/bufio.v1/Makefile
new file mode 100644
index 0000000000..038ed47e94
--- /dev/null
+++ b/vendor/gopkg.in/bufio.v1/Makefile
@@ -0,0 +1,2 @@
+all:
+ go test gopkg.in/bufio.v1
diff --git a/vendor/gopkg.in/bufio.v1/README.md b/vendor/gopkg.in/bufio.v1/README.md
new file mode 100644
index 0000000000..bfb85ee544
--- /dev/null
+++ b/vendor/gopkg.in/bufio.v1/README.md
@@ -0,0 +1,4 @@
+bufio
+=====
+
+This is a fork of the http://golang.org/pkg/bufio/ package. It adds a `ReadN` method that reads the next `n` bytes from the internal buffer without allocating an intermediate buffer. The method works just like [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next), but has a slightly different signature.
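
Editor's note: to make the ReadN addition concrete, a small hypothetical usage sketch follows (not part of the vendored package; the input string is invented):

```go
package main

import (
	"fmt"
	"strings"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	r := bufio.NewReader(strings.NewReader("HELLO, buffered world"))

	// ReadN returns a slice into the Reader's internal buffer, so the bytes
	// are only valid until the next read call, but no intermediate copy is made.
	head, err := r.ReadN(5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", head)  // HELLO
	fmt.Println(r.Buffered()) // bytes still waiting in the buffer
}
```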
diff --git a/vendor/gopkg.in/bufio.v1/buffer.go b/vendor/gopkg.in/bufio.v1/buffer.go
new file mode 100644
index 0000000000..8b915605b6
--- /dev/null
+++ b/vendor/gopkg.in/bufio.v1/buffer.go
@@ -0,0 +1,413 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+}
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can
+// check for invalid usage.
+type readOp int
+
+const (
+ opInvalid readOp = iota // Non-read operation.
+ opReadRune // Read rune.
+ opRead // Any other read operation.
+)
+
+// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
+var ErrTooLarge = errors.New("bytes.Buffer: too large")
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ b.lastRead = opInvalid
+ switch {
+ case n < 0 || n > b.Len():
+ panic("bytes.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("bytes.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ b.lastRead = opInvalid
+ m := b.grow(len(s))
+ return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ b.lastRead = opInvalid
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with ErrTooLarge.
+func makeSlice(n int) []byte {
+ // If the make fails, give a known error.
+ defer func() {
+ if recover() != nil {
+ panic(ErrTooLarge)
+ }
+ }()
+ return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ b.lastRead = opInvalid
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("bytes.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ b.lastRead = opInvalid
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+ if r < utf8.RuneSelf {
+ b.WriteByte(byte(r))
+ return 1, nil
+ }
+ n = utf8.EncodeRune(b.runeBytes[0:], r)
+ b.Write(b.runeBytes[0:n])
+ return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ b.lastRead = opInvalid
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ b.lastRead = opRead
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+ b.lastRead = opInvalid
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, 0, io.EOF
+ }
+ b.lastRead = opReadRune
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ return rune(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a ReadRune, UnreadRune returns an error. (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() error {
+ if b.lastRead != opReadRune {
+ return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ _, n := utf8.DecodeLastRune(b.buf[0:b.off])
+ b.off -= n
+ }
+ return nil
+}
+
+// UnreadByte unreads the last byte returned by the most recent
+// read operation. If write has happened since the last read, UnreadByte
+// returns an error.
+func (b *Buffer) UnreadByte() error {
+ if b.lastRead != opReadRune && b.lastRead != opRead {
+ return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ b.off--
+ }
+ return nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ b.lastRead = opRead
+ return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+ slice, err := b.readSlice(delim)
+ return string(slice), err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
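
Editor's note: buffer.go documents that the zero Buffer is ready to use and that ReadString consumes up to and including the delimiter. A brief illustrative sketch of that API (not part of this commit):

```go
package main

import (
	"fmt"
	"os"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	// The zero value is an empty buffer ready to use.
	var b bufio.Buffer
	b.WriteString("alpha,")
	b.WriteString("beta\n")

	// ReadString consumes up to and including the delimiter.
	field, err := b.ReadString(',')
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", field) // "alpha,"

	// WriteTo drains whatever is left in the buffer to the writer.
	if _, err := b.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```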
diff --git a/vendor/gopkg.in/bufio.v1/bufio.go b/vendor/gopkg.in/bufio.v1/bufio.go
new file mode 100644
index 0000000000..8f5cdc084d
--- /dev/null
+++ b/vendor/gopkg.in/bufio.v1/bufio.go
@@ -0,0 +1,728 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
+// object, creating another object (Reader or Writer) that also implements
+// the interface but provides buffering and some help for textual I/O.
+package bufio
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+const (
+ defaultBufSize = 4096
+)
+
+var (
+ ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+ ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+ ErrBufferFull = errors.New("bufio: buffer full")
+ ErrNegativeCount = errors.New("bufio: negative count")
+)
+
+// Buffered input.
+
+// Reader implements buffering for an io.Reader object.
+type Reader struct {
+ buf []byte
+ rd io.Reader
+ r, w int
+ err error
+ lastByte int
+ lastRuneSize int
+}
+
+const minReadBufferSize = 16
+const maxConsecutiveEmptyReads = 100
+
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
+ // Is it already a Reader?
+ b, ok := rd.(*Reader)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size < minReadBufferSize {
+ size = minReadBufferSize
+ }
+ r := new(Reader)
+ r.reset(make([]byte, size), rd)
+ return r
+}
+
+// NewReader returns a new Reader whose buffer has the default size.
+func NewReader(rd io.Reader) *Reader {
+ return NewReaderSize(rd, defaultBufSize)
+}
+
+// Reset discards any buffered data, resets all state, and switches
+// the buffered reader to read from r.
+func (b *Reader) Reset(r io.Reader) {
+ b.reset(b.buf, r)
+}
+
+func (b *Reader) reset(buf []byte, r io.Reader) {
+ *b = Reader{
+ buf: buf,
+ rd: r,
+ lastByte: -1,
+ lastRuneSize: -1,
+ }
+}
+
+var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
+
+// fill reads a new chunk into the buffer.
+func (b *Reader) fill() {
+ // Slide existing data to beginning.
+ if b.r > 0 {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ if b.w >= len(b.buf) {
+ panic("bufio: tried to fill full buffer")
+ }
+
+ // Read new data: try a limited number of times.
+ for i := maxConsecutiveEmptyReads; i > 0; i-- {
+ n, err := b.rd.Read(b.buf[b.w:])
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ b.w += n
+ if err != nil {
+ b.err = err
+ return
+ }
+ if n > 0 {
+ return
+ }
+ }
+ b.err = io.ErrNoProgress
+}
+
+func (b *Reader) readErr() error {
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// Peek returns the next n bytes without advancing the reader. The bytes stop
+// being valid at the next read call. If Peek returns fewer than n bytes, it
+// also returns an error explaining why the read is short. The error is
+// ErrBufferFull if n is larger than b's buffer size.
+func (b *Reader) Peek(n int) ([]byte, error) {
+ if n < 0 {
+ return nil, ErrNegativeCount
+ }
+ if n > len(b.buf) {
+ return nil, ErrBufferFull
+ }
+ // 0 <= n <= len(b.buf)
+ for b.w-b.r < n && b.err == nil {
+ b.fill() // b.w-b.r < len(b.buf) => buffer is not full
+ }
+ m := b.w - b.r
+ if m > n {
+ m = n
+ }
+ var err error
+ if m < n {
+ err = b.readErr()
+ if err == nil {
+ err = ErrBufferFull
+ }
+ }
+ return b.buf[b.r : b.r+m], err
+}
+
+// Read reads data into p.
+// It returns the number of bytes read into p.
+// It calls Read at most once on the underlying Reader,
+// hence n may be less than len(p).
+// At EOF, the count will be zero and err will be io.EOF.
+func (b *Reader) Read(p []byte) (n int, err error) {
+ n = len(p)
+ if n == 0 {
+ return 0, b.readErr()
+ }
+ if b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ if len(p) >= len(b.buf) {
+ // Large read, empty buffer.
+ // Read directly into p to avoid copy.
+ n, b.err = b.rd.Read(p)
+ if n < 0 {
+ panic(errNegativeRead)
+ }
+ if n > 0 {
+ b.lastByte = int(p[n-1])
+ b.lastRuneSize = -1
+ }
+ return n, b.readErr()
+ }
+ b.fill() // buffer is empty
+ if b.w == b.r {
+ return 0, b.readErr()
+ }
+ }
+
+ if n > b.w-b.r {
+ n = b.w - b.r
+ }
+ copy(p[0:n], b.buf[b.r:])
+ b.r += n
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = -1
+ return n, nil
+}
+
+// ReadByte reads and returns a single byte.
+// If no byte is available, returns an error.
+func (b *Reader) ReadByte() (c byte, err error) {
+ b.lastRuneSize = -1
+ for b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ b.fill() // buffer is empty
+ }
+ c = b.buf[b.r]
+ b.r++
+ b.lastByte = int(c)
+ return c, nil
+}
+
+// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
+func (b *Reader) UnreadByte() error {
+ if b.lastByte < 0 || b.r == 0 && b.w > 0 {
+ return ErrInvalidUnreadByte
+ }
+ // b.r > 0 || b.w == 0
+ if b.r > 0 {
+ b.r--
+ } else {
+ // b.r == 0 && b.w == 0
+ b.w = 1
+ }
+ b.buf[b.r] = byte(b.lastByte)
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// ReadRune reads a single UTF-8 encoded Unicode character and returns the
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
+func (b *Reader) ReadRune() (r rune, size int, err error) {
+ for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
+ b.fill() // b.w-b.r < len(buf) => buffer is not full
+ }
+ b.lastRuneSize = -1
+ if b.r == b.w {
+ return 0, 0, b.readErr()
+ }
+ r, size = rune(b.buf[b.r]), 1
+ if r >= 0x80 {
+ r, size = utf8.DecodeRune(b.buf[b.r:b.w])
+ }
+ b.r += size
+ b.lastByte = int(b.buf[b.r-1])
+ b.lastRuneSize = size
+ return r, size, nil
+}
+
+// UnreadRune unreads the last rune. If the most recent read operation on
+// the buffer was not a ReadRune, UnreadRune returns an error. (In this
+// regard it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Reader) UnreadRune() error {
+ if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
+ return ErrInvalidUnreadRune
+ }
+ b.r -= b.lastRuneSize
+ b.lastByte = -1
+ b.lastRuneSize = -1
+ return nil
+}
+
+// Buffered returns the number of bytes that can be read from the current buffer.
+func (b *Reader) Buffered() int { return b.w - b.r }
+
+// ReadSlice reads until the first occurrence of delim in the input,
+// returning a slice pointing at the bytes in the buffer.
+// The bytes stop being valid at the next read.
+// If ReadSlice encounters an error before finding a delimiter,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
+// Because the data returned from ReadSlice will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+// ReadSlice returns err != nil if and only if line does not end in delim.
+func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
+ for {
+ // Search buffer.
+ if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
+ line = b.buf[b.r : b.r+i+1]
+ b.r += i + 1
+ break
+ }
+
+ // Pending error?
+ if b.err != nil {
+ line = b.buf[b.r:b.w]
+ b.r = b.w
+ err = b.readErr()
+ break
+ }
+
+ // Buffer full?
+ if n := b.Buffered(); n >= len(b.buf) {
+ b.r = b.w
+ line = b.buf
+ err = ErrBufferFull
+ break
+ }
+
+ b.fill() // buffer is not full
+ }
+
+ // Handle last byte, if any.
+ if i := len(line) - 1; i >= 0 {
+ b.lastByte = int(line[i])
+ }
+
+ return
+}
+
+// ReadN tries to read exactly n bytes.
+// The bytes stop being valid at the next read call.
+// If ReadN encounters an error before reading n bytes,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadN fails with error ErrBufferFull if the buffer fills
+// without reading N bytes.
+// Because the data returned from ReadN will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+func (b *Reader) ReadN(n int) ([]byte, error) {
+ for b.Buffered() < n {
+ if b.err != nil {
+ buf := b.buf[b.r:b.w]
+ b.r = b.w
+ return buf, b.readErr()
+ }
+
+ // Buffer is full?
+ if b.Buffered() >= len(b.buf) {
+ b.r = b.w
+ return b.buf, ErrBufferFull
+ }
+
+ b.fill()
+ }
+ buf := b.buf[b.r : b.r+n]
+ b.r += n
+ return buf, nil
+}
+
+// ReadLine is a low-level line-reading primitive. Most callers should use
+// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
+//
+// ReadLine tries to return a single line, not including the end-of-line bytes.
+// If the line was too long for the buffer then isPrefix is set and the
+// beginning of the line is returned. The rest of the line will be returned
+// from future calls. isPrefix will be false when returning the last fragment
+// of the line. The returned buffer is only valid until the next call to
+// ReadLine. ReadLine either returns a non-nil line or it returns an error,
+// never both.
+//
+// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
+// No indication or error is given if the input ends without a final line end.
+// Calling UnreadByte after ReadLine will always unread the last byte read
+// (possibly a character belonging to the line end) even if that byte is not
+// part of the line returned by ReadLine.
+func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
+ line, err = b.ReadSlice('\n')
+ if err == ErrBufferFull {
+ // Handle the case where "\r\n" straddles the buffer.
+ if len(line) > 0 && line[len(line)-1] == '\r' {
+ // Put the '\r' back on buf and drop it from line.
+ // Let the next call to ReadLine check for "\r\n".
+ if b.r == 0 {
+ // should be unreachable
+ panic("bufio: tried to rewind past start of buffer")
+ }
+ b.r--
+ line = line[:len(line)-1]
+ }
+ return line, true, nil
+ }
+
+ if len(line) == 0 {
+ if err != nil {
+ line = nil
+ }
+ return
+ }
+ err = nil
+
+ if line[len(line)-1] == '\n' {
+ drop := 1
+ if len(line) > 1 && line[len(line)-2] == '\r' {
+ drop = 2
+ }
+ line = line[:len(line)-drop]
+ }
+ return
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
+ // Use ReadSlice to look for array,
+ // accumulating full buffers.
+ var frag []byte
+ var full [][]byte
+ err = nil
+
+ for {
+ var e error
+ frag, e = b.ReadSlice(delim)
+ if e == nil { // got final fragment
+ break
+ }
+ if e != ErrBufferFull { // unexpected error
+ err = e
+ break
+ }
+
+ // Make a copy of the buffer.
+ buf := make([]byte, len(frag))
+ copy(buf, frag)
+ full = append(full, buf)
+ }
+
+ // Allocate new buffer to hold the full pieces and the fragment.
+ n := 0
+ for i := range full {
+ n += len(full[i])
+ }
+ n += len(frag)
+
+ // Copy full pieces and fragment in.
+ buf := make([]byte, n)
+ n = 0
+ for i := range full {
+ n += copy(buf[n:], full[i])
+ }
+ copy(buf[n:], frag)
+ return buf, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadString(delim byte) (line string, err error) {
+ bytes, err := b.ReadBytes(delim)
+ line = string(bytes)
+ return line, err
+}
+
+// WriteTo implements io.WriterTo.
+func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ n, err = b.writeBuf(w)
+ if err != nil {
+ return
+ }
+
+ if r, ok := b.rd.(io.WriterTo); ok {
+ m, err := r.WriteTo(w)
+ n += m
+ return n, err
+ }
+
+ if w, ok := w.(io.ReaderFrom); ok {
+ m, err := w.ReadFrom(b.rd)
+ n += m
+ return n, err
+ }
+
+ if b.w-b.r < len(b.buf) {
+ b.fill() // buffer not full
+ }
+
+ for b.r < b.w {
+ // b.r < b.w => buffer is not empty
+ m, err := b.writeBuf(w)
+ n += m
+ if err != nil {
+ return n, err
+ }
+ b.fill() // buffer is empty
+ }
+
+ if b.err == io.EOF {
+ b.err = nil
+ }
+
+ return n, b.readErr()
+}
+
+// writeBuf writes the Reader's buffer to the writer.
+func (b *Reader) writeBuf(w io.Writer) (int64, error) {
+ n, err := w.Write(b.buf[b.r:b.w])
+ if n < b.r-b.w {
+ panic(errors.New("bufio: writer did not write all data"))
+ }
+ b.r += n
+ return int64(n), err
+}
+
+// buffered output
+
+// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes will return the error.
+// After all data has been written, the client should call the
+// Flush method to guarantee all data has been forwarded to
+// the underlying io.Writer.
+type Writer struct {
+ err error
+ buf []byte
+ n int
+ wr io.Writer
+}
+
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(w io.Writer, size int) *Writer {
+ // Is it already a Writer?
+ b, ok := w.(*Writer)
+ if ok && len(b.buf) >= size {
+ return b
+ }
+ if size <= 0 {
+ size = defaultBufSize
+ }
+ return &Writer{
+ buf: make([]byte, size),
+ wr: w,
+ }
+}
+
+// NewWriter returns a new Writer whose buffer has the default size.
+func NewWriter(w io.Writer) *Writer {
+ return NewWriterSize(w, defaultBufSize)
+}
+
+// Reset discards any unflushed buffered data, clears any error, and
+// resets b to write its output to w.
+func (b *Writer) Reset(w io.Writer) {
+ b.err = nil
+ b.n = 0
+ b.wr = w
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (b *Writer) Flush() error {
+ err := b.flush()
+ return err
+}
+
+func (b *Writer) flush() error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.n == 0 {
+ return nil
+ }
+ n, err := b.wr.Write(b.buf[0:b.n])
+ if n < b.n && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ if n > 0 && n < b.n {
+ copy(b.buf[0:b.n-n], b.buf[n:b.n])
+ }
+ b.n -= n
+ b.err = err
+ return err
+ }
+ b.n = 0
+ return nil
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (b *Writer) Available() int { return len(b.buf) - b.n }
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (b *Writer) Buffered() int { return b.n }
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (b *Writer) Write(p []byte) (nn int, err error) {
+ for len(p) > b.Available() && b.err == nil {
+ var n int
+ if b.Buffered() == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, b.err = b.wr.Write(p)
+ } else {
+ n = copy(b.buf[b.n:], p)
+ b.n += n
+ b.flush()
+ }
+ nn += n
+ p = p[n:]
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], p)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// WriteByte writes a single byte.
+func (b *Writer) WriteByte(c byte) error {
+ if b.err != nil {
+ return b.err
+ }
+ if b.Available() <= 0 && b.flush() != nil {
+ return b.err
+ }
+ b.buf[b.n] = c
+ b.n++
+ return nil
+}
+
+// WriteRune writes a single Unicode code point, returning
+// the number of bytes written and any error.
+func (b *Writer) WriteRune(r rune) (size int, err error) {
+ if r < utf8.RuneSelf {
+ err = b.WriteByte(byte(r))
+ if err != nil {
+ return 0, err
+ }
+ return 1, nil
+ }
+ if b.err != nil {
+ return 0, b.err
+ }
+ n := b.Available()
+ if n < utf8.UTFMax {
+ if b.flush(); b.err != nil {
+ return 0, b.err
+ }
+ n = b.Available()
+ if n < utf8.UTFMax {
+ // Can only happen if buffer is silly small.
+ return b.WriteString(string(r))
+ }
+ }
+ size = utf8.EncodeRune(b.buf[b.n:], r)
+ b.n += size
+ return size, nil
+}
+
+// WriteString writes a string.
+// It returns the number of bytes written.
+// If the count is less than len(s), it also returns an error explaining
+// why the write is short.
+func (b *Writer) WriteString(s string) (int, error) {
+ nn := 0
+ for len(s) > b.Available() && b.err == nil {
+ n := copy(b.buf[b.n:], s)
+ b.n += n
+ nn += n
+ s = s[n:]
+ b.flush()
+ }
+ if b.err != nil {
+ return nn, b.err
+ }
+ n := copy(b.buf[b.n:], s)
+ b.n += n
+ nn += n
+ return nn, nil
+}
+
+// ReadFrom implements io.ReaderFrom.
+func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ if b.Buffered() == 0 {
+ if w, ok := b.wr.(io.ReaderFrom); ok {
+ return w.ReadFrom(r)
+ }
+ }
+ var m int
+ for {
+ if b.Available() == 0 {
+ if err1 := b.flush(); err1 != nil {
+ return n, err1
+ }
+ }
+ nr := 0
+ for nr < maxConsecutiveEmptyReads {
+ m, err = r.Read(b.buf[b.n:])
+ if m != 0 || err != nil {
+ break
+ }
+ nr++
+ }
+ if nr == maxConsecutiveEmptyReads {
+ return n, io.ErrNoProgress
+ }
+ b.n += m
+ n += int64(m)
+ if err != nil {
+ break
+ }
+ }
+ if err == io.EOF {
+ // If we filled the buffer exactly, flush pre-emptively.
+ if b.Available() == 0 {
+ err = b.flush()
+ } else {
+ err = nil
+ }
+ }
+ return n, err
+}
+
+// buffered input and output
+
+// ReadWriter stores pointers to a Reader and a Writer.
+// It implements io.ReadWriter.
+type ReadWriter struct {
+ *Reader
+ *Writer
+}
+
+// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
+func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
+ return &ReadWriter{r, w}
+}
diff --git a/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/LICENSE b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/LICENSE
new file mode 100644
index 0000000000..4a30d56440
--- /dev/null
+++ b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/LICENSE
@@ -0,0 +1,8 @@
+MIT License
+Copyright (c) 2016 The Editorconfig Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/README.md b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/README.md
new file mode 100644
index 0000000000..4a57ec133d
--- /dev/null
+++ b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/README.md
@@ -0,0 +1,121 @@
+[![GoDoc](https://godoc.org/gopkg.in/editorconfig/editorconfig-core-go.v1?status.svg)](https://godoc.org/gopkg.in/editorconfig/editorconfig-core-go.v1)
+[![Go Report Card](https://goreportcard.com/badge/gopkg.in/editorconfig/editorconfig-core-go.v1)](https://goreportcard.com/report/gopkg.in/editorconfig/editorconfig-core-go.v1)
+
+# Editorconfig Core Go
+
+An [Editorconfig][editorconfig] file parser and manipulator for Go.
+
+> This package already works, but is still being tested.
+
+## Installing
+
+We recommend the use of [gopkg.in][gopkg] for this package:
+
+```bash
+go get -u gopkg.in/editorconfig/editorconfig-core-go.v1
+```
+
+Import it by the same path. The package name you will use to access it is
+`editorconfig`.
+
+```go
+import (
+ "gopkg.in/editorconfig/editorconfig-core-go.v1"
+)
+```
+
+## Usage
+
+### Parse from file
+
+```go
+editorConfig, err := editorconfig.ParseFile("path/to/.editorconfig")
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Parse from slice of bytes
+
+```go
+data := []byte("...")
+editorConfig, err := editorconfig.ParseBytes(data)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Get definition to a given filename
+
+This method builds a definition for a given filename.
+The definition is a merge of the properties whose selectors matched the
+given filename.
+Later sections of the file take precedence over earlier ones.
+
+```go
+def := editorConfig.GetDefinitionForFilename("my/file.go")
+```
+
+The definition has the following properties:
+
+```go
+type Definition struct {
+ Selector string
+
+ Charset string
+ IndentStyle string
+ IndentSize string
+ TabWidth int
+ EndOfLine string
+ TrimTrailingWhitespace bool
+ InsertFinalNewline bool
+}
+```
+
+#### Automatic search for `.editorconfig` files
+
+If you want the definition for a file without having to manually
+parse the `.editorconfig` files, you can use the static version
+of `GetDefinitionForFilename`:
+
+```go
+def, err := editorconfig.GetDefinitionForFilename("foo/bar/baz/my-file.go")
+```
+
+In the example above, the package will automatically search for
+`.editorconfig` files in:
+
+- `foo/bar/baz/.editorconfig`
+- `foo/bar/.editorconfig`
+- `foo/.editorconfig`
+
+The search stops when it reaches a file with `root = true` or the root of the filesystem.
+
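+Assuming `def` was obtained as above, here is a minimal sketch of acting on the
+returned definition (using the constants exported by the package):
+
+```go
+switch def.IndentStyle {
+case editorconfig.IndentStyleSpaces:
+	fmt.Printf("indent with %s spaces\n", def.IndentSize)
+case editorconfig.IndentStyleTab:
+	fmt.Printf("indent with tabs (tab width %d)\n", def.TabWidth)
+}
+```
+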
+### Generating a .editorconfig file
+
+You can easily convert an Editorconfig struct to a compatible INI file:
+
+```go
+// serialize to slice of bytes
+data, err := editorConfig.Serialize()
+if err != nil {
+ log.Fatal(err)
+}
+
+// save directly to file
+err := editorConfig.Save("path/to/.editorconfig")
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+## Contributing
+
+To run the tests:
+
+```bash
+go test -v
+```
+
+[editorconfig]: http://editorconfig.org/
+[gopkg]: https://gopkg.in
diff --git a/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/editorconfig.go b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/editorconfig.go
new file mode 100644
index 0000000000..6291af5759
--- /dev/null
+++ b/vendor/gopkg.in/editorconfig/editorconfig-core-go.v1/editorconfig.go
@@ -0,0 +1,277 @@
+// Package editorconfig can be used to parse and generate editorconfig files.
+// For more information about editorconfig, see http://editorconfig.org/
+package editorconfig
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "gopkg.in/ini.v1"
+)
+
+// IndentStyle possible values
+const (
+ IndentStyleTab = "tab"
+ IndentStyleSpaces = "space"
+)
+
+// EndOfLine possible values
+const (
+ EndOfLineLf = "lf"
+ EndOfLineCr = "cr"
+ EndOfLineCrLf = "crlf"
+)
+
+// Charset possible values
+const (
+ CharsetLatin1 = "latin1"
+ CharsetUTF8 = "utf-8"
+ CharsetUTF16BE = "utf-16be"
+ CharsetUTF16LE = "utf-16le"
+)
+
+// Definition represents a definition inside the .editorconfig file.
+// E.g. a section of the file.
+// The definition is composed of the selector ("*", "*.go", "*.{js,css}", etc),
+// plus the properties of the selected files.
+type Definition struct {
+ Selector string `ini:"-" json:"-"`
+
+ Charset string `ini:"charset" json:"charset,omitempty"`
+ IndentStyle string `ini:"indent_style" json:"indent_style,omitempty"`
+ IndentSize string `ini:"indent_size" json:"indent_size,omitempty"`
+ TabWidth int `ini:"tab_width" json:"tab_width,omitempty"`
+ EndOfLine string `ini:"end_of_line" json:"end_of_line,omitempty"`
+ TrimTrailingWhitespace bool `ini:"trim_trailing_whitespace" json:"trim_trailing_whitespace,omitempty"`
+ InsertFinalNewline bool `ini:"insert_final_newline" json:"insert_final_newline,omitempty"`
+}
+
+// Editorconfig represents a .editorconfig file.
+// It is composed of a "root" property, plus the definitions defined in the
+// file.
+type Editorconfig struct {
+ Root bool
+ Definitions []*Definition
+}
+
+// ParseBytes parses from a slice of bytes.
+func ParseBytes(data []byte) (*Editorconfig, error) {
+ iniFile, err := ini.Load(data)
+ if err != nil {
+ return nil, err
+ }
+
+ editorConfig := &Editorconfig{}
+ editorConfig.Root = iniFile.Section(ini.DEFAULT_SECTION).Key("root").MustBool(false)
+ for _, sectionStr := range iniFile.SectionStrings() {
+ if sectionStr == ini.DEFAULT_SECTION {
+ continue
+ }
+ var (
+ iniSection = iniFile.Section(sectionStr)
+ definition = &Definition{}
+ )
+ err := iniSection.MapTo(&definition)
+ if err != nil {
+ return nil, err
+ }
+
+ // tab_width defaults to indent_size:
+ // https://github.com/editorconfig/editorconfig/wiki/EditorConfig-Properties#tab_width
+ if definition.TabWidth <= 0 {
+ if num, err := strconv.Atoi(definition.IndentSize); err == nil {
+ definition.TabWidth = num
+ }
+ }
+
+ definition.Selector = sectionStr
+ editorConfig.Definitions = append(editorConfig.Definitions, definition)
+ }
+ return editorConfig, nil
+}
+
+// ParseFile parses from a file.
+func ParseFile(f string) (*Editorconfig, error) {
+ data, err := ioutil.ReadFile(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseBytes(data)
+}
+
+var (
+ regexpBraces = regexp.MustCompile("{.*}")
+)
+
+func filenameMatches(pattern, name string) bool {
+ // basic match
+ matched, _ := filepath.Match(pattern, name)
+ if matched {
+ return true
+ }
+ // foo/bar/main.go should match main.go
+ matched, _ = filepath.Match(pattern, filepath.Base(name))
+ if matched {
+ return true
+ }
+ // foo should match foo/main.go
+ matched, _ = filepath.Match(filepath.Join(pattern, "*"), name)
+ if matched {
+ return true
+ }
+ // *.{js,go} should match main.go
+ if str := regexpBraces.FindString(pattern); len(str) > 0 {
+		// remove initial "{" and final "}"
+ str = strings.TrimPrefix(str, "{")
+ str = strings.TrimSuffix(str, "}")
+
+ // testing for empty brackets: "{}"
+ if len(str) == 0 {
+ patt := regexpBraces.ReplaceAllString(pattern, "*")
+ matched, _ = filepath.Match(patt, filepath.Base(name))
+ return matched
+ }
+
+ for _, patt := range strings.Split(str, ",") {
+ patt = regexpBraces.ReplaceAllString(pattern, patt)
+ matched, _ = filepath.Match(patt, filepath.Base(name))
+ if matched {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (d *Definition) merge(md *Definition) {
+ if len(d.Charset) == 0 {
+ d.Charset = md.Charset
+ }
+ if len(d.IndentStyle) == 0 {
+ d.IndentStyle = md.IndentStyle
+ }
+ if len(d.IndentSize) == 0 {
+ d.IndentSize = md.IndentSize
+ }
+ if d.TabWidth <= 0 {
+ d.TabWidth = md.TabWidth
+ }
+ if len(d.EndOfLine) == 0 {
+ d.EndOfLine = md.EndOfLine
+ }
+ if !d.TrimTrailingWhitespace {
+ d.TrimTrailingWhitespace = md.TrimTrailingWhitespace
+ }
+ if !d.InsertFinalNewline {
+ d.InsertFinalNewline = md.InsertFinalNewline
+ }
+}
+
+// GetDefinitionForFilename returns a definition for the given filename.
+// The result is a merge of the selectors that matched the file.
+// The last section has preference over the priors.
+func (e *Editorconfig) GetDefinitionForFilename(name string) *Definition {
+ def := &Definition{}
+ for i := len(e.Definitions) - 1; i >= 0; i-- {
+ actualDef := e.Definitions[i]
+ if filenameMatches(actualDef.Selector, name) {
+ def.merge(actualDef)
+ }
+ }
+ return def
+}
+
+func boolToString(b bool) string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+// Serialize converts the Editorconfig to a slice of bytes, containing the
+// content of the file in the INI format.
+func (e *Editorconfig) Serialize() ([]byte, error) {
+ var (
+ iniFile = ini.Empty()
+ buffer = bytes.NewBuffer(nil)
+ )
+ iniFile.Section(ini.DEFAULT_SECTION).Comment = "http://editorconfig.org"
+ if e.Root {
+ iniFile.Section(ini.DEFAULT_SECTION).Key("root").SetValue(boolToString(e.Root))
+ }
+ for _, d := range e.Definitions {
+ iniSec := iniFile.Section(d.Selector)
+ if len(d.Charset) > 0 {
+ iniSec.Key("charset").SetValue(d.Charset)
+ }
+ if len(d.IndentStyle) > 0 {
+ iniSec.Key("indent_style").SetValue(d.IndentStyle)
+ }
+ if len(d.IndentSize) > 0 {
+ iniSec.Key("indent_size").SetValue(d.IndentSize)
+ }
+ if d.TabWidth > 0 && strconv.Itoa(d.TabWidth) != d.IndentSize {
+ iniSec.Key("tab_width").SetValue(strconv.Itoa(d.TabWidth))
+ }
+ if len(d.EndOfLine) > 0 {
+ iniSec.Key("end_of_line").SetValue(d.EndOfLine)
+ }
+ if d.TrimTrailingWhitespace {
+ iniSec.Key("trim_trailing_whitespace").SetValue(boolToString(d.TrimTrailingWhitespace))
+ }
+ if d.InsertFinalNewline {
+ iniSec.Key("insert_final_newline").SetValue(boolToString(d.InsertFinalNewline))
+ }
+ }
+ _, err := iniFile.WriteTo(buffer)
+ if err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// Save saves the Editorconfig to a compatible INI file.
+func (e *Editorconfig) Save(filename string) error {
+ data, err := e.Serialize()
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(filename, data, 0666)
+}
+
+// GetDefinitionForFilename, given a filename, searches
+// for .editorconfig files, starting from the file's folder and
+// walking up through the parent folders, until it finds a file
+// with `root = true` or reaches the filesystem root, and returns
+// the merged editorconfig definition for the given file.
+func GetDefinitionForFilename(filename string) (*Definition, error) {
+ abs, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ definition := &Definition{}
+
+ dir := abs
+ for dir != filepath.Dir(dir) {
+ dir = filepath.Dir(dir)
+ ecFile := filepath.Join(dir, ".editorconfig")
+ if _, err := os.Stat(ecFile); os.IsNotExist(err) {
+ continue
+ }
+ ec, err := ParseFile(ecFile)
+ if err != nil {
+ return nil, err
+ }
+ definition.merge(ec.GetDefinitionForFilename(filename))
+ if ec.Root {
+ break
+ }
+ }
+ return definition, nil
+}
diff --git a/vendor/gopkg.in/gomail.v2/CHANGELOG.md b/vendor/gopkg.in/gomail.v2/CHANGELOG.md
new file mode 100644
index 0000000000..a797ab4c09
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/CHANGELOG.md
@@ -0,0 +1,20 @@
+# Change Log
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/).
+
+## [2.0.0] - 2015-09-02
+
+- Mailer has been removed. It has been replaced by Dialer and Sender.
+- `File` type and the `CreateFile` and `OpenFile` functions have been removed.
+- `Message.Attach` and `Message.Embed` have a new signature.
+- `Message.GetBodyWriter` has been removed. Use `Message.AddAlternativeWriter`
+instead.
+- `Message.Export` has been removed. `Message.WriteTo` can be used instead.
+- `Message.DelHeader` has been removed.
+- The `Bcc` header field is no longer sent. It is far simpler and more
+efficient: the same message is sent to all recipients instead of sending a
+different email to each Bcc address.
+- LoginAuth has been removed. `NewPlainDialer` now implements the LOGIN
+authentication mechanism when needed.
+- Go 1.2 is now required instead of Go 1.3. No external dependencies are used when
+using Go 1.5.
diff --git a/vendor/gopkg.in/gomail.v2/CONTRIBUTING.md b/vendor/gopkg.in/gomail.v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..d5601c257a
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/CONTRIBUTING.md
@@ -0,0 +1,20 @@
+Thank you for contributing to Gomail! Here are a few guidelines:
+
+## Bugs
+
+If you think you found a bug, create an issue and supply the minimum amount
+of code triggering the bug so it can be reproduced.
+
+
+## Fixing a bug
+
+If you want to fix a bug, you can send a pull request. It should contain a
+new test or update an existing one to cover that bug.
+
+
+## New feature proposal
+
+If you think Gomail lacks a feature, you can open an issue or send a pull
+request. I want to keep Gomail's code and API as simple as possible, so please
+describe your needs so we can discuss whether this feature should be added to
+Gomail or not.
diff --git a/vendor/gopkg.in/gomail.v2/LICENSE b/vendor/gopkg.in/gomail.v2/LICENSE
new file mode 100644
index 0000000000..5f5c12af73
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Alexandre Cesaro
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/gomail.v2/README.md b/vendor/gopkg.in/gomail.v2/README.md
new file mode 100644
index 0000000000..b3be9e146b
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/README.md
@@ -0,0 +1,92 @@
+# Gomail
+[![Build Status](https://travis-ci.org/go-gomail/gomail.svg?branch=v2)](https://travis-ci.org/go-gomail/gomail) [![Code Coverage](http://gocover.io/_badge/gopkg.in/gomail.v2)](http://gocover.io/gopkg.in/gomail.v2) [![Documentation](https://godoc.org/gopkg.in/gomail.v2?status.svg)](https://godoc.org/gopkg.in/gomail.v2)
+
+## Introduction
+
+Gomail is a simple and efficient package to send emails. It is well tested and
+documented.
+
+Gomail can only send emails using an SMTP server, but the API is flexible and
+makes it easy to implement other ways of sending emails, such as through a
+local Postfix or another API.
+
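+A minimal sketch of such a custom transport, plugged in through `SendFunc`
+(the function body here is only a placeholder):
+
+    s := gomail.SendFunc(func(from string, to []string, msg io.WriterTo) error {
+        // Hand the message off to any transport you like here.
+        return nil
+    })
+
+    // m is a *gomail.Message built with gomail.NewMessage().
+    if err := gomail.Send(s, m); err != nil {
+        panic(err)
+    }
+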
+It is versioned using [gopkg.in](https://gopkg.in) so I promise
+there will never be backward incompatible changes within each version.
+
+It requires Go 1.2 or newer. With Go 1.5, no external dependencies are used.
+
+
+## Features
+
+Gomail supports:
+- Attachments
+- Embedded images
+- HTML and text templates
+- Automatic encoding of special characters
+- SSL and TLS
+- Sending multiple emails with the same SMTP connection
+
+
+## Documentation
+
+https://godoc.org/gopkg.in/gomail.v2
+
+
+## Download
+
+ go get gopkg.in/gomail.v2
+
+
+## Examples
+
+See the [examples in the documentation](https://godoc.org/gopkg.in/gomail.v2#example-package).
+
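+For orientation, here is a minimal sketch of composing and sending a message
+(host, port, credentials and addresses are placeholders):
+
+    m := gomail.NewMessage()
+    m.SetHeader("From", "alex@example.com")
+    m.SetHeader("To", "bob@example.com")
+    m.SetHeader("Subject", "Hello!")
+    m.SetBody("text/plain", "Hello Bob!")
+
+    d := gomail.NewDialer("smtp.example.com", 587, "user", "123456")
+    if err := d.DialAndSend(m); err != nil {
+        panic(err)
+    }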
+
+## FAQ
+
+### x509: certificate signed by unknown authority
+
+If you get this error it means the certificate used by the SMTP server is not
+considered valid by the client running Gomail. As a quick workaround you can
+bypass the verification of the server's certificate chain and host name by
+setting the Dialer's `TLSConfig` field:
+
+ package main
+
+ import (
+ "crypto/tls"
+
+ "gopkg.in/gomail.v2"
+ )
+
+ func main() {
+ d := gomail.NewDialer("smtp.example.com", 587, "user", "123456")
+ d.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+
+ // Send emails using d.
+ }
+
+Note, however, that this is insecure and should not be used in production.
+
+
+## Contribute
+
+Contributions are more than welcome! See [CONTRIBUTING.md](CONTRIBUTING.md) for
+more info.
+
+
+## Change log
+
+See [CHANGELOG.md](CHANGELOG.md).
+
+
+## License
+
+[MIT](LICENSE)
+
+
+## Contact
+
+You can ask questions on the [Gomail
+thread](https://groups.google.com/d/topic/golang-nuts/jMxZHzvvEVg/discussion)
+in the Go mailing list.
diff --git a/vendor/gopkg.in/gomail.v2/auth.go b/vendor/gopkg.in/gomail.v2/auth.go
new file mode 100644
index 0000000000..d28b83ab7d
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/auth.go
@@ -0,0 +1,49 @@
+package gomail
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/smtp"
+)
+
+// loginAuth is an smtp.Auth that implements the LOGIN authentication mechanism.
+type loginAuth struct {
+ username string
+ password string
+ host string
+}
+
+func (a *loginAuth) Start(server *smtp.ServerInfo) (string, []byte, error) {
+ if !server.TLS {
+ advertised := false
+ for _, mechanism := range server.Auth {
+ if mechanism == "LOGIN" {
+ advertised = true
+ break
+ }
+ }
+ if !advertised {
+ return "", nil, errors.New("gomail: unencrypted connection")
+ }
+ }
+ if server.Name != a.host {
+ return "", nil, errors.New("gomail: wrong host name")
+ }
+ return "LOGIN", nil, nil
+}
+
+func (a *loginAuth) Next(fromServer []byte, more bool) ([]byte, error) {
+ if !more {
+ return nil, nil
+ }
+
+ switch {
+ case bytes.Equal(fromServer, []byte("Username:")):
+ return []byte(a.username), nil
+ case bytes.Equal(fromServer, []byte("Password:")):
+ return []byte(a.password), nil
+ default:
+ return nil, fmt.Errorf("gomail: unexpected server challenge: %s", fromServer)
+ }
+}
diff --git a/vendor/gopkg.in/gomail.v2/doc.go b/vendor/gopkg.in/gomail.v2/doc.go
new file mode 100644
index 0000000000..a8f5091f54
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/doc.go
@@ -0,0 +1,5 @@
+// Package gomail provides a simple interface to compose emails and to mail them
+// efficiently.
+//
+// More info on Github: https://github.com/go-gomail/gomail
+package gomail
diff --git a/vendor/gopkg.in/gomail.v2/message.go b/vendor/gopkg.in/gomail.v2/message.go
new file mode 100644
index 0000000000..4bffb1e7ff
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/message.go
@@ -0,0 +1,322 @@
+package gomail
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+// Message represents an email.
+type Message struct {
+ header header
+ parts []*part
+ attachments []*file
+ embedded []*file
+ charset string
+ encoding Encoding
+ hEncoder mimeEncoder
+ buf bytes.Buffer
+}
+
+type header map[string][]string
+
+type part struct {
+ contentType string
+ copier func(io.Writer) error
+ encoding Encoding
+}
+
+// NewMessage creates a new message. It uses UTF-8 and quoted-printable encoding
+// by default.
+func NewMessage(settings ...MessageSetting) *Message {
+ m := &Message{
+ header: make(header),
+ charset: "UTF-8",
+ encoding: QuotedPrintable,
+ }
+
+ m.applySettings(settings)
+
+ if m.encoding == Base64 {
+ m.hEncoder = bEncoding
+ } else {
+ m.hEncoder = qEncoding
+ }
+
+ return m
+}
+
+// Reset resets the message so it can be reused. The message keeps its previous
+// settings so it is in the same state as after a call to NewMessage.
+func (m *Message) Reset() {
+ for k := range m.header {
+ delete(m.header, k)
+ }
+ m.parts = nil
+ m.attachments = nil
+ m.embedded = nil
+}
+
+func (m *Message) applySettings(settings []MessageSetting) {
+ for _, s := range settings {
+ s(m)
+ }
+}
+
+// A MessageSetting can be used as an argument in NewMessage to configure an
+// email.
+type MessageSetting func(m *Message)
+
+// SetCharset is a message setting to set the charset of the email.
+func SetCharset(charset string) MessageSetting {
+ return func(m *Message) {
+ m.charset = charset
+ }
+}
+
+// SetEncoding is a message setting to set the encoding of the email.
+func SetEncoding(enc Encoding) MessageSetting {
+ return func(m *Message) {
+ m.encoding = enc
+ }
+}
+
+// Encoding represents a MIME encoding scheme like quoted-printable or base64.
+type Encoding string
+
+const (
+ // QuotedPrintable represents the quoted-printable encoding as defined in
+ // RFC 2045.
+ QuotedPrintable Encoding = "quoted-printable"
+ // Base64 represents the base64 encoding as defined in RFC 2045.
+ Base64 Encoding = "base64"
+ // Unencoded can be used to avoid encoding the body of an email. The headers
+ // will still be encoded using quoted-printable encoding.
+ Unencoded Encoding = "8bit"
+)
+
+// SetHeader sets a value to the given header field.
+func (m *Message) SetHeader(field string, value ...string) {
+ m.encodeHeader(value)
+ m.header[field] = value
+}
+
+func (m *Message) encodeHeader(values []string) {
+ for i := range values {
+ values[i] = m.encodeString(values[i])
+ }
+}
+
+func (m *Message) encodeString(value string) string {
+ return m.hEncoder.Encode(m.charset, value)
+}
+
+// SetHeaders sets the message headers.
+func (m *Message) SetHeaders(h map[string][]string) {
+ for k, v := range h {
+ m.SetHeader(k, v...)
+ }
+}
+
+// SetAddressHeader sets an address to the given header field.
+func (m *Message) SetAddressHeader(field, address, name string) {
+ m.header[field] = []string{m.FormatAddress(address, name)}
+}
+
+// FormatAddress formats an address and a name as a valid RFC 5322 address.
+func (m *Message) FormatAddress(address, name string) string {
+ if name == "" {
+ return address
+ }
+
+ enc := m.encodeString(name)
+ if enc == name {
+ m.buf.WriteByte('"')
+ for i := 0; i < len(name); i++ {
+ b := name[i]
+ if b == '\\' || b == '"' {
+ m.buf.WriteByte('\\')
+ }
+ m.buf.WriteByte(b)
+ }
+ m.buf.WriteByte('"')
+ } else if hasSpecials(name) {
+ m.buf.WriteString(bEncoding.Encode(m.charset, name))
+ } else {
+ m.buf.WriteString(enc)
+ }
+ m.buf.WriteString(" <")
+ m.buf.WriteString(address)
+ m.buf.WriteByte('>')
+
+ addr := m.buf.String()
+ m.buf.Reset()
+ return addr
+}
+
+func hasSpecials(text string) bool {
+ for i := 0; i < len(text); i++ {
+ switch c := text[i]; c {
+ case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '.', '"':
+ return true
+ }
+ }
+
+ return false
+}
+
+// SetDateHeader sets a date to the given header field.
+func (m *Message) SetDateHeader(field string, date time.Time) {
+ m.header[field] = []string{m.FormatDate(date)}
+}
+
+// FormatDate formats a date as a valid RFC 5322 date.
+func (m *Message) FormatDate(date time.Time) string {
+ return date.Format(time.RFC1123Z)
+}
+
+// GetHeader gets a header field.
+func (m *Message) GetHeader(field string) []string {
+ return m.header[field]
+}
+
+// SetBody sets the body of the message. It replaces any content previously set
+// by SetBody, AddAlternative or AddAlternativeWriter.
+func (m *Message) SetBody(contentType, body string, settings ...PartSetting) {
+ m.parts = []*part{m.newPart(contentType, newCopier(body), settings)}
+}
+
+// AddAlternative adds an alternative part to the message.
+//
+// It is commonly used to send HTML emails that default to the plain text
+// version for backward compatibility. AddAlternative appends the new part to
+// the end of the message. So the plain text part should be added before the
+// HTML part. See http://en.wikipedia.org/wiki/MIME#Alternative
+func (m *Message) AddAlternative(contentType, body string, settings ...PartSetting) {
+ m.AddAlternativeWriter(contentType, newCopier(body), settings...)
+}
+
+func newCopier(s string) func(io.Writer) error {
+ return func(w io.Writer) error {
+ _, err := io.WriteString(w, s)
+ return err
+ }
+}
+
+// AddAlternativeWriter adds an alternative part to the message. It can be
+// useful with the text/template or html/template packages.
+func (m *Message) AddAlternativeWriter(contentType string, f func(io.Writer) error, settings ...PartSetting) {
+ m.parts = append(m.parts, m.newPart(contentType, f, settings))
+}
+
+func (m *Message) newPart(contentType string, f func(io.Writer) error, settings []PartSetting) *part {
+ p := &part{
+ contentType: contentType,
+ copier: f,
+ encoding: m.encoding,
+ }
+
+ for _, s := range settings {
+ s(p)
+ }
+
+ return p
+}
+
+// A PartSetting can be used as an argument in Message.SetBody,
+// Message.AddAlternative or Message.AddAlternativeWriter to configure the part
+// added to a message.
+type PartSetting func(*part)
+
+// SetPartEncoding sets the encoding of the part added to the message. By
+// default, parts use the same encoding as the message.
+func SetPartEncoding(e Encoding) PartSetting {
+ return PartSetting(func(p *part) {
+ p.encoding = e
+ })
+}
+
+type file struct {
+ Name string
+ Header map[string][]string
+ CopyFunc func(w io.Writer) error
+}
+
+func (f *file) setHeader(field, value string) {
+ f.Header[field] = []string{value}
+}
+
+// A FileSetting can be used as an argument in Message.Attach or Message.Embed.
+type FileSetting func(*file)
+
+// SetHeader is a file setting to set the MIME header of the message part that
+// contains the file content.
+//
+// Mandatory headers are automatically added if they are not set when sending
+// the email.
+func SetHeader(h map[string][]string) FileSetting {
+ return func(f *file) {
+ for k, v := range h {
+ f.Header[k] = v
+ }
+ }
+}
+
+// Rename is a file setting to set the name of the attachment if the name is
+// different from the filename on disk.
+func Rename(name string) FileSetting {
+ return func(f *file) {
+ f.Name = name
+ }
+}
+
+// SetCopyFunc is a file setting to replace the function that runs when the
+// message is sent. It should copy the content of the file to the io.Writer.
+//
+// The default copy function opens the file with the given filename, and copies
+// its content to the io.Writer.
+func SetCopyFunc(f func(io.Writer) error) FileSetting {
+ return func(fi *file) {
+ fi.CopyFunc = f
+ }
+}
+
+func (m *Message) appendFile(list []*file, name string, settings []FileSetting) []*file {
+ f := &file{
+ Name: filepath.Base(name),
+ Header: make(map[string][]string),
+ CopyFunc: func(w io.Writer) error {
+ h, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(w, h); err != nil {
+ h.Close()
+ return err
+ }
+ return h.Close()
+ },
+ }
+
+ for _, s := range settings {
+ s(f)
+ }
+
+ if list == nil {
+ return []*file{f}
+ }
+
+ return append(list, f)
+}
+
+// Attach attaches the files to the email.
+func (m *Message) Attach(filename string, settings ...FileSetting) {
+ m.attachments = m.appendFile(m.attachments, filename, settings)
+}
+
+// Embed embeds the images to the email.
+func (m *Message) Embed(filename string, settings ...FileSetting) {
+ m.embedded = m.appendFile(m.embedded, filename, settings)
+}
diff --git a/vendor/gopkg.in/gomail.v2/mime.go b/vendor/gopkg.in/gomail.v2/mime.go
new file mode 100644
index 0000000000..194d4a769a
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/mime.go
@@ -0,0 +1,21 @@
+// +build go1.5
+
+package gomail
+
+import (
+ "mime"
+ "mime/quotedprintable"
+ "strings"
+)
+
+var newQPWriter = quotedprintable.NewWriter
+
+type mimeEncoder struct {
+ mime.WordEncoder
+}
+
+var (
+ bEncoding = mimeEncoder{mime.BEncoding}
+ qEncoding = mimeEncoder{mime.QEncoding}
+ lastIndexByte = strings.LastIndexByte
+)
diff --git a/vendor/gopkg.in/gomail.v2/mime_go14.go b/vendor/gopkg.in/gomail.v2/mime_go14.go
new file mode 100644
index 0000000000..3dc26aa2ae
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/mime_go14.go
@@ -0,0 +1,25 @@
+// +build !go1.5
+
+package gomail
+
+import "gopkg.in/alexcesaro/quotedprintable.v3"
+
+var newQPWriter = quotedprintable.NewWriter
+
+type mimeEncoder struct {
+ quotedprintable.WordEncoder
+}
+
+var (
+ bEncoding = mimeEncoder{quotedprintable.BEncoding}
+ qEncoding = mimeEncoder{quotedprintable.QEncoding}
+ lastIndexByte = func(s string, c byte) int {
+ for i := len(s) - 1; i >= 0; i-- {
+
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+ }
+)
diff --git a/vendor/gopkg.in/gomail.v2/send.go b/vendor/gopkg.in/gomail.v2/send.go
new file mode 100644
index 0000000000..9115ebe726
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/send.go
@@ -0,0 +1,116 @@
+package gomail
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/mail"
+)
+
+// Sender is the interface that wraps the Send method.
+//
+// Send sends an email to the given addresses.
+type Sender interface {
+ Send(from string, to []string, msg io.WriterTo) error
+}
+
+// SendCloser is the interface that groups the Send and Close methods.
+type SendCloser interface {
+ Sender
+ Close() error
+}
+
+// A SendFunc is a function that sends emails to the given addresses.
+//
+// The SendFunc type is an adapter to allow the use of ordinary functions as
+// email senders. If f is a function with the appropriate signature, SendFunc(f)
+// is a Sender object that calls f.
+type SendFunc func(from string, to []string, msg io.WriterTo) error
+
+// Send calls f(from, to, msg).
+func (f SendFunc) Send(from string, to []string, msg io.WriterTo) error {
+ return f(from, to, msg)
+}
+
+// Send sends emails using the given Sender.
+func Send(s Sender, msg ...*Message) error {
+ for i, m := range msg {
+ if err := send(s, m); err != nil {
+ return fmt.Errorf("gomail: could not send email %d: %v", i+1, err)
+ }
+ }
+
+ return nil
+}
+
+func send(s Sender, m *Message) error {
+ from, err := m.getFrom()
+ if err != nil {
+ return err
+ }
+
+ to, err := m.getRecipients()
+ if err != nil {
+ return err
+ }
+
+ if err := s.Send(from, to, m); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *Message) getFrom() (string, error) {
+ from := m.header["Sender"]
+ if len(from) == 0 {
+ from = m.header["From"]
+ if len(from) == 0 {
+ return "", errors.New(`gomail: invalid message, "From" field is absent`)
+ }
+ }
+
+ return parseAddress(from[0])
+}
+
+func (m *Message) getRecipients() ([]string, error) {
+ n := 0
+ for _, field := range []string{"To", "Cc", "Bcc"} {
+ if addresses, ok := m.header[field]; ok {
+ n += len(addresses)
+ }
+ }
+ list := make([]string, 0, n)
+
+ for _, field := range []string{"To", "Cc", "Bcc"} {
+ if addresses, ok := m.header[field]; ok {
+ for _, a := range addresses {
+ addr, err := parseAddress(a)
+ if err != nil {
+ return nil, err
+ }
+ list = addAddress(list, addr)
+ }
+ }
+ }
+
+ return list, nil
+}
+
+func addAddress(list []string, addr string) []string {
+ for _, a := range list {
+ if addr == a {
+ return list
+ }
+ }
+
+ return append(list, addr)
+}
+
+func parseAddress(field string) (string, error) {
+ addr, err := mail.ParseAddress(field)
+ if err != nil {
+ return "", fmt.Errorf("gomail: invalid address %q: %v", field, err)
+ }
+ return addr.Address, nil
+}
diff --git a/vendor/gopkg.in/gomail.v2/smtp.go b/vendor/gopkg.in/gomail.v2/smtp.go
new file mode 100644
index 0000000000..2aa49c8b61
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/smtp.go
@@ -0,0 +1,202 @@
+package gomail
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "net/smtp"
+ "strings"
+ "time"
+)
+
+// A Dialer is a dialer to an SMTP server.
+type Dialer struct {
+ // Host represents the host of the SMTP server.
+ Host string
+ // Port represents the port of the SMTP server.
+ Port int
+ // Username is the username to use to authenticate to the SMTP server.
+ Username string
+ // Password is the password to use to authenticate to the SMTP server.
+ Password string
+ // Auth represents the authentication mechanism used to authenticate to the
+ // SMTP server.
+ Auth smtp.Auth
+ // SSL defines whether an SSL connection is used. It should be false in
+ // most cases since the authentication mechanism should use the STARTTLS
+ // extension instead.
+ SSL bool
+	// TLSConfig represents the TLS configuration used for the TLS (when the
+ // STARTTLS extension is used) or SSL connection.
+ TLSConfig *tls.Config
+ // LocalName is the hostname sent to the SMTP server with the HELO command.
+ // By default, "localhost" is sent.
+ LocalName string
+}
+
+// NewDialer returns a new SMTP Dialer. The given parameters are used to connect
+// to the SMTP server.
+func NewDialer(host string, port int, username, password string) *Dialer {
+ return &Dialer{
+ Host: host,
+ Port: port,
+ Username: username,
+ Password: password,
+ SSL: port == 465,
+ }
+}
+
+// NewPlainDialer returns a new SMTP Dialer. The given parameters are used to
+// connect to the SMTP server.
+//
+// Deprecated: Use NewDialer instead.
+func NewPlainDialer(host string, port int, username, password string) *Dialer {
+ return NewDialer(host, port, username, password)
+}
+
+// Dial dials and authenticates to an SMTP server. The returned SendCloser
+// should be closed when done using it.
+func (d *Dialer) Dial() (SendCloser, error) {
+ conn, err := netDialTimeout("tcp", addr(d.Host, d.Port), 10*time.Second)
+ if err != nil {
+ return nil, err
+ }
+
+ if d.SSL {
+ conn = tlsClient(conn, d.tlsConfig())
+ }
+
+ c, err := smtpNewClient(conn, d.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ if d.LocalName != "" {
+ if err := c.Hello(d.LocalName); err != nil {
+ return nil, err
+ }
+ }
+
+ if !d.SSL {
+ if ok, _ := c.Extension("STARTTLS"); ok {
+ if err := c.StartTLS(d.tlsConfig()); err != nil {
+ c.Close()
+ return nil, err
+ }
+ }
+ }
+
+ if d.Auth == nil && d.Username != "" {
+ if ok, auths := c.Extension("AUTH"); ok {
+ if strings.Contains(auths, "CRAM-MD5") {
+ d.Auth = smtp.CRAMMD5Auth(d.Username, d.Password)
+ } else if strings.Contains(auths, "LOGIN") &&
+ !strings.Contains(auths, "PLAIN") {
+ d.Auth = &loginAuth{
+ username: d.Username,
+ password: d.Password,
+ host: d.Host,
+ }
+ } else {
+ d.Auth = smtp.PlainAuth("", d.Username, d.Password, d.Host)
+ }
+ }
+ }
+
+ if d.Auth != nil {
+ if err = c.Auth(d.Auth); err != nil {
+ c.Close()
+ return nil, err
+ }
+ }
+
+ return &smtpSender{c, d}, nil
+}
+
+func (d *Dialer) tlsConfig() *tls.Config {
+ if d.TLSConfig == nil {
+ return &tls.Config{ServerName: d.Host}
+ }
+ return d.TLSConfig
+}
+
+func addr(host string, port int) string {
+ return fmt.Sprintf("%s:%d", host, port)
+}
+
+// DialAndSend opens a connection to the SMTP server, sends the given emails and
+// closes the connection.
+func (d *Dialer) DialAndSend(m ...*Message) error {
+ s, err := d.Dial()
+ if err != nil {
+ return err
+ }
+ defer s.Close()
+
+ return Send(s, m...)
+}
+
+type smtpSender struct {
+ smtpClient
+ d *Dialer
+}
+
+func (c *smtpSender) Send(from string, to []string, msg io.WriterTo) error {
+ if err := c.Mail(from); err != nil {
+ if err == io.EOF {
+ // This is probably due to a timeout, so reconnect and try again.
+ sc, derr := c.d.Dial()
+ if derr == nil {
+ if s, ok := sc.(*smtpSender); ok {
+ *c = *s
+ return c.Send(from, to, msg)
+ }
+ }
+ }
+ return err
+ }
+
+ for _, addr := range to {
+ if err := c.Rcpt(addr); err != nil {
+ return err
+ }
+ }
+
+ w, err := c.Data()
+ if err != nil {
+ return err
+ }
+
+ if _, err = msg.WriteTo(w); err != nil {
+ w.Close()
+ return err
+ }
+
+ return w.Close()
+}
+
+func (c *smtpSender) Close() error {
+ return c.Quit()
+}
+
+// Stubbed out for tests.
+var (
+ netDialTimeout = net.DialTimeout
+ tlsClient = tls.Client
+ smtpNewClient = func(conn net.Conn, host string) (smtpClient, error) {
+ return smtp.NewClient(conn, host)
+ }
+)
+
+type smtpClient interface {
+ Hello(string) error
+ Extension(string) (bool, string)
+ StartTLS(*tls.Config) error
+ Auth(smtp.Auth) error
+ Mail(string) error
+ Rcpt(string) error
+ Data() (io.WriteCloser, error)
+ Quit() error
+ Close() error
+}
diff --git a/vendor/gopkg.in/gomail.v2/writeto.go b/vendor/gopkg.in/gomail.v2/writeto.go
new file mode 100644
index 0000000000..9fb6b86e80
--- /dev/null
+++ b/vendor/gopkg.in/gomail.v2/writeto.go
@@ -0,0 +1,306 @@
+package gomail
+
+import (
+ "encoding/base64"
+ "errors"
+ "io"
+ "mime"
+ "mime/multipart"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// WriteTo implements io.WriterTo. It dumps the whole message into w.
+func (m *Message) WriteTo(w io.Writer) (int64, error) {
+ mw := &messageWriter{w: w}
+ mw.writeMessage(m)
+ return mw.n, mw.err
+}
+
+func (w *messageWriter) writeMessage(m *Message) {
+ if _, ok := m.header["Mime-Version"]; !ok {
+ w.writeString("Mime-Version: 1.0\r\n")
+ }
+ if _, ok := m.header["Date"]; !ok {
+ w.writeHeader("Date", m.FormatDate(now()))
+ }
+ w.writeHeaders(m.header)
+
+ if m.hasMixedPart() {
+ w.openMultipart("mixed")
+ }
+
+ if m.hasRelatedPart() {
+ w.openMultipart("related")
+ }
+
+ if m.hasAlternativePart() {
+ w.openMultipart("alternative")
+ }
+ for _, part := range m.parts {
+ w.writePart(part, m.charset)
+ }
+ if m.hasAlternativePart() {
+ w.closeMultipart()
+ }
+
+ w.addFiles(m.embedded, false)
+ if m.hasRelatedPart() {
+ w.closeMultipart()
+ }
+
+ w.addFiles(m.attachments, true)
+ if m.hasMixedPart() {
+ w.closeMultipart()
+ }
+}
+
+func (m *Message) hasMixedPart() bool {
+ return (len(m.parts) > 0 && len(m.attachments) > 0) || len(m.attachments) > 1
+}
+
+func (m *Message) hasRelatedPart() bool {
+ return (len(m.parts) > 0 && len(m.embedded) > 0) || len(m.embedded) > 1
+}
+
+func (m *Message) hasAlternativePart() bool {
+ return len(m.parts) > 1
+}
+
+type messageWriter struct {
+ w io.Writer
+ n int64
+ writers [3]*multipart.Writer
+ partWriter io.Writer
+ depth uint8
+ err error
+}
+
+func (w *messageWriter) openMultipart(mimeType string) {
+ mw := multipart.NewWriter(w)
+ contentType := "multipart/" + mimeType + ";\r\n boundary=" + mw.Boundary()
+ w.writers[w.depth] = mw
+
+ if w.depth == 0 {
+ w.writeHeader("Content-Type", contentType)
+ w.writeString("\r\n")
+ } else {
+ w.createPart(map[string][]string{
+ "Content-Type": {contentType},
+ })
+ }
+ w.depth++
+}
+
+func (w *messageWriter) createPart(h map[string][]string) {
+ w.partWriter, w.err = w.writers[w.depth-1].CreatePart(h)
+}
+
+func (w *messageWriter) closeMultipart() {
+ if w.depth > 0 {
+ w.writers[w.depth-1].Close()
+ w.depth--
+ }
+}
+
+func (w *messageWriter) writePart(p *part, charset string) {
+ w.writeHeaders(map[string][]string{
+ "Content-Type": {p.contentType + "; charset=" + charset},
+ "Content-Transfer-Encoding": {string(p.encoding)},
+ })
+ w.writeBody(p.copier, p.encoding)
+}
+
+func (w *messageWriter) addFiles(files []*file, isAttachment bool) {
+ for _, f := range files {
+ if _, ok := f.Header["Content-Type"]; !ok {
+ mediaType := mime.TypeByExtension(filepath.Ext(f.Name))
+ if mediaType == "" {
+ mediaType = "application/octet-stream"
+ }
+ f.setHeader("Content-Type", mediaType+`; name="`+f.Name+`"`)
+ }
+
+ if _, ok := f.Header["Content-Transfer-Encoding"]; !ok {
+ f.setHeader("Content-Transfer-Encoding", string(Base64))
+ }
+
+ if _, ok := f.Header["Content-Disposition"]; !ok {
+ var disp string
+ if isAttachment {
+ disp = "attachment"
+ } else {
+ disp = "inline"
+ }
+ f.setHeader("Content-Disposition", disp+`; filename="`+f.Name+`"`)
+ }
+
+ if !isAttachment {
+ if _, ok := f.Header["Content-ID"]; !ok {
+ f.setHeader("Content-ID", "<"+f.Name+">")
+ }
+ }
+ w.writeHeaders(f.Header)
+ w.writeBody(f.CopyFunc, Base64)
+ }
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, errors.New("gomail: cannot write as writer is in error")
+ }
+
+ var n int
+ n, w.err = w.w.Write(p)
+ w.n += int64(n)
+ return n, w.err
+}
+
+func (w *messageWriter) writeString(s string) {
+ n, _ := io.WriteString(w.w, s)
+ w.n += int64(n)
+}
+
+func (w *messageWriter) writeHeader(k string, v ...string) {
+ w.writeString(k)
+ if len(v) == 0 {
+ w.writeString(":\r\n")
+ return
+ }
+ w.writeString(": ")
+
+ // Max header line length is 78 characters in RFC 5322 and 76 characters
+ // in RFC 2047. So for the sake of simplicity we use the 76 characters
+ // limit.
+ charsLeft := 76 - len(k) - len(": ")
+
+ for i, s := range v {
+ // If the line is already too long, insert a newline right away.
+ if charsLeft < 1 {
+ if i == 0 {
+ w.writeString("\r\n ")
+ } else {
+ w.writeString(",\r\n ")
+ }
+ charsLeft = 75
+ } else if i != 0 {
+ w.writeString(", ")
+ charsLeft -= 2
+ }
+
+ // While the header content is too long, fold it by inserting a newline.
+ for len(s) > charsLeft {
+ s = w.writeLine(s, charsLeft)
+ charsLeft = 75
+ }
+ w.writeString(s)
+ if i := lastIndexByte(s, '\n'); i != -1 {
+ charsLeft = 75 - (len(s) - i - 1)
+ } else {
+ charsLeft -= len(s)
+ }
+ }
+ w.writeString("\r\n")
+}
+
+func (w *messageWriter) writeLine(s string, charsLeft int) string {
+	// If there is already a newline before the limit, write the line.
+ if i := strings.IndexByte(s, '\n'); i != -1 && i < charsLeft {
+ w.writeString(s[:i+1])
+ return s[i+1:]
+ }
+
+ for i := charsLeft - 1; i >= 0; i-- {
+ if s[i] == ' ' {
+ w.writeString(s[:i])
+ w.writeString("\r\n ")
+ return s[i+1:]
+ }
+ }
+
+ // We could not insert a newline cleanly so look for a space or a newline
+ // even if it is after the limit.
+ for i := 75; i < len(s); i++ {
+ if s[i] == ' ' {
+ w.writeString(s[:i])
+ w.writeString("\r\n ")
+ return s[i+1:]
+ }
+ if s[i] == '\n' {
+ w.writeString(s[:i+1])
+ return s[i+1:]
+ }
+ }
+
+ // Too bad, no space or newline in the whole string. Just write everything.
+ w.writeString(s)
+ return ""
+}
+
+func (w *messageWriter) writeHeaders(h map[string][]string) {
+ if w.depth == 0 {
+ for k, v := range h {
+ if k != "Bcc" {
+ w.writeHeader(k, v...)
+ }
+ }
+ } else {
+ w.createPart(h)
+ }
+}
+
+func (w *messageWriter) writeBody(f func(io.Writer) error, enc Encoding) {
+ var subWriter io.Writer
+ if w.depth == 0 {
+ w.writeString("\r\n")
+ subWriter = w.w
+ } else {
+ subWriter = w.partWriter
+ }
+
+ if enc == Base64 {
+ wc := base64.NewEncoder(base64.StdEncoding, newBase64LineWriter(subWriter))
+ w.err = f(wc)
+ wc.Close()
+ } else if enc == Unencoded {
+ w.err = f(subWriter)
+ } else {
+ wc := newQPWriter(subWriter)
+ w.err = f(wc)
+ wc.Close()
+ }
+}
+
+// As required by RFC 2045, 6.7. (page 21) for quoted-printable, and
+// RFC 2045, 6.8. (page 25) for base64.
+const maxLineLen = 76
+
+// base64LineWriter limits text encoded in base64 to 76 characters per line
+type base64LineWriter struct {
+ w io.Writer
+ lineLen int
+}
+
+func newBase64LineWriter(w io.Writer) *base64LineWriter {
+ return &base64LineWriter{w: w}
+}
+
+func (w *base64LineWriter) Write(p []byte) (int, error) {
+ n := 0
+ for len(p)+w.lineLen > maxLineLen {
+ w.w.Write(p[:maxLineLen-w.lineLen])
+ w.w.Write([]byte("\r\n"))
+ p = p[maxLineLen-w.lineLen:]
+ n += maxLineLen - w.lineLen
+ w.lineLen = 0
+ }
+
+ w.w.Write(p)
+ w.lineLen += len(p)
+
+ return n + len(p), nil
+}
+
+// Stubbed out for testing.
+var now = time.Now
diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile
new file mode 100644
index 0000000000..ac034e5258
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/Makefile
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+ go vet
diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md
new file mode 100644
index 0000000000..a939d75e9f
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/README.md
@@ -0,0 +1,703 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Feature
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+ go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+ go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
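+
+For example, to update to the latest tagged revision:
+
+    go get -u gopkg.in/ini.v1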
+
+### Testing
+
+If you want to run the tests on your machine, please apply the `-t` flag:
+
+ go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many data sources as you want**. Passing any other type will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+If you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files and some of them may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a previously missing file becomes available and you call the `Reload` method, it will be loaded as usual.
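+
+A minimal sketch of that behavior (the file names are just placeholders):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+// ... later, after "filename_404" has been created on disk:
+err = cfg.Reload() // now the previously missing file is parsed as well
+```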
+
+#### Ignore cases of key name
+
+When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without a value as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can use advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they are kept in the same format as you read them.
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just pass an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+Same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash (map) of keys and their corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values as other types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value
+// to use when the key is not found or the value fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Well, I hate continuation lines, how do I disable that?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap!
+
+Note that quotes (single or double) around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get a value from given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value is returned if the value of the key is not in the candidates you gave, and the default value does not need to be one of the candidates.
+
+To validate value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the zero value of the type for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save the configuration is to write it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or in the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look it up in the parent section, and so on, until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Auto-increment Key Names
+
+If the key name is `-` in the data source, then it is treated as the special syntax for auto-increment key names, starting from 1, and every section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // Things can be simpler.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // Just map a section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't give me my file back from struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct fields and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts the field name to the `ALL_CAPS_UNDERSCORE` format, then matches the section or key.
+- `TitleUnderscore`: converts the field name to the `title_underscore` format, then matches the section or key.
+
+To use them:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
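+
+A minimal sketch reusing the `Info` struct above (assuming `ReflectFromWithMapper` takes the mapper as its final argument, mirroring `MapToWithMapper`):
+
+```go
+cfg := ini.Empty()
+err = ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore)
+// The resulting key is written as "package_name" rather than "PackageName".
+```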
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+ cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you read and write values, so a lock is needed to keep your data safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
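+
+A minimal sketch of a read-only setup (`app.ini` is a placeholder file name):
+
+```go
+cfg, err := ini.Load("app.ini")
+// ...
+cfg.BlockMode = false // skip locking; only safe if nothing writes to cfg from now on
+val := cfg.Section("").Key("key name").String()
+```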
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I would like to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+To make those changes I had to break the API, so it's safer to keep it in another place and start using `gopkg.in` to version the package. (PS: shorter import path)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/gopkg.in/ini.v1/README_ZH.md b/vendor/gopkg.in/ini.v1/README_ZH.md
new file mode 100644
index 0000000000..2178e47895
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/README_ZH.md
@@ -0,0 +1,690 @@
+本包提供了 Go 语言中读写 INI 文件的功能。
+
+## 功能特性
+
+- 支持覆盖加载多个数据源(`[]byte` 或文件)
+- 支持递归读取键值
+- 支持读取父子分区
+- 支持读取自增键名
+- 支持读取多行的键值
+- 支持大量辅助方法
+- 支持在读取时直接转换为 Go 语言类型
+- 支持读取和 **写入** 分区和键的注释
+- 轻松操作分区、键值和注释
+- 在保存文件时分区和键值会保持原有的顺序
+
+## 下载安装
+
+使用一个特定版本:
+
+ go get gopkg.in/ini.v1
+
+使用最新版:
+
+ go get github.com/go-ini/ini
+
+如需更新请添加 `-u` 选项。
+
+### 测试安装
+
+如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
+
+ go get -t gopkg.in/ini.v1
+
+如需更新请添加 `-u` 选项。
+
+## 开始使用
+
+### 从数据源加载
+
+一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+或者从一个空白的文件开始:
+
+```go
+cfg := ini.Empty()
+```
+
+当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
+
+#### 忽略键名的大小写
+
+有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 和 sec2 指向同一个分区对象
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 和 key2 指向同一个键对象
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### 类似 MySQL 配置中的布尔值键
+
+MySQL 的配置文件中会出现没有具体值的布尔类型的键:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
+
+### 操作分区(Section)
+
+获取指定分区:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+如果您想要获取默认分区,则可以用空字符串代替分区名:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+当您非常确定某个分区是存在的,可以使用以下简便方法:
+
+```go
+section := cfg.Section("")
+```
+
+如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
+
+创建一个分区:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+获取所有分区对象或名称:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### 操作键(Key)
+
+获取某个分区下的键:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+和分区一样,您也可以直接获取键而忽略错误处理:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+判断某个键是否存在:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+创建一个新的键:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+获取分区下的所有键或键名:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+获取分区下的所有键值对的克隆:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### 操作键值(Value)
+
+获取一个类型为字符串(string)的值:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+获取值的同时通过自定义函数进行处理验证:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+判断某个原值是否存在:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+获取其它类型的值:
+
+```go
+// 布尔值的规则:
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
+// 当键不存在或者转换失败时,则会直接返回该默认值。
+// 但是,MustString 方法必须传递一个默认值。
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+如果我的值有好多行怎么办?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+嗯哼?小 case!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+简直是小菜一碟!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+哇靠给力啊!
+
+需要注意的是,值两侧的单引号会被自动剔除:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+这就是全部了?哈哈,当然不是。
+
+#### 操作键值的辅助方法
+
+获取键值时设定候选值:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
+
+验证获取的值是否在指定范围内:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### 自动分割键值到切片(slice)
+
+当存在无效输入时,使用零值代替:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+从结果切片中剔除无效输入:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+当存在无效输入时,直接返回错误:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### 保存配置
+
+终于到了这个时刻,是时候保存一下配置了。
+
+比较原始的做法是输出配置到某个文件:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+### 高级用法
+
+#### 递归读取键值
+
+在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+#### 读取父子分区
+
+您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### 获取上级父分区下的所有键名
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+#### 读取自增键名
+
+如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### 映射到结构
+
+想要使用更加面向对象的方式玩转 INI 吗?好主意。
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // 一切竟可以如此的简单。
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // 嗯哼?只需要映射一个分区吗?
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
+
+### 从结构反射
+
+可是,我有说不能吗?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+瞧瞧,奇迹发生了。
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### 名称映射器(Name Mapper)
+
+为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
+
+目前有 2 款内置的映射器:
+
+- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
+- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
+
+使用方法:
+
+```go
+type Info struct{
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
+
+#### 值映射器(Value Mapper)
+
+值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+ cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
+
+#### 映射/反射的其它说明
+
+任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## 获取帮助
+
+- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
+- [创建工单](https://github.com/go-ini/ini/issues/new)
+
+## 常见问题
+
+### 字段 `BlockMode` 是什么?
+
+默认情况下,本库会在您进行读写操作时采用锁机制来确保数据安全。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
+
+### 为什么要写另一个 INI 解析库?
+
+许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
+
+为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 0000000000..80afe74315
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
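+// ErrDelimiterNotFound is the error returned when the parser cannot find a key-value delimiter on a line.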
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
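+// IsErrDelimiterNotFound reports whether the given error is an ErrDelimiterNotFound.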
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go
new file mode 100644
index 0000000000..cd065e7822
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/ini.go
@@ -0,0 +1,501 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // Name for default section. You can use this constant or the string literal.
+ // In most cases, an empty string is all you need to access the section.
+ DEFAULT_SECTION = "DEFAULT"
+
+ // Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+ _VERSION = "1.21.1"
+)
+
+// Version returns current package version literal.
+func Version() string {
+ return _VERSION
+}
+
+var (
+ // Delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows
+ // at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Indicate whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+
+ // Explicitly write DEFAULT section header
+ DefaultHeader = false
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
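+// bytesReadCloser wraps an io.Reader with a no-op Close method.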
+type bytesReadCloser struct {
+ reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+ return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+ return nil
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ // Make sure data is safe in multiple goroutines.
+ lock sync.RWMutex
+
+ // Allow combination of multiple data sources.
+ dataSources []dataSource
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ // To keep data in order.
+ sectionList []string
+
+ options LoadOptions
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+ }
+}
+
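+// LoadOptions contains options that customize how data sources are loaded.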
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // AllowBooleanKeys indicates whether to allow boolean-type keys (keys without a value)
+ // or to treat them as having a missing value. Keys of this type are mostly used in my.cnf.
+ AllowBooleanKeys bool
+}
+
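+// LoadSources loads data sources with the given customized LoadOptions.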
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data ([]byte).
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except that it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except that it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+ // Ignore error here, we're sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ } else if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes the named section exists and creates a new one when it does not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's safe to ignore the error here because the only possible error is an
+ // empty section name, and in that case this code path is never reached.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+ sections := make([]*Section, len(f.sectionList))
+ for i := range f.sectionList {
+ sections[i] = f.Section(f.sectionList[i])
+ }
+ return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+// WriteToIndent writes content into io.Writer with given indentation.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+ // Use a buffer to make sure the target writer is untouched until encoding finishes.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ }
+ if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return 0, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+ // longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.ContainsAny(kname, "\"=:") {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ }
+ if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+ if _, err = buf.WriteString(kname); err != nil {
+ return 0, err
+ }
+
+ if key.isBooleanType {
+ continue
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ val := key.value
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ // Put a line between sections
+ if _, err = buf.WriteString(LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+ // Note: Because we are truncating with os.Create,
+ // it's safer to save to a temporary file location and rename after we're done.
+ tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+ defer os.Remove(tmpPath)
+
+ fw, err := os.Create(tmpPath)
+ if err != nil {
+ return err
+ }
+
+ if _, err = f.WriteToIndent(fw, indent); err != nil {
+ fw.Close()
+ return err
+ }
+ fw.Close()
+
+ // Remove old file and rename the new one.
+ os.Remove(filename)
+ return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 0000000000..9738c55a21
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,633 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ Comment string
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns the raw value of the key, for performance purposes.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ val := k.value
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+ if strings.Index(val, "%") == -1 {
+ return val
+ }
+
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+ // Substitute the variable reference with the resolved value.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
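+
+// Usage sketch for the Range helpers (illustrative only; cfg and the key name are examples):
+//
+//	// Values outside [0, 65535] fall back to the default 8080.
+//	port := cfg.Section("server").Key("HTTP_PORT").RangeInt(8080, 0, 65535)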
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(str, delim)
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.getFloat64s(delim, true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.getInts(delim, true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.getInt64s(delim, true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.getUints(delim, true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.getUint64s(delim, true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.getTimesFormat(format, delim, true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If a value is not a float, it
+// will not be included in the result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.getFloat64s(delim, false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If a value is not an integer, it will
+// not be included in the result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.getInts(delim, false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If a value is not a 64-bit integer,
+// it will not be included in the result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.getInt64s(delim, false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If a value is not an unsigned integer,
+// it will not be included in the result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.getUints(delim, false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If a value is not a 64-bit unsigned
+// integer, it will not be included in the result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.getUint64s(delim, false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.getTimesFormat(format, delim, false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.getFloat64s(delim, false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.getInts(delim, false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.getInt64s(delim, false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.getUints(delim, false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.getUint64s(delim, false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.getTimesFormat(format, delim, false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
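+
+// The three list flavours above differ only in how invalid entries are handled.
+// Sketch for a key whose raw value is "1,2,x":
+//
+//	key.Ints(",")       // [1 2 0]: invalid entries become the zero value
+//	key.ValidInts(",")  // [1 2]:   invalid entries are silently dropped
+//	key.StrictInts(",") // error:   parsing stops at the first invalid entry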
+
+// getFloat64s returns list of float64 divided by given delimiter.
+func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ strs := k.Strings(delim)
+ vals := make([]float64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getInts returns list of int divided by given delimiter.
+func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ strs := k.Strings(delim)
+ vals := make([]int, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.Atoi(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getInt64s returns list of int64 divided by given delimiter.
+func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ strs := k.Strings(delim)
+ vals := make([]int64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getUints returns list of uint divided by given delimiter.
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ strs := k.Strings(delim)
+ vals := make([]uint, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 0)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, uint(val))
+ }
+ }
+ return vals, nil
+}
+
+// getUint64s returns list of uint64 divided by given delimiter.
+func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ strs := k.Strings(delim)
+ vals := make([]uint64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ strs := k.Strings(delim)
+ vals := make([]time.Time, 0, len(strs))
+ for _, str := range strs {
+ val, err := time.Parse(format, str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go
new file mode 100644
index 0000000000..dc6df87a6c
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/parser.go
@@ -0,0 +1,325 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM consumes the UTF-8 byte order mark at the beginning of the input, if present.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if the key name is surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Extract the key name
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], "=:")
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, "=:")
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote checks whether the first and last characters of in
+// are the given quote character (" or ').
+// It returns false if the same kind of quote also appears anywhere in between.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ // Won't be able to reach here if value only contains whitespace.
+ line = strings.TrimSpace(line)
+
+ // Check continuation lines when desired.
+ if !ignoreContinuation && line[len(line)-1] == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ i := strings.IndexAny(line, "#;")
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ // Trim single quotes
+ if hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"') {
+ line = line[1 : len(line)-1]
+ }
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ section, _ := f.NewSection(DEFAULT_SECTION)
+
+ var line []byte
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we keep the trailing line break here because it is needed
+ // when a following comment line is appended; it is trimmed once
+ // at the end, when the comment is assigned to its section or key.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+ closeIdx := bytes.LastIndex(line, []byte("]"))
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+ continue
+ }
+
+ kname, offset, err := readKeyName(line)
+ if err != nil {
+ // Treat it as a boolean key when allowed; the whole line is the key name.
+ if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+ key, err := section.NewKey(string(line), "true")
+ if err != nil {
+ return err
+ }
+ key.isBooleanType = true
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ key, err := section.NewKey(kname, "")
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+
+ value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
+ if err != nil {
+ return err
+ }
+ key.SetValue(value)
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ }
+ return nil
+}
diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go
new file mode 100644
index 0000000000..bbb73caf8c
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/section.go
@@ -0,0 +1,206 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ s.keys[name].value = val
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+ return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key returns the named key; if it does not exist, a new empty key is created and returned.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
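+
+// Sketch of the difference between GetKey and Key (illustrative only):
+//
+//	k, err := sec.GetKey("missing") // err != nil, k == nil
+//	k = sec.Key("missing")          // never nil; an empty key is created on demand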
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
new file mode 100644
index 0000000000..d00fb4b837
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/struct.go
@@ -0,0 +1,431 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
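+
+// For example, both mappers turn a Go field name into an INI key name:
+//
+//	AllCapsUnderscore("MaxIdleConns") // "MAX_IDLE_CONNS"
+//	TitleUnderscore("MaxIdleConns")   // "max_idle_conns"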
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+ strs := key.Strings(delim)
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals = key.Ints(delim)
+ case reflect.Int64:
+ vals = key.Int64s(delim)
+ case reflect.Uint:
+ vals = key.Uints(delim)
+ case reflect.Uint64:
+ vals = key.Uint64s(delim)
+ case reflect.Float64:
+ vals = key.Float64s(delim)
+ case reflectTime:
+ vals = key.Times(delim)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+// setWithProperType sets a proper value to the field based on its type,
+// but it does not return an error when parsing fails,
+// because we want to keep the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return nil
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil || intVal == 0 {
+ return nil
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return nil
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return nil
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return nil
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
+ fieldName := s.parseFieldName(tpField.Name, opts[0])
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
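+
+// Minimal mapping sketch (the struct and INI content are illustrative only):
+//
+//	type ServerConfig struct {
+//		Host string `ini:"HOST"`
+//		Port int    `ini:"PORT"`
+//	}
+//
+//	var conf ServerConfig
+//	// Keys outside any section belong to the default section, which MapTo starts from.
+//	err := MapTo(&conf, []byte("HOST = example.com\nPORT = 8080"))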
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ sliceOf := field.Type().Elem().Kind()
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications to support time.Time.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflectTime:
+ return v.Interface().(time.Time).IsZero()
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ opts := strings.SplitN(tag, ",", 2)
+ if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, opts[0])
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only possible error here is that the section does not exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+ // Note: Same reason as for the section above.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/gopkg.in/ldap.v2/LICENSE b/vendor/gopkg.in/ldap.v2/LICENSE
new file mode 100644
index 0000000000..7448756763
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/ldap.v2/Makefile b/vendor/gopkg.in/ldap.v2/Makefile
new file mode 100644
index 0000000000..c1fc966573
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/Makefile
@@ -0,0 +1,42 @@
+.PHONY: default install build test quicktest fmt vet lint
+
+default: fmt vet lint build quicktest
+
+install:
+ go get -t -v ./...
+
+build:
+ go build -v ./...
+
+test:
+ go test -v -cover ./...
+
+quicktest:
+ go test ./...
+
+# Capture output and force failure when there is non-empty output
+fmt:
+ @echo gofmt -l .
+ @OUTPUT=`gofmt -l . 2>&1`; \
+ if [ "$$OUTPUT" ]; then \
+ echo "gofmt must be run on the following files:"; \
+ echo "$$OUTPUT"; \
+ exit 1; \
+ fi
+
+# Only run on go1.5+
+vet:
+ go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult .
+
+# https://github.com/golang/lint
+# go get github.com/golang/lint/golint
+# Capture output and force failure when there is non-empty output
+# Only run on go1.5+
+lint:
+ @echo golint ./...
+ @OUTPUT=`golint ./... 2>&1`; \
+ if [ "$$OUTPUT" ]; then \
+ echo "golint errors:"; \
+ echo "$$OUTPUT"; \
+ exit 1; \
+ fi
diff --git a/vendor/gopkg.in/ldap.v2/README.md b/vendor/gopkg.in/ldap.v2/README.md
new file mode 100644
index 0000000000..a26ed2d82b
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/README.md
@@ -0,0 +1,53 @@
+[![GoDoc](https://godoc.org/gopkg.in/ldap.v2?status.svg)](https://godoc.org/gopkg.in/ldap.v2)
+[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
+
+# Basic LDAP v3 functionality for the Go programming language.
+
+## Install
+
+For the latest version use:
+
+ go get gopkg.in/ldap.v2
+
+Import the latest version with:
+
+ import "gopkg.in/ldap.v2"
+
+## Required Libraries:
+
+ - gopkg.in/asn1-ber.v1
+
+## Features:
+
+ - Connecting to LDAP server (non-TLS, TLS, STARTTLS)
+ - Binding to LDAP server
+ - Searching for entries
+ - Filter Compile / Decompile
+ - Paging Search Results
+ - Modify Requests / Responses
+ - Add Requests / Responses
+ - Delete Requests / Responses
+
+## Examples:
+
+ - search
+ - modify
+
+## Contributing:
+
+Bug reports and pull requests are welcome!
+
+Before submitting a pull request, please make sure tests and verification scripts pass:
+```
+make all
+```
+
+To set up a pre-push hook to run the tests and verify scripts before pushing:
+```
+ln -s ../../.githooks/pre-push .git/hooks/pre-push
+```
+
+---
+The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)
+The design is licensed under the Creative Commons 3.0 Attributions license.
+Read this article for more details: http://blog.golang.org/gopher
diff --git a/vendor/gopkg.in/ldap.v2/add.go b/vendor/gopkg.in/ldap.v2/add.go
new file mode 100644
index 0000000000..0e5f6cdba1
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/add.go
@@ -0,0 +1,113 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// AddRequest ::= [APPLICATION 8] SEQUENCE {
+// entry LDAPDN,
+// attributes AttributeList }
+//
+// AttributeList ::= SEQUENCE OF attribute Attribute
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Attribute represents an LDAP attribute
+type Attribute struct {
+ // Type is the name of the LDAP attribute
+ Type string
+ // Vals are the LDAP attribute values
+ Vals []string
+}
+
+func (a *Attribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range a.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// AddRequest represents an LDAP AddRequest operation
+type AddRequest struct {
+ // DN identifies the entry being added
+ DN string
+ // Attributes list the attributes of the new entry
+ Attributes []Attribute
+}
+
+func (a AddRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN"))
+ attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range a.Attributes {
+ attributes.AppendChild(attribute.encode())
+ }
+ request.AppendChild(attributes)
+ return request
+}
+
+// Attribute adds an attribute with the given type and values
+func (a *AddRequest) Attribute(attrType string, attrVals []string) {
+ a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
+}
+
+// NewAddRequest returns an AddRequest for the given DN, with no attributes
+func NewAddRequest(dn string) *AddRequest {
+ return &AddRequest{
+ DN: dn,
+ }
+
+}
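+
+// Illustrative sketch: build a request, then send it over an existing *Conn (l):
+//
+//	req := NewAddRequest("cn=jdoe,ou=people,dc=example,dc=com")
+//	req.Attribute("objectClass", []string{"inetOrgPerson"})
+//	req.Attribute("cn", []string{"jdoe"})
+//	err := l.Add(req)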
+
+// Add performs the given AddRequest
+func (l *Conn) Add(addRequest *AddRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(addRequest.encode())
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationAddResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
diff --git a/vendor/gopkg.in/ldap.v2/bind.go b/vendor/gopkg.in/ldap.v2/bind.go
new file mode 100644
index 0000000000..26b3cc7270
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/bind.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "errors"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// SimpleBindRequest represents a username/password bind operation
+type SimpleBindRequest struct {
+ // Username is the name of the Directory object that the client wishes to bind as
+ Username string
+ // Password is the credentials to bind with
+ Password string
+ // Controls are optional controls to send with the bind request
+ Controls []Control
+}
+
+// SimpleBindResult contains the response from the server
+type SimpleBindResult struct {
+ Controls []Control
+}
+
+// NewSimpleBindRequest returns a bind request
+func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
+ return &SimpleBindRequest{
+ Username: username,
+ Password: password,
+ Controls: controls,
+ }
+}
+
+func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name"))
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password"))
+
+ request.AppendChild(encodeControls(bindRequest.Controls))
+
+ return request
+}
+
+// SimpleBind performs the simple bind operation defined in the given request
+func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ encodedBindRequest := simpleBindRequest.encode()
+ packet.AppendChild(encodedBindRequest)
+
+ if l.Debug {
+ ber.PrintPacket(packet)
+ }
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ result := &SimpleBindResult{
+ Controls: make([]Control, 0),
+ }
+
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ result.Controls = append(result.Controls, DecodeControl(child))
+ }
+ }
+
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return result, NewError(resultCode, errors.New(resultDescription))
+ }
+
+ return result, nil
+}
+
+// Bind performs a bind with the given username and password
+func (l *Conn) Bind(username, password string) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ bindRequest := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request")
+ bindRequest.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version"))
+ bindRequest.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, username, "User Name"))
+ bindRequest.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, password, "Password"))
+ packet.AppendChild(bindRequest)
+
+ if l.Debug {
+ ber.PrintPacket(packet)
+ }
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+
+ return nil
+}
diff --git a/vendor/gopkg.in/ldap.v2/client.go b/vendor/gopkg.in/ldap.v2/client.go
new file mode 100644
index 0000000000..055b27b5fc
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/client.go
@@ -0,0 +1,27 @@
+package ldap
+
+import (
+ "crypto/tls"
+ "time"
+)
+
+// Client knows how to interact with an LDAP server
+type Client interface {
+ Start()
+ StartTLS(config *tls.Config) error
+ Close()
+ SetTimeout(time.Duration)
+
+ Bind(username, password string) error
+ SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error)
+
+ Add(addRequest *AddRequest) error
+ Del(delRequest *DelRequest) error
+ Modify(modifyRequest *ModifyRequest) error
+
+ Compare(dn, attribute, value string) (bool, error)
+ PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error)
+
+ Search(searchRequest *SearchRequest) (*SearchResult, error)
+ SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error)
+}
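+
+// *Conn (defined in conn.go) satisfies Client, so callers can program against the
+// interface and substitute a test double where needed. Illustrative sketch:
+//
+//	func authenticate(c Client, user, pass string) error {
+//		return c.Bind(user, pass)
+//	}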
diff --git a/vendor/gopkg.in/ldap.v2/compare.go b/vendor/gopkg.in/ldap.v2/compare.go
new file mode 100644
index 0000000000..cc6d2af5e5
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/compare.go
@@ -0,0 +1,85 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Compare functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// CompareRequest ::= [APPLICATION 14] SEQUENCE {
+// entry LDAPDN,
+// ava AttributeValueAssertion }
+//
+// AttributeValueAssertion ::= SEQUENCE {
+// attributeDesc AttributeDescription,
+// assertionValue AssertionValue }
+//
+// AttributeDescription ::= LDAPString
+// -- Constrained to <attributedescription>
+// -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Compare checks whether the given attribute of the dn matches value. It returns true if it does,
+// false otherwise, along with any error that occurred.
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN"))
+
+ ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion")
+ ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc"))
+ ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagOctetString, value, "AssertionValue"))
+ request.AppendChild(ava)
+ packet.AppendChild(request)
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return false, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return false, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return false, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationCompareResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode == LDAPResultCompareTrue {
+ return true, nil
+ } else if resultCode == LDAPResultCompareFalse {
+ return false, nil
+ } else {
+ return false, NewError(resultCode, errors.New(resultDescription))
+ }
+ }
+ return false, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag)
+}
diff --git a/vendor/gopkg.in/ldap.v2/conn.go b/vendor/gopkg.in/ldap.v2/conn.go
new file mode 100644
index 0000000000..b5bd99adb5
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/conn.go
@@ -0,0 +1,467 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log"
+ "net"
+ "sync"
+ "time"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ // MessageQuit causes the processMessages loop to exit
+ MessageQuit = 0
+ // MessageRequest sends a request to the server
+ MessageRequest = 1
+ // MessageResponse receives a response from the server
+ MessageResponse = 2
+ // MessageFinish indicates the client considers a particular message ID to be finished
+ MessageFinish = 3
+ // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
+ MessageTimeout = 4
+)
+
+// PacketResponse contains the packet or error encountered reading a response
+type PacketResponse struct {
+ // Packet is the packet read from the server
+ Packet *ber.Packet
+ // Error is an error encountered while reading
+ Error error
+}
+
+// ReadPacket returns the packet or an error
+func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
+ if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
+ }
+ return pr.Packet, pr.Error
+}
+
+type messageContext struct {
+ id int64
+ // close(done) should only be called from finishMessage()
+ done chan struct{}
+ // close(responses) should only be called from processMessages(), and only sent to from sendResponse()
+ responses chan *PacketResponse
+}
+
+// sendResponse should only be called within the processMessages() loop which
+// is also responsible for closing the responses channel.
+func (msgCtx *messageContext) sendResponse(packet *PacketResponse) {
+ select {
+ case msgCtx.responses <- packet:
+ // Successfully sent packet to message handler.
+ case <-msgCtx.done:
+ // The request handler is done and will not receive more
+ // packets.
+ }
+}
+
+type messagePacket struct {
+ Op int
+ MessageID int64
+ Packet *ber.Packet
+ Context *messageContext
+}
+
+type sendMessageFlags uint
+
+const (
+ startTLS sendMessageFlags = 1 << iota
+)
+
+// Conn represents an LDAP Connection
+type Conn struct {
+ conn net.Conn
+ isTLS bool
+ isClosing bool
+ closeErr error
+ isStartingTLS bool
+ Debug debugging
+ chanConfirm chan bool
+ messageContexts map[int64]*messageContext
+ chanMessage chan *messagePacket
+ chanMessageID chan int64
+ wgSender sync.WaitGroup
+ wgClose sync.WaitGroup
+ once sync.Once
+ outstandingRequests uint
+ messageMutex sync.Mutex
+ requestTimeout time.Duration
+}
+
+var _ Client = &Conn{}
+
+// DefaultTimeout is a package-level variable that sets the timeout value
+// used for the Dial and DialTLS methods.
+//
+// WARNING: since this is a package-level variable, setting this value from
+// multiple places will probably result in undesired behaviour.
+var DefaultTimeout = 60 * time.Second
+
+// Dial connects to the given address on the given network using net.DialTimeout
+// with DefaultTimeout and then returns a new Conn for the connection.
+func Dial(network, addr string) (*Conn, error) {
+ c, err := net.DialTimeout(network, addr, DefaultTimeout)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, false)
+ conn.Start()
+ return conn, nil
+}
+
+// DialTLS connects to the given address on the given network using net.DialTimeout
+// with DefaultTimeout, wraps the connection with tls.Client, and returns a new Conn.
+func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
+ dc, err := net.DialTimeout(network, addr, DefaultTimeout)
+ if err != nil {
+ return nil, NewError(ErrorNetwork, err)
+ }
+ c := tls.Client(dc, config)
+ err = c.Handshake()
+ if err != nil {
+ // Handshake error, close the established connection before we return an error
+ dc.Close()
+ return nil, NewError(ErrorNetwork, err)
+ }
+ conn := NewConn(c, true)
+ conn.Start()
+ return conn, nil
+}
+
+// NewConn returns a new Conn using conn for network I/O.
+func NewConn(conn net.Conn, isTLS bool) *Conn {
+ return &Conn{
+ conn: conn,
+ chanConfirm: make(chan bool),
+ chanMessageID: make(chan int64),
+ chanMessage: make(chan *messagePacket, 10),
+ messageContexts: map[int64]*messageContext{},
+ requestTimeout: 0,
+ isTLS: isTLS,
+ }
+}
+
+// Start initializes goroutines to read responses and process messages
+func (l *Conn) Start() {
+ go l.reader()
+ go l.processMessages()
+ l.wgClose.Add(1)
+}
+
+// Close closes the connection.
+func (l *Conn) Close() {
+ l.once.Do(func() {
+ l.isClosing = true
+ l.wgSender.Wait()
+
+ l.Debug.Printf("Sending quit message and waiting for confirmation")
+ l.chanMessage <- &messagePacket{Op: MessageQuit}
+ <-l.chanConfirm
+ close(l.chanMessage)
+
+ l.Debug.Printf("Closing network connection")
+ if err := l.conn.Close(); err != nil {
+ log.Print(err)
+ }
+
+ l.wgClose.Done()
+ })
+ l.wgClose.Wait()
+}
+
+// SetTimeout sets how long to wait after a request is sent before a MessageTimeout is triggered
+func (l *Conn) SetTimeout(timeout time.Duration) {
+ if timeout > 0 {
+ l.requestTimeout = timeout
+ }
+}
+
+// nextMessageID returns the next available messageID
+func (l *Conn) nextMessageID() int64 {
+ if l.chanMessageID != nil {
+ if messageID, ok := <-l.chanMessageID; ok {
+ return messageID
+ }
+ }
+ return 0
+}
+
+// StartTLS sends the command to start a TLS session and then creates a new TLS Client
+func (l *Conn) StartTLS(config *tls.Config) error {
+ if l.isTLS {
+ return NewError(ErrorNetwork, errors.New("ldap: already encrypted"))
+ }
+
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command"))
+ packet.AppendChild(request)
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessageWithFlags(packet, startTLS)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ l.Close()
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if resultCode, message := getLDAPResultCode(packet); resultCode == LDAPResultSuccess {
+ conn := tls.Client(l.conn, config)
+
+ if err := conn.Handshake(); err != nil {
+ l.Close()
+ return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", err))
+ }
+
+ l.isTLS = true
+ l.conn = conn
+ } else {
+ return NewError(resultCode, fmt.Errorf("ldap: cannot StartTLS (%s)", message))
+ }
+ go l.reader()
+
+ return nil
+}
+
+func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
+ return l.sendMessageWithFlags(packet, 0)
+}
+
+func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
+ if l.isClosing {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
+ }
+ l.messageMutex.Lock()
+ l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
+ if l.isStartingTLS {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
+ }
+ if flags&startTLS != 0 {
+ if l.outstandingRequests != 0 {
+ l.messageMutex.Unlock()
+ return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
+ }
+ l.isStartingTLS = true
+ }
+ l.outstandingRequests++
+
+ l.messageMutex.Unlock()
+
+ responses := make(chan *PacketResponse)
+ messageID := packet.Children[0].Value.(int64)
+ message := &messagePacket{
+ Op: MessageRequest,
+ MessageID: messageID,
+ Packet: packet,
+ Context: &messageContext{
+ id: messageID,
+ done: make(chan struct{}),
+ responses: responses,
+ },
+ }
+ l.sendProcessMessage(message)
+ return message.Context, nil
+}
+
+func (l *Conn) finishMessage(msgCtx *messageContext) {
+ close(msgCtx.done)
+
+ if l.isClosing {
+ return
+ }
+
+ l.messageMutex.Lock()
+ l.outstandingRequests--
+ if l.isStartingTLS {
+ l.isStartingTLS = false
+ }
+ l.messageMutex.Unlock()
+
+ message := &messagePacket{
+ Op: MessageFinish,
+ MessageID: msgCtx.id,
+ }
+ l.sendProcessMessage(message)
+}
+
+func (l *Conn) sendProcessMessage(message *messagePacket) bool {
+ if l.isClosing {
+ return false
+ }
+ l.wgSender.Add(1)
+ l.chanMessage <- message
+ l.wgSender.Done()
+ return true
+}
+
+func (l *Conn) processMessages() {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in processMessages: %v", err)
+ }
+ for messageID, msgCtx := range l.messageContexts {
+ // If we are closing due to an error, inform anyone who
+ // is waiting about the error.
+ if l.isClosing && l.closeErr != nil {
+ msgCtx.sendResponse(&PacketResponse{Error: l.closeErr})
+ }
+ l.Debug.Printf("Closing channel for MessageID %d", messageID)
+ close(msgCtx.responses)
+ delete(l.messageContexts, messageID)
+ }
+ close(l.chanMessageID)
+ l.chanConfirm <- true
+ close(l.chanConfirm)
+ }()
+
+ var messageID int64 = 1
+ for {
+ select {
+ case l.chanMessageID <- messageID:
+ messageID++
+ case message, ok := <-l.chanMessage:
+ if !ok {
+ l.Debug.Printf("Shutting down - message channel is closed")
+ return
+ }
+ switch message.Op {
+ case MessageQuit:
+ l.Debug.Printf("Shutting down - quit message received")
+ return
+ case MessageRequest:
+ // Add to message list and write to network
+ l.Debug.Printf("Sending message %d", message.MessageID)
+
+ buf := message.Packet.Bytes()
+ _, err := l.conn.Write(buf)
+ if err != nil {
+ l.Debug.Printf("Error Sending Message: %s", err.Error())
+ message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)})
+ close(message.Context.responses)
+ break
+ }
+
+ // Only add to messageContexts if we were able to
+ // successfully write the message.
+ l.messageContexts[message.MessageID] = message.Context
+
+ // Add timeout if defined
+ if l.requestTimeout > 0 {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
+ }
+ }()
+ time.Sleep(l.requestTimeout)
+ timeoutMessage := &messagePacket{
+ Op: MessageTimeout,
+ MessageID: message.MessageID,
+ }
+ l.sendProcessMessage(timeoutMessage)
+ }()
+ }
+ case MessageResponse:
+ l.Debug.Printf("Receiving message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
+ } else {
+ log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing)
+ ber.PrintPacket(message.Packet)
+ }
+ case MessageTimeout:
+ // Handle the timeout by closing the channel
+ // All reads will return immediately
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ l.Debug.Printf("Receiving message timeout for %d", message.MessageID)
+ msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")})
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ case MessageFinish:
+ l.Debug.Printf("Finished message %d", message.MessageID)
+ if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
+ delete(l.messageContexts, message.MessageID)
+ close(msgCtx.responses)
+ }
+ }
+ }
+ }
+}
+
+func (l *Conn) reader() {
+ cleanstop := false
+ defer func() {
+ if err := recover(); err != nil {
+ log.Printf("ldap: recovered panic in reader: %v", err)
+ }
+ if !cleanstop {
+ l.Close()
+ }
+ }()
+
+ for {
+ if cleanstop {
+ l.Debug.Printf("reader clean stopping (without closing the connection)")
+ return
+ }
+ packet, err := ber.ReadPacket(l.conn)
+ if err != nil {
+ // A read error is expected here if we are closing the connection...
+ if !l.isClosing {
+ l.closeErr = fmt.Errorf("unable to read LDAP response packet: %s", err)
+ l.Debug.Printf("reader error: %s", err.Error())
+ }
+ return
+ }
+ addLDAPDescriptions(packet)
+ if len(packet.Children) == 0 {
+ l.Debug.Printf("Received bad ldap packet")
+ continue
+ }
+ l.messageMutex.Lock()
+ if l.isStartingTLS {
+ cleanstop = true
+ }
+ l.messageMutex.Unlock()
+ message := &messagePacket{
+ Op: MessageResponse,
+ MessageID: packet.Children[0].Value.(int64),
+ Packet: packet,
+ }
+ if !l.sendProcessMessage(message) {
+ return
+ }
+ }
+}
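For orientation, a minimal sketch of how the connection API above could be driven from application code; the host name, timeout and TLS settings are illustrative assumptions and not part of the vendored file.

package main

import (
	"crypto/tls"
	"log"
	"time"

	"gopkg.in/ldap.v2"
)

func main() {
	// Dial uses DefaultTimeout internally and starts the reader and
	// processMessages goroutines via Start().
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Optional: have each request time out individually after 30 seconds.
	conn.SetTimeout(30 * time.Second)

	// Upgrade the plain connection with the StartTLS extended operation.
	if err := conn.StartTLS(&tls.Config{ServerName: "ldap.example.com"}); err != nil {
		log.Fatal(err)
	}
}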
diff --git a/vendor/gopkg.in/ldap.v2/control.go b/vendor/gopkg.in/ldap.v2/control.go
new file mode 100644
index 0000000000..5c62118d46
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/control.go
@@ -0,0 +1,420 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "fmt"
+ "strconv"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
+ ControlTypePaging = "1.2.840.113556.1.4.319"
+ // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+ ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
+ // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
+ // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+ ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
+ // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
+ ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
+)
+
+// ControlTypeMap maps controls to text descriptions
+var ControlTypeMap = map[string]string{
+ ControlTypePaging: "Paging",
+ ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
+ ControlTypeManageDsaIT: "Manage DSA IT",
+}
+
+// Control defines the interface that controls provide to encode and describe themselves
+type Control interface {
+ // GetControlType returns the OID
+ GetControlType() string
+ // Encode returns the ber packet representation
+ Encode() *ber.Packet
+ // String returns a human-readable description
+ String() string
+}
+
+// ControlString implements the Control interface for simple controls
+type ControlString struct {
+ ControlType string
+ Criticality bool
+ ControlValue string
+}
+
+// GetControlType returns the OID
+func (c *ControlString) GetControlType() string {
+ return c.ControlType
+}
+
+// Encode returns the ber packet representation
+func (c *ControlString) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, string(c.ControlValue), "Control Value"))
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlString) String() string {
+ return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
+}
+
+// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
+type ControlPaging struct {
+ // PagingSize indicates the page size
+ PagingSize uint32
+ // Cookie is an opaque value returned by the server to track a paging cursor
+ Cookie []byte
+}
+
+// GetControlType returns the OID
+func (c *ControlPaging) GetControlType() string {
+ return ControlTypePaging
+}
+
+// Encode returns the ber packet representation
+func (c *ControlPaging) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
+
+ p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)")
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value")
+ seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size"))
+ cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie")
+ cookie.Value = c.Cookie
+ cookie.Data.Write(c.Cookie)
+ seq.AppendChild(cookie)
+ p2.AppendChild(seq)
+
+ packet.AppendChild(p2)
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlPaging) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
+ ControlTypeMap[ControlTypePaging],
+ ControlTypePaging,
+ false,
+ c.PagingSize,
+ c.Cookie)
+}
+
+// SetCookie stores the given cookie in the paging control
+func (c *ControlPaging) SetCookie(cookie []byte) {
+ c.Cookie = cookie
+}
+
+// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
+type ControlBeheraPasswordPolicy struct {
+ // Expire contains the number of seconds before a password will expire
+ Expire int64
+ // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
+ Grace int64
+ // Error indicates the error code
+ Error int8
+ // ErrorString is a human readable error
+ ErrorString string
+}
+
+// GetControlType returns the OID
+func (c *ControlBeheraPasswordPolicy) GetControlType() string {
+ return ControlTypeBeheraPasswordPolicy
+}
+
+// Encode returns the ber packet representation
+func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
+
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlBeheraPasswordPolicy) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
+ ControlTypeMap[ControlTypeBeheraPasswordPolicy],
+ ControlTypeBeheraPasswordPolicy,
+ false,
+ c.Expire,
+ c.Grace,
+ c.Error,
+ c.ErrorString)
+}
+
+// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordMustChange struct {
+ // MustChange indicates if the password is required to be changed
+ MustChange bool
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordMustChange) GetControlType() string {
+ return ControlTypeVChuPasswordMustChange
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordMustChange) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t MustChange: %v",
+ ControlTypeMap[ControlTypeVChuPasswordMustChange],
+ ControlTypeVChuPasswordMustChange,
+ false,
+ c.MustChange)
+}
+
+// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
+type ControlVChuPasswordWarning struct {
+ // Expire indicates the time in seconds until the password expires
+ Expire int64
+}
+
+// GetControlType returns the OID
+func (c *ControlVChuPasswordWarning) GetControlType() string {
+ return ControlTypeVChuPasswordWarning
+}
+
+// Encode returns the ber packet representation
+func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
+ return nil
+}
+
+// String returns a human-readable description
+func (c *ControlVChuPasswordWarning) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t Expire: %b",
+ ControlTypeMap[ControlTypeVChuPasswordWarning],
+ ControlTypeVChuPasswordWarning,
+ false,
+ c.Expire)
+}
+
+// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
+type ControlManageDsaIT struct {
+ // Criticality indicates if this control is required
+ Criticality bool
+}
+
+// GetControlType returns the OID
+func (c *ControlManageDsaIT) GetControlType() string {
+ return ControlTypeManageDsaIT
+}
+
+// Encode returns the ber packet representation
+func (c *ControlManageDsaIT) Encode() *ber.Packet {
+ //FIXME
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")"))
+ if c.Criticality {
+ packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality"))
+ }
+ return packet
+}
+
+// String returns a human-readable description
+func (c *ControlManageDsaIT) String() string {
+ return fmt.Sprintf(
+ "Control Type: %s (%q) Criticality: %t",
+ ControlTypeMap[ControlTypeManageDsaIT],
+ ControlTypeManageDsaIT,
+ c.Criticality)
+}
+
+// NewControlManageDsaIT returns a ControlManageDsaIT control
+func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
+ return &ControlManageDsaIT{Criticality: Criticality}
+}
+
+// FindControl returns the first control of the given type in the list, or nil
+func FindControl(controls []Control, controlType string) Control {
+ for _, c := range controls {
+ if c.GetControlType() == controlType {
+ return c
+ }
+ }
+ return nil
+}
+
+// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
+func DecodeControl(packet *ber.Packet) Control {
+ var (
+ ControlType = ""
+ Criticality = false
+ value *ber.Packet
+ )
+
+ switch len(packet.Children) {
+ case 0:
+ // at least one child is required for control type
+ return nil
+
+ case 1:
+ // just type, no criticality or value
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ ControlType = packet.Children[0].Value.(string)
+
+ case 2:
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ ControlType = packet.Children[0].Value.(string)
+
+ // Children[1] could be criticality or value (both are optional)
+ // duck-type on whether this is a boolean
+ if _, ok := packet.Children[1].Value.(bool); ok {
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+ } else {
+ packet.Children[1].Description = "Control Value"
+ value = packet.Children[1]
+ }
+
+ case 3:
+ packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")"
+ ControlType = packet.Children[0].Value.(string)
+
+ packet.Children[1].Description = "Criticality"
+ Criticality = packet.Children[1].Value.(bool)
+
+ packet.Children[2].Description = "Control Value"
+ value = packet.Children[2]
+
+ default:
+ // more than 3 children is invalid
+ return nil
+ }
+
+ switch ControlType {
+ case ControlTypeManageDsaIT:
+ return NewControlManageDsaIT(Criticality)
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ c := new(ControlPaging)
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ value = value.Children[0]
+ value.Description = "Search Control Value"
+ value.Children[0].Description = "Paging Size"
+ value.Children[1].Description = "Cookie"
+ c.PagingSize = uint32(value.Children[0].Value.(int64))
+ c.Cookie = value.Children[1].Data.Bytes()
+ value.Children[1].Value = c.Cookie
+ return c
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera)"
+ c := NewControlBeheraPasswordPolicy()
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+
+ sequence := value.Children[0]
+
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ //Warning
+ child := child.Children[0]
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int64)
+ if ok {
+ if child.Tag == 0 {
+ //timeBeforeExpiration
+ c.Expire = val
+ child.Value = c.Expire
+ } else if child.Tag == 1 {
+ //graceAuthNsRemaining
+ c.Grace = val
+ child.Value = c.Grace
+ }
+ }
+ } else if child.Tag == 1 {
+ // Error
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int8)
+ if !ok {
+ // what to do?
+ val = -1
+ }
+ c.Error = val
+ child.Value = c.Error
+ c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error]
+ }
+ }
+ return c
+ case ControlTypeVChuPasswordMustChange:
+ c := &ControlVChuPasswordMustChange{MustChange: true}
+ return c
+ case ControlTypeVChuPasswordWarning:
+ c := &ControlVChuPasswordWarning{Expire: -1}
+ expireStr := ber.DecodeString(value.Data.Bytes())
+
+ expire, err := strconv.ParseInt(expireStr, 10, 64)
+ if err != nil {
+ return nil
+ }
+ c.Expire = expire
+ value.Value = c.Expire
+
+ return c
+ default:
+ c := new(ControlString)
+ c.ControlType = ControlType
+ c.Criticality = Criticality
+ if value != nil {
+ c.ControlValue = value.Value.(string)
+ }
+ return c
+ }
+}
+
+// NewControlString returns a generic control
+func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
+ return &ControlString{
+ ControlType: controlType,
+ Criticality: criticality,
+ ControlValue: controlValue,
+ }
+}
+
+// NewControlPaging returns a paging control
+func NewControlPaging(pagingSize uint32) *ControlPaging {
+ return &ControlPaging{PagingSize: pagingSize}
+}
+
+// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
+func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
+ return &ControlBeheraPasswordPolicy{
+ Expire: -1,
+ Grace: -1,
+ Error: -1,
+ }
+}
+
+func encodeControls(controls []Control) *ber.Packet {
+ packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls")
+ for _, control := range controls {
+ packet.AppendChild(control.Encode())
+ }
+ return packet
+}
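As a sketch of how these helpers fit together (the values are purely illustrative), a paging control can be built, collected alongside other controls, and located again by OID:

package main

import (
	"fmt"

	"gopkg.in/ldap.v2"
)

func main() {
	// Ask for 100 entries per page; the cookie starts empty and would be
	// replaced with the opaque value the server returns for each page.
	paging := ldap.NewControlPaging(100)
	paging.SetCookie(nil)

	controls := []ldap.Control{paging, ldap.NewControlManageDsaIT(false)}
	if c := ldap.FindControl(controls, ldap.ControlTypePaging); c != nil {
		fmt.Println(c.String())
	}
}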
diff --git a/vendor/gopkg.in/ldap.v2/debug.go b/vendor/gopkg.in/ldap.v2/debug.go
new file mode 100644
index 0000000000..b8a7ecbff1
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/debug.go
@@ -0,0 +1,24 @@
+package ldap
+
+import (
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// debugging type
+// - has a Printf method to write the debug output
+type debugging bool
+
+// write debug output
+func (debug debugging) Printf(format string, args ...interface{}) {
+ if debug {
+ log.Printf(format, args...)
+ }
+}
+
+func (debug debugging) PrintPacket(packet *ber.Packet) {
+ if debug {
+ ber.PrintPacket(packet)
+ }
+}
diff --git a/vendor/gopkg.in/ldap.v2/del.go b/vendor/gopkg.in/ldap.v2/del.go
new file mode 100644
index 0000000000..4fd63dc3f2
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/del.go
@@ -0,0 +1,84 @@
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// DelRequest ::= [APPLICATION 10] LDAPDN
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// DelRequest implements an LDAP deletion request
+type DelRequest struct {
+ // DN is the name of the directory entry to delete
+ DN string
+ // Controls hold optional controls to send with the request
+ Controls []Control
+}
+
+func (d DelRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request")
+ request.Data.Write([]byte(d.DN))
+ return request
+}
+
+// NewDelRequest creates a delete request for the given DN and controls
+func NewDelRequest(DN string,
+ Controls []Control) *DelRequest {
+ return &DelRequest{
+ DN: DN,
+ Controls: Controls,
+ }
+}
+
+// Del executes the given delete request
+func (l *Conn) Del(delRequest *DelRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(delRequest.encode())
+ if delRequest.Controls != nil {
+ packet.AppendChild(encodeControls(delRequest.Controls))
+ }
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationDelResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
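A hypothetical delete call using the request type above might look like this; the server address and DN are assumptions for illustration only:

package main

import (
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Delete a single leaf entry with no controls attached.
	// (A bind would normally precede this; omitted for brevity.)
	req := ldap.NewDelRequest("cn=jdoe,ou=people,dc=example,dc=com", nil)
	if err := conn.Del(req); err != nil {
		log.Fatal(err)
	}
}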
diff --git a/vendor/gopkg.in/ldap.v2/dn.go b/vendor/gopkg.in/ldap.v2/dn.go
new file mode 100644
index 0000000000..cc70c894c2
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/dn.go
@@ -0,0 +1,158 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains DN parsing functionality
+//
+// https://tools.ietf.org/html/rfc4514
+//
+// distinguishedName = [ relativeDistinguishedName
+// *( COMMA relativeDistinguishedName ) ]
+// relativeDistinguishedName = attributeTypeAndValue
+// *( PLUS attributeTypeAndValue )
+// attributeTypeAndValue = attributeType EQUALS attributeValue
+// attributeType = descr / numericoid
+// attributeValue = string / hexstring
+//
+// ; The following characters are to be escaped when they appear
+// ; in the value to be encoded: ESC, one of <escaped>, leading
+// ; SHARP or SPACE, trailing SPACE, and NULL.
+// string = [ ( leadchar / pair ) [ *( stringchar / pair )
+// ( trailchar / pair ) ] ]
+//
+// leadchar = LUTF1 / UTFMB
+// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// trailchar = TUTF1 / UTFMB
+// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// stringchar = SUTF1 / UTFMB
+// SUTF1 = %x01-21 / %x23-2A / %x2D-3A /
+// %x3D / %x3F-5B / %x5D-7F
+//
+// pair = ESC ( ESC / special / hexpair )
+// special = escaped / SPACE / SHARP / EQUALS
+// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
+// hexstring = SHARP 1*hexpair
+// hexpair = HEX HEX
+//
+// where the productions <descr>, <numericoid>, <COMMA>, <DQUOTE>,
+// <EQUALS>, <ESC>, <HEX>, <LANGLE>, <NULL>, <PLUS>, <RANGLE>, <SEMI>,
+// <SPACE>, <SHARP>, and <UTFMB> are defined in [RFC4512].
+//
+
+package ldap
+
+import (
+ "bytes"
+ enchex "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+
+ ber "gopkg.in/asn1-ber.v1"
+)
+
+// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
+type AttributeTypeAndValue struct {
+ // Type is the attribute type
+ Type string
+ // Value is the attribute value
+ Value string
+}
+
+// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
+type RelativeDN struct {
+ Attributes []*AttributeTypeAndValue
+}
+
+// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
+type DN struct {
+ RDNs []*RelativeDN
+}
+
+// ParseDN returns a distinguishedName or an error
+func ParseDN(str string) (*DN, error) {
+ dn := new(DN)
+ dn.RDNs = make([]*RelativeDN, 0)
+ rdn := new(RelativeDN)
+ rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+ buffer := bytes.Buffer{}
+ attribute := new(AttributeTypeAndValue)
+ escaping := false
+
+ for i := 0; i < len(str); i++ {
+ char := str[i]
+ if escaping {
+ escaping = false
+ switch char {
+ case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
+ buffer.WriteByte(char)
+ continue
+ }
+ // Not a special character, assume hex encoded octet
+ if len(str) == i+1 {
+ return nil, errors.New("Got corrupted escaped character")
+ }
+
+ dst := []byte{0}
+ n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to decode escaped character: %s", err)
+ } else if n != 1 {
+ return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n)
+ }
+ buffer.WriteByte(dst[0])
+ i++
+ } else if char == '\\' {
+ escaping = true
+ } else if char == '=' {
+ attribute.Type = buffer.String()
+ buffer.Reset()
+ // Special case: If the first character in the value is # the
+ // following data is BER encoded so we can just fast forward
+ // and decode.
+ if len(str) > i+1 && str[i+1] == '#' {
+ i += 2
+ index := strings.IndexAny(str[i:], ",+")
+ data := str
+ if index > 0 {
+ data = str[i : i+index]
+ } else {
+ data = str[i:]
+ }
+ rawBER, err := enchex.DecodeString(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to decode BER encoding: %s", err)
+ }
+ packet := ber.DecodePacket(rawBER)
+ buffer.WriteString(packet.Data.String())
+ i += len(data) - 1
+ }
+ } else if char == ',' || char == '+' {
+ // We're done with this RDN or value, push it
+ attribute.Value = buffer.String()
+ rdn.Attributes = append(rdn.Attributes, attribute)
+ attribute = new(AttributeTypeAndValue)
+ if char == ',' {
+ dn.RDNs = append(dn.RDNs, rdn)
+ rdn = new(RelativeDN)
+ rdn.Attributes = make([]*AttributeTypeAndValue, 0)
+ }
+ buffer.Reset()
+ } else {
+ buffer.WriteByte(char)
+ }
+ }
+ if buffer.Len() > 0 {
+ if len(attribute.Type) == 0 {
+ return nil, errors.New("DN ended with incomplete type, value pair")
+ }
+ attribute.Value = buffer.String()
+ rdn.Attributes = append(rdn.Attributes, attribute)
+ dn.RDNs = append(dn.RDNs, rdn)
+ }
+ return dn, nil
+}
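A small sketch of ParseDN in use (the DN itself is made up); it walks the parsed structure and prints each attribute type/value pair:

package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	// The escaped "\\+" stays part of the value instead of starting a new
	// attributeTypeAndValue within the RDN.
	dn, err := ldap.ParseDN("cn=Doe\\+Smith,ou=people,dc=example,dc=com")
	if err != nil {
		log.Fatal(err)
	}
	for _, rdn := range dn.RDNs {
		for _, attr := range rdn.Attributes {
			fmt.Printf("%s = %s\n", attr.Type, attr.Value)
		}
	}
}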
diff --git a/vendor/gopkg.in/ldap.v2/doc.go b/vendor/gopkg.in/ldap.v2/doc.go
new file mode 100644
index 0000000000..f20d39bc99
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/doc.go
@@ -0,0 +1,4 @@
+/*
+Package ldap provides basic LDAP v3 functionality.
+*/
+package ldap
diff --git a/vendor/gopkg.in/ldap.v2/error.go b/vendor/gopkg.in/ldap.v2/error.go
new file mode 100644
index 0000000000..ff697873dd
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/error.go
@@ -0,0 +1,148 @@
+package ldap
+
+import (
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Result Codes
+const (
+ LDAPResultSuccess = 0
+ LDAPResultOperationsError = 1
+ LDAPResultProtocolError = 2
+ LDAPResultTimeLimitExceeded = 3
+ LDAPResultSizeLimitExceeded = 4
+ LDAPResultCompareFalse = 5
+ LDAPResultCompareTrue = 6
+ LDAPResultAuthMethodNotSupported = 7
+ LDAPResultStrongAuthRequired = 8
+ LDAPResultReferral = 10
+ LDAPResultAdminLimitExceeded = 11
+ LDAPResultUnavailableCriticalExtension = 12
+ LDAPResultConfidentialityRequired = 13
+ LDAPResultSaslBindInProgress = 14
+ LDAPResultNoSuchAttribute = 16
+ LDAPResultUndefinedAttributeType = 17
+ LDAPResultInappropriateMatching = 18
+ LDAPResultConstraintViolation = 19
+ LDAPResultAttributeOrValueExists = 20
+ LDAPResultInvalidAttributeSyntax = 21
+ LDAPResultNoSuchObject = 32
+ LDAPResultAliasProblem = 33
+ LDAPResultInvalidDNSyntax = 34
+ LDAPResultAliasDereferencingProblem = 36
+ LDAPResultInappropriateAuthentication = 48
+ LDAPResultInvalidCredentials = 49
+ LDAPResultInsufficientAccessRights = 50
+ LDAPResultBusy = 51
+ LDAPResultUnavailable = 52
+ LDAPResultUnwillingToPerform = 53
+ LDAPResultLoopDetect = 54
+ LDAPResultNamingViolation = 64
+ LDAPResultObjectClassViolation = 65
+ LDAPResultNotAllowedOnNonLeaf = 66
+ LDAPResultNotAllowedOnRDN = 67
+ LDAPResultEntryAlreadyExists = 68
+ LDAPResultObjectClassModsProhibited = 69
+ LDAPResultAffectsMultipleDSAs = 71
+ LDAPResultOther = 80
+
+ ErrorNetwork = 200
+ ErrorFilterCompile = 201
+ ErrorFilterDecompile = 202
+ ErrorDebugging = 203
+ ErrorUnexpectedMessage = 204
+ ErrorUnexpectedResponse = 205
+)
+
+// LDAPResultCodeMap contains string descriptions for LDAP error codes
+var LDAPResultCodeMap = map[uint8]string{
+ LDAPResultSuccess: "Success",
+ LDAPResultOperationsError: "Operations Error",
+ LDAPResultProtocolError: "Protocol Error",
+ LDAPResultTimeLimitExceeded: "Time Limit Exceeded",
+ LDAPResultSizeLimitExceeded: "Size Limit Exceeded",
+ LDAPResultCompareFalse: "Compare False",
+ LDAPResultCompareTrue: "Compare True",
+ LDAPResultAuthMethodNotSupported: "Auth Method Not Supported",
+ LDAPResultStrongAuthRequired: "Strong Auth Required",
+ LDAPResultReferral: "Referral",
+ LDAPResultAdminLimitExceeded: "Admin Limit Exceeded",
+ LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension",
+ LDAPResultConfidentialityRequired: "Confidentiality Required",
+ LDAPResultSaslBindInProgress: "Sasl Bind In Progress",
+ LDAPResultNoSuchAttribute: "No Such Attribute",
+ LDAPResultUndefinedAttributeType: "Undefined Attribute Type",
+ LDAPResultInappropriateMatching: "Inappropriate Matching",
+ LDAPResultConstraintViolation: "Constraint Violation",
+ LDAPResultAttributeOrValueExists: "Attribute Or Value Exists",
+ LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax",
+ LDAPResultNoSuchObject: "No Such Object",
+ LDAPResultAliasProblem: "Alias Problem",
+ LDAPResultInvalidDNSyntax: "Invalid DN Syntax",
+ LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem",
+ LDAPResultInappropriateAuthentication: "Inappropriate Authentication",
+ LDAPResultInvalidCredentials: "Invalid Credentials",
+ LDAPResultInsufficientAccessRights: "Insufficient Access Rights",
+ LDAPResultBusy: "Busy",
+ LDAPResultUnavailable: "Unavailable",
+ LDAPResultUnwillingToPerform: "Unwilling To Perform",
+ LDAPResultLoopDetect: "Loop Detect",
+ LDAPResultNamingViolation: "Naming Violation",
+ LDAPResultObjectClassViolation: "Object Class Violation",
+ LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf",
+ LDAPResultNotAllowedOnRDN: "Not Allowed On RDN",
+ LDAPResultEntryAlreadyExists: "Entry Already Exists",
+ LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
+ LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs",
+ LDAPResultOther: "Other",
+}
+
+func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
+ if packet == nil {
+ return ErrorUnexpectedResponse, "Empty packet"
+ } else if len(packet.Children) >= 2 {
+ response := packet.Children[1]
+ if response == nil {
+ return ErrorUnexpectedResponse, "Empty response in packet"
+ }
+ if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 {
+ // Children[1].Children[2] is the diagnosticMessage which is guaranteed to exist as seen here: https://tools.ietf.org/html/rfc4511#section-4.1.9
+ return uint8(response.Children[0].Value.(int64)), response.Children[2].Value.(string)
+ }
+ }
+
+ return ErrorNetwork, "Invalid packet format"
+}
+
+// Error holds LDAP error information
+type Error struct {
+ // Err is the underlying error
+ Err error
+ // ResultCode is the LDAP error code
+ ResultCode uint8
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
+}
+
+// NewError creates an LDAP error with the given code and underlying error
+func NewError(resultCode uint8, err error) error {
+ return &Error{ResultCode: resultCode, Err: err}
+}
+
+// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
+func IsErrorWithCode(err error, desiredResultCode uint8) bool {
+ if err == nil {
+ return false
+ }
+
+ serverError, ok := err.(*Error)
+ if !ok {
+ return false
+ }
+
+ return serverError.ResultCode == desiredResultCode
+}
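A brief sketch of how callers can branch on a result code; the error is constructed directly here only to keep the example self-contained:

package main

import (
	"errors"
	"fmt"

	"gopkg.in/ldap.v2"
)

func main() {
	// In real code err would come back from a search, bind or modify call.
	err := ldap.NewError(ldap.LDAPResultNoSuchObject, errors.New("entry not found"))

	if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
		fmt.Println("treat as a missing entry:", err)
	}
}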
diff --git a/vendor/gopkg.in/ldap.v2/filter.go b/vendor/gopkg.in/ldap.v2/filter.go
new file mode 100644
index 0000000000..7eae310f16
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/filter.go
@@ -0,0 +1,466 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "bytes"
+ hexpac "encoding/hex"
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Filter choices
+const (
+ FilterAnd = 0
+ FilterOr = 1
+ FilterNot = 2
+ FilterEqualityMatch = 3
+ FilterSubstrings = 4
+ FilterGreaterOrEqual = 5
+ FilterLessOrEqual = 6
+ FilterPresent = 7
+ FilterApproxMatch = 8
+ FilterExtensibleMatch = 9
+)
+
+// FilterMap contains human readable descriptions of Filter choices
+var FilterMap = map[uint64]string{
+ FilterAnd: "And",
+ FilterOr: "Or",
+ FilterNot: "Not",
+ FilterEqualityMatch: "Equality Match",
+ FilterSubstrings: "Substrings",
+ FilterGreaterOrEqual: "Greater Or Equal",
+ FilterLessOrEqual: "Less Or Equal",
+ FilterPresent: "Present",
+ FilterApproxMatch: "Approx Match",
+ FilterExtensibleMatch: "Extensible Match",
+}
+
+// SubstringFilter options
+const (
+ FilterSubstringsInitial = 0
+ FilterSubstringsAny = 1
+ FilterSubstringsFinal = 2
+)
+
+// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
+var FilterSubstringsMap = map[uint64]string{
+ FilterSubstringsInitial: "Substrings Initial",
+ FilterSubstringsAny: "Substrings Any",
+ FilterSubstringsFinal: "Substrings Final",
+}
+
+// MatchingRuleAssertion choices
+const (
+ MatchingRuleAssertionMatchingRule = 1
+ MatchingRuleAssertionType = 2
+ MatchingRuleAssertionMatchValue = 3
+ MatchingRuleAssertionDNAttributes = 4
+)
+
+// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
+var MatchingRuleAssertionMap = map[uint64]string{
+ MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
+ MatchingRuleAssertionType: "Matching Rule Assertion Type",
+ MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value",
+ MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
+}
+
+// CompileFilter converts a string representation of a filter into a BER-encoded packet
+func CompileFilter(filter string) (*ber.Packet, error) {
+ if len(filter) == 0 || filter[0] != '(' {
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
+ }
+ packet, pos, err := compileFilter(filter, 1)
+ if err != nil {
+ return nil, err
+ }
+ if pos != len(filter) {
+ return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
+ }
+ return packet, nil
+}
+
+// DecompileFilter converts a packet representation of a filter into a string representation
+func DecompileFilter(packet *ber.Packet) (ret string, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter"))
+ }
+ }()
+ ret = "("
+ err = nil
+ childStr := ""
+
+ switch packet.Tag {
+ case FilterAnd:
+ ret += "&"
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ ret += childStr
+ }
+ case FilterOr:
+ ret += "|"
+ for _, child := range packet.Children {
+ childStr, err = DecompileFilter(child)
+ if err != nil {
+ return
+ }
+ ret += childStr
+ }
+ case FilterNot:
+ ret += "!"
+ childStr, err = DecompileFilter(packet.Children[0])
+ if err != nil {
+ return
+ }
+ ret += childStr
+
+ case FilterSubstrings:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "="
+ for i, child := range packet.Children[1].Children {
+ if i == 0 && child.Tag != FilterSubstringsInitial {
+ ret += "*"
+ }
+ ret += EscapeFilter(ber.DecodeString(child.Data.Bytes()))
+ if child.Tag != FilterSubstringsFinal {
+ ret += "*"
+ }
+ }
+ case FilterEqualityMatch:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterGreaterOrEqual:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += ">="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterLessOrEqual:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "<="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterPresent:
+ ret += ber.DecodeString(packet.Data.Bytes())
+ ret += "=*"
+ case FilterApproxMatch:
+ ret += ber.DecodeString(packet.Children[0].Data.Bytes())
+ ret += "~="
+ ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes()))
+ case FilterExtensibleMatch:
+ attr := ""
+ dnAttributes := false
+ matchingRule := ""
+ value := ""
+
+ for _, child := range packet.Children {
+ switch child.Tag {
+ case MatchingRuleAssertionMatchingRule:
+ matchingRule = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionType:
+ attr = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionMatchValue:
+ value = ber.DecodeString(child.Data.Bytes())
+ case MatchingRuleAssertionDNAttributes:
+ dnAttributes = child.Value.(bool)
+ }
+ }
+
+ if len(attr) > 0 {
+ ret += attr
+ }
+ if dnAttributes {
+ ret += ":dn"
+ }
+ if len(matchingRule) > 0 {
+ ret += ":"
+ ret += matchingRule
+ }
+ ret += ":="
+ ret += EscapeFilter(value)
+ }
+
+ ret += ")"
+ return
+}
+
+func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) {
+ for pos < len(filter) && filter[pos] == '(' {
+ child, newPos, err := compileFilter(filter, pos+1)
+ if err != nil {
+ return pos, err
+ }
+ pos = newPos
+ parent.AppendChild(child)
+ }
+ if pos == len(filter) {
+ return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ }
+
+ return pos + 1, nil
+}
+
+func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
+ var (
+ packet *ber.Packet
+ err error
+ )
+
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter"))
+ }
+ }()
+ newPos := pos
+
+ currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:])
+
+ switch currentRune {
+ case utf8.RuneError:
+ return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ case '(':
+ packet, newPos, err = compileFilter(filter, pos+currentWidth)
+ newPos++
+ return packet, newPos, err
+ case '&':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '|':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr])
+ newPos, err = compileFilterSet(filter, pos+currentWidth, packet)
+ return packet, newPos, err
+ case '!':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot])
+ var child *ber.Packet
+ child, newPos, err = compileFilter(filter, pos+currentWidth)
+ packet.AppendChild(child)
+ return packet, newPos, err
+ default:
+ const (
+ stateReadingAttr = 0
+ stateReadingExtensibleMatchingRule = 1
+ stateReadingCondition = 2
+ )
+
+ state := stateReadingAttr
+
+ attribute := ""
+ extensibleDNAttributes := false
+ extensibleMatchingRule := ""
+ condition := ""
+
+ for newPos < len(filter) {
+ remainingFilter := filter[newPos:]
+ currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter)
+ if currentRune == ')' {
+ break
+ }
+ if currentRune == utf8.RuneError {
+ return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos))
+ }
+
+ switch state {
+ case stateReadingAttr:
+ switch {
+ // Extensible rule, with only DN-matching
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingCondition
+ newPos += 5
+
+ // Extensible rule, with DN-matching and a matching OID
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ extensibleDNAttributes = true
+ state = stateReadingExtensibleMatchingRule
+ newPos += 4
+
+ // Extensible rule, with attr only
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Extensible rule, with no DN attribute matching
+ case currentRune == ':':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
+ state = stateReadingExtensibleMatchingRule
+ newPos++
+
+ // Equality condition
+ case currentRune == '=':
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
+ state = stateReadingCondition
+ newPos++
+
+ // Greater-than or equal
+ case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Less-than or equal
+ case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Approx
+ case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
+ packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the attribute name
+ default:
+ attribute += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingExtensibleMatchingRule:
+ switch {
+
+ // Matching rule OID is done
+ case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
+ state = stateReadingCondition
+ newPos += 2
+
+ // Still reading the matching rule oid
+ default:
+ extensibleMatchingRule += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+
+ case stateReadingCondition:
+ // append to the condition
+ condition += fmt.Sprintf("%c", currentRune)
+ newPos += currentWidth
+ }
+ }
+
+ if newPos == len(filter) {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
+ return packet, newPos, err
+ }
+ if packet == nil {
+ err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter"))
+ return packet, newPos, err
+ }
+
+ switch {
+ case packet.Tag == FilterExtensibleMatch:
+ // MatchingRuleAssertion ::= SEQUENCE {
+ // matchingRule [1] MatchingRuleID OPTIONAL,
+ // type [2] AttributeDescription OPTIONAL,
+ // matchValue [3] AssertionValue,
+ // dnAttributes [4] BOOLEAN DEFAULT FALSE
+ // }
+
+ // Include the matching rule oid, if specified
+ if len(extensibleMatchingRule) > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule]))
+ }
+
+ // Include the attribute, if specified
+ if len(attribute) > 0 {
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType]))
+ }
+
+ // Add the value (only required child)
+ encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
+
+ // Defaults to false, so only include in the sequence if true
+ if extensibleDNAttributes {
+ packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes]))
+ }
+
+ case packet.Tag == FilterEqualityMatch && condition == "*":
+ packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent])
+ case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"):
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ packet.Tag = FilterSubstrings
+ packet.Description = FilterMap[uint64(packet.Tag)]
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings")
+ parts := strings.Split(condition, "*")
+ for i, part := range parts {
+ if part == "" {
+ continue
+ }
+ var tag ber.Tag
+ switch i {
+ case 0:
+ tag = FilterSubstringsInitial
+ case len(parts) - 1:
+ tag = FilterSubstringsFinal
+ default:
+ tag = FilterSubstringsAny
+ }
+ encodedString, encodeErr := escapedStringToEncodedBytes(part)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
+ }
+ packet.AppendChild(seq)
+ default:
+ encodedString, encodeErr := escapedStringToEncodedBytes(condition)
+ if encodeErr != nil {
+ return packet, newPos, encodeErr
+ }
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
+ }
+
+ newPos += currentWidth
+ return packet, newPos, err
+ }
+}
+
+// Convert from "ABC\xx\xx\xx" form to literal bytes for transport
+func escapedStringToEncodedBytes(escapedString string) (string, error) {
+ var buffer bytes.Buffer
+ i := 0
+ for i < len(escapedString) {
+ currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:])
+ if currentRune == utf8.RuneError {
+ return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i))
+ }
+
+ // Check for escaped hex characters and convert them to their literal value for transport.
+ if currentRune == '\\' {
+ // http://tools.ietf.org/search/rfc4515
+ // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not
+ // being a member of UTF1SUBSET.
+ if i+2 > len(escapedString) {
+ return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
+ }
+ escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
+ if decodeErr != nil {
+ return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
+ }
+ buffer.WriteByte(escByte[0])
+ i += 2 // +1 from end of loop, so 3 total for \xx.
+ } else {
+ buffer.WriteRune(currentRune)
+ }
+
+ i += currentWidth
+ }
+ return buffer.String(), nil
+}
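A compile/decompile round trip, as a sketch of the two entry points above (the filter string is arbitrary):

package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	packet, err := ldap.CompileFilter("(&(objectClass=person)(cn=jd*))")
	if err != nil {
		log.Fatal(err)
	}
	back, err := ldap.DecompileFilter(packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back) // prints an equivalent filter string
}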
diff --git a/vendor/gopkg.in/ldap.v2/ldap.go b/vendor/gopkg.in/ldap.v2/ldap.go
new file mode 100644
index 0000000000..90018be83f
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/ldap.go
@@ -0,0 +1,289 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ldap
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+
+ ber "gopkg.in/asn1-ber.v1"
+)
+
+// LDAP Application Codes
+const (
+ ApplicationBindRequest = 0
+ ApplicationBindResponse = 1
+ ApplicationUnbindRequest = 2
+ ApplicationSearchRequest = 3
+ ApplicationSearchResultEntry = 4
+ ApplicationSearchResultDone = 5
+ ApplicationModifyRequest = 6
+ ApplicationModifyResponse = 7
+ ApplicationAddRequest = 8
+ ApplicationAddResponse = 9
+ ApplicationDelRequest = 10
+ ApplicationDelResponse = 11
+ ApplicationModifyDNRequest = 12
+ ApplicationModifyDNResponse = 13
+ ApplicationCompareRequest = 14
+ ApplicationCompareResponse = 15
+ ApplicationAbandonRequest = 16
+ ApplicationSearchResultReference = 19
+ ApplicationExtendedRequest = 23
+ ApplicationExtendedResponse = 24
+)
+
+// ApplicationMap contains human readable descriptions of LDAP Application Codes
+var ApplicationMap = map[uint8]string{
+ ApplicationBindRequest: "Bind Request",
+ ApplicationBindResponse: "Bind Response",
+ ApplicationUnbindRequest: "Unbind Request",
+ ApplicationSearchRequest: "Search Request",
+ ApplicationSearchResultEntry: "Search Result Entry",
+ ApplicationSearchResultDone: "Search Result Done",
+ ApplicationModifyRequest: "Modify Request",
+ ApplicationModifyResponse: "Modify Response",
+ ApplicationAddRequest: "Add Request",
+ ApplicationAddResponse: "Add Response",
+ ApplicationDelRequest: "Del Request",
+ ApplicationDelResponse: "Del Response",
+ ApplicationModifyDNRequest: "Modify DN Request",
+ ApplicationModifyDNResponse: "Modify DN Response",
+ ApplicationCompareRequest: "Compare Request",
+ ApplicationCompareResponse: "Compare Response",
+ ApplicationAbandonRequest: "Abandon Request",
+ ApplicationSearchResultReference: "Search Result Reference",
+ ApplicationExtendedRequest: "Extended Request",
+ ApplicationExtendedResponse: "Extended Response",
+}
+
+// LDAP Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10)
+const (
+ BeheraPasswordExpired = 0
+ BeheraAccountLocked = 1
+ BeheraChangeAfterReset = 2
+ BeheraPasswordModNotAllowed = 3
+ BeheraMustSupplyOldPassword = 4
+ BeheraInsufficientPasswordQuality = 5
+ BeheraPasswordTooShort = 6
+ BeheraPasswordTooYoung = 7
+ BeheraPasswordInHistory = 8
+)
+
+// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
+var BeheraPasswordPolicyErrorMap = map[int8]string{
+ BeheraPasswordExpired: "Password expired",
+ BeheraAccountLocked: "Account locked",
+ BeheraChangeAfterReset: "Password must be changed",
+ BeheraPasswordModNotAllowed: "Policy prevents password modification",
+ BeheraMustSupplyOldPassword: "Policy requires old password in order to change password",
+ BeheraInsufficientPasswordQuality: "Password fails quality checks",
+ BeheraPasswordTooShort: "Password is too short for policy",
+ BeheraPasswordTooYoung: "Password has been changed too recently",
+ BeheraPasswordInHistory: "New password is in list of old passwords",
+}
+
+// Adds descriptions to an LDAP Response packet for debugging
+func addLDAPDescriptions(packet *ber.Packet) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = NewError(ErrorDebugging, errors.New("ldap: cannot process packet to add descriptions"))
+ }
+ }()
+ packet.Description = "LDAP Response"
+ packet.Children[0].Description = "Message ID"
+
+ application := uint8(packet.Children[1].Tag)
+ packet.Children[1].Description = ApplicationMap[application]
+
+ switch application {
+ case ApplicationBindRequest:
+ addRequestDescriptions(packet)
+ case ApplicationBindResponse:
+ addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationUnbindRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchResultEntry:
+ packet.Children[1].Children[0].Description = "Object Name"
+ packet.Children[1].Children[1].Description = "Attributes"
+ for _, child := range packet.Children[1].Children[1].Children {
+ child.Description = "Attribute"
+ child.Children[0].Description = "Attribute Name"
+ child.Children[1].Description = "Attribute Values"
+ for _, grandchild := range child.Children[1].Children {
+ grandchild.Description = "Attribute Value"
+ }
+ }
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+ case ApplicationSearchResultDone:
+ addDefaultLDAPResponseDescriptions(packet)
+ case ApplicationModifyRequest:
+ addRequestDescriptions(packet)
+ case ApplicationModifyResponse:
+ case ApplicationAddRequest:
+ addRequestDescriptions(packet)
+ case ApplicationAddResponse:
+ case ApplicationDelRequest:
+ addRequestDescriptions(packet)
+ case ApplicationDelResponse:
+ case ApplicationModifyDNRequest:
+ addRequestDescriptions(packet)
+ case ApplicationModifyDNResponse:
+ case ApplicationCompareRequest:
+ addRequestDescriptions(packet)
+ case ApplicationCompareResponse:
+ case ApplicationAbandonRequest:
+ addRequestDescriptions(packet)
+ case ApplicationSearchResultReference:
+ case ApplicationExtendedRequest:
+ addRequestDescriptions(packet)
+ case ApplicationExtendedResponse:
+ }
+
+ return nil
+}
+
+func addControlDescriptions(packet *ber.Packet) {
+ packet.Description = "Controls"
+ for _, child := range packet.Children {
+ child.Description = "Control"
+ child.Children[0].Description = "Control Type (" + ControlTypeMap[child.Children[0].Value.(string)] + ")"
+ value := child.Children[1]
+ if len(child.Children) == 3 {
+ child.Children[1].Description = "Criticality"
+ value = child.Children[2]
+ }
+ value.Description = "Control Value"
+
+ switch child.Children[0].Value.(string) {
+ case ControlTypePaging:
+ value.Description += " (Paging)"
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes()
+ value.AppendChild(valueChildren)
+ }
+ value.Children[0].Description = "Real Search Control Value"
+ value.Children[0].Children[0].Description = "Paging Size"
+ value.Children[0].Children[1].Description = "Cookie"
+
+ case ControlTypeBeheraPasswordPolicy:
+ value.Description += " (Password Policy - Behera Draft)"
+ if value.Value != nil {
+ valueChildren := ber.DecodePacket(value.Data.Bytes())
+ value.Data.Truncate(0)
+ value.Value = nil
+ value.AppendChild(valueChildren)
+ }
+ sequence := value.Children[0]
+ for _, child := range sequence.Children {
+ if child.Tag == 0 {
+ //Warning
+ child := child.Children[0]
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int64)
+ if ok {
+ if child.Tag == 0 {
+ //timeBeforeExpiration
+ value.Description += " (TimeBeforeExpiration)"
+ child.Value = val
+ } else if child.Tag == 1 {
+ //graceAuthNsRemaining
+ value.Description += " (GraceAuthNsRemaining)"
+ child.Value = val
+ }
+ }
+ } else if child.Tag == 1 {
+ // Error
+ packet := ber.DecodePacket(child.Data.Bytes())
+ val, ok := packet.Value.(int8)
+ if !ok {
+ val = -1
+ }
+ child.Description = "Error"
+ child.Value = val
+ }
+ }
+ }
+ }
+}
+
+func addRequestDescriptions(packet *ber.Packet) {
+ packet.Description = "LDAP Request"
+ packet.Children[0].Description = "Message ID"
+ packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)]
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+}
+
+func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
+ resultCode, _ := getLDAPResultCode(packet)
+ packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")"
+ packet.Children[1].Children[1].Description = "Matched DN"
+ packet.Children[1].Children[2].Description = "Error Message"
+ if len(packet.Children[1].Children) > 3 {
+ packet.Children[1].Children[3].Description = "Referral"
+ }
+ if len(packet.Children) == 3 {
+ addControlDescriptions(packet.Children[2])
+ }
+}
+
+// DebugBinaryFile reads and prints packets from the given filename
+func DebugBinaryFile(fileName string) error {
+ file, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return NewError(ErrorDebugging, err)
+ }
+ ber.PrintBytes(os.Stdout, file, "")
+ packet := ber.DecodePacket(file)
+ addLDAPDescriptions(packet)
+ ber.PrintPacket(packet)
+
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func mustEscape(c byte) bool {
+ return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0
+}
+
+// EscapeFilter escapes special characters in the provided LDAP filter string:
+// those in the set `()*\`, NUL, and any byte outside the range 0 < c < 0x80,
+// as defined in RFC 4515.
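+//
+// A minimal usage sketch: untrusted input is escaped before being embedded
+// in a filter string.
+//
+//	filter := "(cn=" + EscapeFilter(userInput) + ")"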
+func EscapeFilter(filter string) string {
+ escape := 0
+ for i := 0; i < len(filter); i++ {
+ if mustEscape(filter[i]) {
+ escape++
+ }
+ }
+ if escape == 0 {
+ return filter
+ }
+ buf := make([]byte, len(filter)+escape*2)
+ for i, j := 0, 0; i < len(filter); i++ {
+ c := filter[i]
+ if mustEscape(c) {
+ buf[j+0] = '\\'
+ buf[j+1] = hex[c>>4]
+ buf[j+2] = hex[c&0xf]
+ j += 3
+ } else {
+ buf[j] = c
+ j++
+ }
+ }
+ return string(buf)
+}
diff --git a/vendor/gopkg.in/ldap.v2/modify.go b/vendor/gopkg.in/ldap.v2/modify.go
new file mode 100644
index 0000000000..e4ab6cefc7
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/modify.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Modify functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// ModifyRequest ::= [APPLICATION 6] SEQUENCE {
+// object LDAPDN,
+// changes SEQUENCE OF change SEQUENCE {
+// operation ENUMERATED {
+// add (0),
+// delete (1),
+// replace (2),
+// ... },
+// modification PartialAttribute } }
+//
+// PartialAttribute ::= SEQUENCE {
+// type AttributeDescription,
+// vals SET OF value AttributeValue }
+//
+// AttributeDescription ::= LDAPString
+// -- Constrained to <attributedescription>
+// -- [RFC4512]
+//
+// AttributeValue ::= OCTET STRING
+//
+
+package ldap
+
+import (
+ "errors"
+ "log"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// Change operation choices
+const (
+ AddAttribute = 0
+ DeleteAttribute = 1
+ ReplaceAttribute = 2
+)
+
+// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type PartialAttribute struct {
+ // Type is the type of the partial attribute
+ Type string
+ // Vals are the values of the partial attribute
+ Vals []string
+}
+
+func (p *PartialAttribute) encode() *ber.Packet {
+ seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute")
+ seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type"))
+ set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue")
+ for _, value := range p.Vals {
+ set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals"))
+ }
+ seq.AppendChild(set)
+ return seq
+}
+
+// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
+type ModifyRequest struct {
+ // DN is the distinguishedName of the directory entry to modify
+ DN string
+ // AddAttributes contain the attributes to add
+ AddAttributes []PartialAttribute
+ // DeleteAttributes contain the attributes to delete
+ DeleteAttributes []PartialAttribute
+ // ReplaceAttributes contain the attributes to replace
+ ReplaceAttributes []PartialAttribute
+}
+
+// Add appends the given attribute to the list of attributes to add
+func (m *ModifyRequest) Add(attrType string, attrVals []string) {
+ m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Delete appends the given attribute to the list of attributes to delete
+func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
+ m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+// Replace appends the given attribute to the list of attributes to replace
+func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
+ m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
+}
+
+func (m ModifyRequest) encode() *ber.Packet {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN"))
+ changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes")
+ for _, attribute := range m.AddAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(AddAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ for _, attribute := range m.DeleteAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(DeleteAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ for _, attribute := range m.ReplaceAttributes {
+ change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change")
+ change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(ReplaceAttribute), "Operation"))
+ change.AppendChild(attribute.encode())
+ changes.AppendChild(change)
+ }
+ request.AppendChild(changes)
+ return request
+}
+
+// NewModifyRequest creates a modify request for the given DN
+func NewModifyRequest(
+ dn string,
+) *ModifyRequest {
+ return &ModifyRequest{
+ DN: dn,
+ }
+}
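+
+// A minimal usage sketch (assumes an established *Conn named l):
+//
+//	modify := NewModifyRequest("cn=jdoe,dc=example,dc=org")
+//	modify.Replace("mail", []string{"jdoe@example.org"})
+//	if err := l.Modify(modify); err != nil {
+//		// handle error
+//	}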
+
+// Modify performs the ModifyRequest
+func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ packet.AppendChild(modifyRequest.encode())
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return err
+ }
+ defer l.finishMessage(msgCtx)
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationModifyResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ log.Printf("Unexpected Response: %d", packet.Children[1].Tag)
+ }
+
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return nil
+}
diff --git a/vendor/gopkg.in/ldap.v2/passwdmodify.go b/vendor/gopkg.in/ldap.v2/passwdmodify.go
new file mode 100644
index 0000000000..26110ccf4a
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/passwdmodify.go
@@ -0,0 +1,148 @@
+// This file contains the password modify extended operation as specified in rfc 3062
+//
+// https://tools.ietf.org/html/rfc3062
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+const (
+ passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
+)
+
+// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
+type PasswordModifyRequest struct {
+ // UserIdentity is an optional string representation of the user associated with the request.
+ // This string may or may not be an LDAPDN [RFC2253].
+ // If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
+ UserIdentity string
+ // OldPassword, if present, contains the user's current password
+ OldPassword string
+ // NewPassword, if present, contains the desired password for this user
+ NewPassword string
+}
+
+// PasswordModifyResult holds the server response to a PasswordModifyRequest
+type PasswordModifyResult struct {
+ // GeneratedPassword holds a password generated by the server, if present
+ GeneratedPassword string
+}
+
+func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation")
+ request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID"))
+ extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request")
+ passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request")
+ if r.UserIdentity != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity"))
+ }
+ if r.OldPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password"))
+ }
+ if r.NewPassword != "" {
+ passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password"))
+ }
+
+ extendedRequestValue.AppendChild(passwordModifyRequestValue)
+ request.AppendChild(extendedRequestValue)
+
+ return request, nil
+}
+
+// NewPasswordModifyRequest creates a new PasswordModifyRequest
+//
+// According to RFC 3062:
+// userIdentity is a string representing the user associated with the request.
+// This string may or may not be an LDAPDN (RFC 2253).
+// If userIdentity is empty then the operation will act on the user associated
+// with the session.
+//
+// oldPassword is the user's current password. It may be empty or required
+// depending on the session user's access rights (usually an administrator
+// can change a user's password without knowing the current one) and the
+// password policy (see the pwdSafeModify password policy attribute)
+//
+// newPassword is the user's desired password. If empty, the server may return
+// an error or generate a new password that will be available in
+// PasswordModifyResult.GeneratedPassword
+//
+func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest {
+ return &PasswordModifyRequest{
+ UserIdentity: userIdentity,
+ OldPassword: oldPassword,
+ NewPassword: newPassword,
+ }
+}
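+
+// A minimal usage sketch (assumes an established *Conn named l); leaving
+// newPassword empty asks the server to generate one:
+//
+//	req := NewPasswordModifyRequest("", oldPassword, "")
+//	res, err := l.PasswordModify(req)
+//	// on success, res.GeneratedPassword holds the server-generated password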
+
+// PasswordModify performs the modification request
+func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+
+ encodedPasswordModifyRequest, err := passwordModifyRequest.encode()
+ if err != nil {
+ return nil, err
+ }
+ packet.AppendChild(encodedPasswordModifyRequest)
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &PasswordModifyResult{}
+
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if packet == nil {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message"))
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ if packet.Children[1].Tag == ApplicationExtendedResponse {
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return nil, NewError(resultCode, errors.New(resultDescription))
+ }
+ } else {
+ return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("Unexpected Response: %d", packet.Children[1].Tag))
+ }
+
+ extendedResponse := packet.Children[1]
+ for _, child := range extendedResponse.Children {
+ if child.Tag == 11 {
+ passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
+ if len(passwordModifyResponseValue.Children) == 1 {
+ if passwordModifyResponseValue.Children[0].Tag == 0 {
+ result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
+ }
+ }
+ }
+ }
+
+ return result, nil
+}
diff --git a/vendor/gopkg.in/ldap.v2/search.go b/vendor/gopkg.in/ldap.v2/search.go
new file mode 100644
index 0000000000..2a99894c94
--- /dev/null
+++ b/vendor/gopkg.in/ldap.v2/search.go
@@ -0,0 +1,450 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// File contains Search functionality
+//
+// https://tools.ietf.org/html/rfc4511
+//
+// SearchRequest ::= [APPLICATION 3] SEQUENCE {
+// baseObject LDAPDN,
+// scope ENUMERATED {
+// baseObject (0),
+// singleLevel (1),
+// wholeSubtree (2),
+// ... },
+// derefAliases ENUMERATED {
+// neverDerefAliases (0),
+// derefInSearching (1),
+// derefFindingBaseObj (2),
+// derefAlways (3) },
+// sizeLimit INTEGER (0 .. maxInt),
+// timeLimit INTEGER (0 .. maxInt),
+// typesOnly BOOLEAN,
+// filter Filter,
+// attributes AttributeSelection }
+//
+// AttributeSelection ::= SEQUENCE OF selector LDAPString
+// -- The LDAPString is constrained to
+// -- <attributeSelector> in Section 4.5.1.8
+//
+// Filter ::= CHOICE {
+// and [0] SET SIZE (1..MAX) OF filter Filter,
+// or [1] SET SIZE (1..MAX) OF filter Filter,
+// not [2] Filter,
+// equalityMatch [3] AttributeValueAssertion,
+// substrings [4] SubstringFilter,
+// greaterOrEqual [5] AttributeValueAssertion,
+// lessOrEqual [6] AttributeValueAssertion,
+// present [7] AttributeDescription,
+// approxMatch [8] AttributeValueAssertion,
+// extensibleMatch [9] MatchingRuleAssertion,
+// ... }
+//
+// SubstringFilter ::= SEQUENCE {
+// type AttributeDescription,
+// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE {
+// initial [0] AssertionValue, -- can occur at most once
+// any [1] AssertionValue,
+// final [2] AssertionValue } -- can occur at most once
+// }
+//
+// MatchingRuleAssertion ::= SEQUENCE {
+// matchingRule [1] MatchingRuleId OPTIONAL,
+// type [2] AttributeDescription OPTIONAL,
+// matchValue [3] AssertionValue,
+// dnAttributes [4] BOOLEAN DEFAULT FALSE }
+//
+//
+
+package ldap
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "gopkg.in/asn1-ber.v1"
+)
+
+// scope choices
+const (
+ ScopeBaseObject = 0
+ ScopeSingleLevel = 1
+ ScopeWholeSubtree = 2
+)
+
+// ScopeMap contains human readable descriptions of scope choices
+var ScopeMap = map[int]string{
+ ScopeBaseObject: "Base Object",
+ ScopeSingleLevel: "Single Level",
+ ScopeWholeSubtree: "Whole Subtree",
+}
+
+// derefAliases
+const (
+ NeverDerefAliases = 0
+ DerefInSearching = 1
+ DerefFindingBaseObj = 2
+ DerefAlways = 3
+)
+
+// DerefMap contains human readable descriptions of derefAliases choices
+var DerefMap = map[int]string{
+ NeverDerefAliases: "NeverDerefAliases",
+ DerefInSearching: "DerefInSearching",
+ DerefFindingBaseObj: "DerefFindingBaseObj",
+ DerefAlways: "DerefAlways",
+}
+
+// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs.
+// The map of attributes is iterated in alphabetical key order so that, for the
+// same input map of attributes, the output entry always lists its attributes in the same order
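+//
+// A minimal sketch:
+//
+//	entry := NewEntry("cn=jdoe,dc=example,dc=org", map[string][]string{
+//		"cn":   {"jdoe"},
+//		"mail": {"jdoe@example.org"},
+//	})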
+func NewEntry(dn string, attributes map[string][]string) *Entry {
+ var attributeNames []string
+ for attributeName := range attributes {
+ attributeNames = append(attributeNames, attributeName)
+ }
+ sort.Strings(attributeNames)
+
+ var encodedAttributes []*EntryAttribute
+ for _, attributeName := range attributeNames {
+ encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName]))
+ }
+ return &Entry{
+ DN: dn,
+ Attributes: encodedAttributes,
+ }
+}
+
+// Entry represents a single search result entry
+type Entry struct {
+ // DN is the distinguished name of the entry
+ DN string
+ // Attributes are the returned attributes for the entry
+ Attributes []*EntryAttribute
+}
+
+// GetAttributeValues returns the values for the named attribute, or an empty list
+func (e *Entry) GetAttributeValues(attribute string) []string {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.Values
+ }
+ }
+ return []string{}
+}
+
+// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
+func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
+ for _, attr := range e.Attributes {
+ if attr.Name == attribute {
+ return attr.ByteValues
+ }
+ }
+ return [][]byte{}
+}
+
+// GetAttributeValue returns the first value for the named attribute, or ""
+func (e *Entry) GetAttributeValue(attribute string) string {
+ values := e.GetAttributeValues(attribute)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
+func (e *Entry) GetRawAttributeValue(attribute string) []byte {
+ values := e.GetRawAttributeValues(attribute)
+ if len(values) == 0 {
+ return []byte{}
+ }
+ return values[0]
+}
+
+// Print outputs a human-readable description
+func (e *Entry) Print() {
+ fmt.Printf("DN: %s\n", e.DN)
+ for _, attr := range e.Attributes {
+ attr.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description indenting
+func (e *Entry) PrettyPrint(indent int) {
+ fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
+ for _, attr := range e.Attributes {
+ attr.PrettyPrint(indent + 2)
+ }
+}
+
+// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair
+func NewEntryAttribute(name string, values []string) *EntryAttribute {
+ var bytes [][]byte
+ for _, value := range values {
+ bytes = append(bytes, []byte(value))
+ }
+ return &EntryAttribute{
+ Name: name,
+ Values: values,
+ ByteValues: bytes,
+ }
+}
+
+// EntryAttribute holds a single attribute
+type EntryAttribute struct {
+ // Name is the name of the attribute
+ Name string
+ // Values contain the string values of the attribute
+ Values []string
+ // ByteValues contain the raw values of the attribute
+ ByteValues [][]byte
+}
+
+// Print outputs a human-readable description
+func (e *EntryAttribute) Print() {
+ fmt.Printf("%s: %s\n", e.Name, e.Values)
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (e *EntryAttribute) PrettyPrint(indent int) {
+ fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
+}
+
+// SearchResult holds the server's response to a search request
+type SearchResult struct {
+ // Entries are the returned entries
+ Entries []*Entry
+ // Referrals are the returned referrals
+ Referrals []string
+ // Controls are the returned controls
+ Controls []Control
+}
+
+// Print outputs a human-readable description
+func (s *SearchResult) Print() {
+ for _, entry := range s.Entries {
+ entry.Print()
+ }
+}
+
+// PrettyPrint outputs a human-readable description with indenting
+func (s *SearchResult) PrettyPrint(indent int) {
+ for _, entry := range s.Entries {
+ entry.PrettyPrint(indent)
+ }
+}
+
+// SearchRequest represents a search request to send to the server
+type SearchRequest struct {
+ BaseDN string
+ Scope int
+ DerefAliases int
+ SizeLimit int
+ TimeLimit int
+ TypesOnly bool
+ Filter string
+ Attributes []string
+ Controls []Control
+}
+
+func (s *SearchRequest) encode() (*ber.Packet, error) {
+ request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request")
+ request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit"))
+ request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit"))
+ request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only"))
+ // compile and encode filter
+ filterPacket, err := CompileFilter(s.Filter)
+ if err != nil {
+ return nil, err
+ }
+ request.AppendChild(filterPacket)
+ // encode attributes
+ attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes")
+ for _, attribute := range s.Attributes {
+ attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
+ }
+ request.AppendChild(attributesPacket)
+ return request, nil
+}
+
+// NewSearchRequest creates a new search request
+func NewSearchRequest(
+ BaseDN string,
+ Scope, DerefAliases, SizeLimit, TimeLimit int,
+ TypesOnly bool,
+ Filter string,
+ Attributes []string,
+ Controls []Control,
+) *SearchRequest {
+ return &SearchRequest{
+ BaseDN: BaseDN,
+ Scope: Scope,
+ DerefAliases: DerefAliases,
+ SizeLimit: SizeLimit,
+ TimeLimit: TimeLimit,
+ TypesOnly: TypesOnly,
+ Filter: Filter,
+ Attributes: Attributes,
+ Controls: Controls,
+ }
+}
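+
+// A minimal search sketch (assumes an established *Conn named l):
+//
+//	searchRequest := NewSearchRequest(
+//		"dc=example,dc=org",
+//		ScopeWholeSubtree, NeverDerefAliases, 0, 0, false,
+//		"(objectClass=organizationalPerson)",
+//		[]string{"dn", "cn"},
+//		nil,
+//	)
+//	sr, err := l.Search(searchRequest)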
+
+// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the
+// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically.
+// The following four cases are possible given the arguments:
+// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size
+// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request
+// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries
+// A requested pagingSize of 0 is interpreted as no limit by LDAP servers.
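+//
+// A hedged usage sketch, reusing a request built with NewSearchRequest:
+//
+//	sr, err := l.SearchWithPaging(searchRequest, 32)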
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) {
+ var pagingControl *ControlPaging
+
+ control := FindControl(searchRequest.Controls, ControlTypePaging)
+ if control == nil {
+ pagingControl = NewControlPaging(pagingSize)
+ searchRequest.Controls = append(searchRequest.Controls, pagingControl)
+ } else {
+ castControl, ok := control.(*ControlPaging)
+ if !ok {
+ return nil, fmt.Errorf("Expected paging control to be of type *ControlPaging, got %v", control)
+ }
+ if castControl.PagingSize != pagingSize {
+ return nil, fmt.Errorf("Paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize)
+ }
+ pagingControl = castControl
+ }
+
+ searchResult := new(SearchResult)
+ for {
+ result, err := l.Search(searchRequest)
+ l.Debug.Printf("Looking for Paging Control...")
+ if err != nil {
+ return searchResult, err
+ }
+ if result == nil {
+ return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received"))
+ }
+
+ for _, entry := range result.Entries {
+ searchResult.Entries = append(searchResult.Entries, entry)
+ }
+ for _, referral := range result.Referrals {
+ searchResult.Referrals = append(searchResult.Referrals, referral)
+ }
+ for _, control := range result.Controls {
+ searchResult.Controls = append(searchResult.Controls, control)
+ }
+
+ l.Debug.Printf("Looking for Paging Control...")
+ pagingResult := FindControl(result.Controls, ControlTypePaging)
+ if pagingResult == nil {
+ pagingControl = nil
+ l.Debug.Printf("Could not find paging control. Breaking...")
+ break
+ }
+
+ cookie := pagingResult.(*ControlPaging).Cookie
+ if len(cookie) == 0 {
+ pagingControl = nil
+ l.Debug.Printf("Could not find cookie. Breaking...")
+ break
+ }
+ pagingControl.SetCookie(cookie)
+ }
+
+ if pagingControl != nil {
+ l.Debug.Printf("Abandoning Paging...")
+ pagingControl.PagingSize = 0
+ l.Search(searchRequest)
+ }
+
+ return searchResult, nil
+}
+
+// Search performs the given search request
+func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
+ packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
+ packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
+ // encode search request
+ encodedSearchRequest, err := searchRequest.encode()
+ if err != nil {
+ return nil, err
+ }
+ packet.AppendChild(encodedSearchRequest)
+ // encode search controls
+ if searchRequest.Controls != nil {
+ packet.AppendChild(encodeControls(searchRequest.Controls))
+ }
+
+ l.Debug.PrintPacket(packet)
+
+ msgCtx, err := l.sendMessage(packet)
+ if err != nil {
+ return nil, err
+ }
+ defer l.finishMessage(msgCtx)
+
+ result := &SearchResult{
+ Entries: make([]*Entry, 0),
+ Referrals: make([]string, 0),
+ Controls: make([]Control, 0)}
+
+ foundSearchResultDone := false
+ for !foundSearchResultDone {
+ l.Debug.Printf("%d: waiting for response", msgCtx.id)
+ packetResponse, ok := <-msgCtx.responses
+ if !ok {
+ return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed"))
+ }
+ packet, err = packetResponse.ReadPacket()
+ l.Debug.Printf("%d: got response %p", msgCtx.id, packet)
+ if err != nil {
+ return nil, err
+ }
+
+ if l.Debug {
+ if err := addLDAPDescriptions(packet); err != nil {
+ return nil, err
+ }
+ ber.PrintPacket(packet)
+ }
+
+ switch packet.Children[1].Tag {
+ case 4:
+ entry := new(Entry)
+ entry.DN = packet.Children[1].Children[0].Value.(string)
+ for _, child := range packet.Children[1].Children[1].Children {
+ attr := new(EntryAttribute)
+ attr.Name = child.Children[0].Value.(string)
+ for _, value := range child.Children[1].Children {
+ attr.Values = append(attr.Values, value.Value.(string))
+ attr.ByteValues = append(attr.ByteValues, value.ByteValue)
+ }
+ entry.Attributes = append(entry.Attributes, attr)
+ }
+ result.Entries = append(result.Entries, entry)
+ case 5:
+ resultCode, resultDescription := getLDAPResultCode(packet)
+ if resultCode != 0 {
+ return result, NewError(resultCode, errors.New(resultDescription))
+ }
+ if len(packet.Children) == 3 {
+ for _, child := range packet.Children[2].Children {
+ result.Controls = append(result.Controls, DecodeControl(child))
+ }
+ }
+ foundSearchResultDone = true
+ case 19:
+ result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string))
+ }
+ }
+ l.Debug.Printf("%d: returning", msgCtx.id)
+ return result, nil
+}
diff --git a/vendor/gopkg.in/macaron.v1/LICENSE b/vendor/gopkg.in/macaron.v1/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/gopkg.in/macaron.v1/README.md b/vendor/gopkg.in/macaron.v1/README.md
new file mode 100644
index 0000000000..0e0fe79810
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/README.md
@@ -0,0 +1,92 @@
+Macaron [![Build Status](https://travis-ci.org/go-macaron/macaron.svg?branch=v1)](https://travis-ci.org/go-macaron/macaron) [![](http://gocover.io/_badge/github.com/go-macaron/macaron)](http://gocover.io/github.com/go-macaron/macaron)
+=======================
+
+![Macaron Logo](https://raw.githubusercontent.com/go-macaron/macaron/v1/macaronlogo.png)
+
+Package macaron is a highly productive and modular web framework in Go.
+
+## Getting Started
+
+The minimum required Go version is **1.3**.
+
+To install Macaron:
+
+ go get gopkg.in/macaron.v1
+
+The most basic usage of Macaron:
+
+```go
+package main
+
+import "gopkg.in/macaron.v1"
+
+func main() {
+ m := macaron.Classic()
+ m.Get("/", func() string {
+ return "Hello world!"
+ })
+ m.Run()
+}
+```
+
+## Features
+
+- Powerful routing with sub-URL support.
+- Flexible route combinations.
+- Unlimited nested group routers.
+- Directly integrate with existing services.
+- Dynamically change template files at runtime.
+- Use in-memory templates and static files.
+- Easy to plug features in and out thanks to the modular design.
+- Handy dependency injection powered by [inject](https://github.com/codegangsta/inject).
+- A leaner router layer and less reflection for better performance.
+
+## Middlewares
+
+Middlewares allow you to easily plug features in and out of your Macaron applications. A minimal sketch of registering one is shown after the list below.
+
+There are already many [middlewares](https://github.com/go-macaron) to simplify your work:
+
+- render - Go template engine
+- static - Serves static files
+- [gzip](https://github.com/go-macaron/gzip) - Gzip compression to all responses
+- [binding](https://github.com/go-macaron/binding) - Request data binding and validation
+- [i18n](https://github.com/go-macaron/i18n) - Internationalization and Localization
+- [cache](https://github.com/go-macaron/cache) - Cache manager
+- [session](https://github.com/go-macaron/session) - Session manager
+- [csrf](https://github.com/go-macaron/csrf) - Generates and validates csrf tokens
+- [captcha](https://github.com/go-macaron/captcha) - Captcha service
+- [pongo2](https://github.com/go-macaron/pongo2) - Pongo2 template engine support
+- [sockets](https://github.com/go-macaron/sockets) - WebSockets channels binding
+- [bindata](https://github.com/go-macaron/bindata) - Embed binary data as static and template files
+- [toolbox](https://github.com/go-macaron/toolbox) - Health check, pprof, profile and statistic services
+- [oauth2](https://github.com/go-macaron/oauth2) - OAuth 2.0 backend
+- [switcher](https://github.com/go-macaron/switcher) - Multiple-site support
+- [method](https://github.com/go-macaron/method) - HTTP method override
+- [permissions2](https://github.com/xyproto/permissions2) - Cookies, users and permissions
+- [renders](https://github.com/go-macaron/renders) - Beego-like render engine (Macaron has a built-in template engine; this is an alternative option)
+
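+A minimal sketch of plugging in a middleware, here the built-in `macaron.Renderer`
+(this assumes a template exists at `templates/index.tmpl`):
+
+```go
+package main
+
+import "gopkg.in/macaron.v1"
+
+func main() {
+	m := macaron.Classic()
+	m.Use(macaron.Renderer()) // adds the render middleware
+	m.Get("/", func(ctx *macaron.Context) {
+		ctx.HTML(200, "index") // renders templates/index.tmpl
+	})
+	m.Run()
+}
+```
+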
+## Use Cases
+
+- [Gogs](https://gogs.io): A painless self-hosted Git Service
+- [Peach](https://peachdocs.org): A modern web documentation server
+- [Go Walker](https://gowalker.org): Go online API documentation
+- [Switch](https://gopm.io): Gopm registry
+- [YouGam](http://yougam.com): Online Forum
+- [Critical Stack Intel](https://intel.criticalstack.com/): A 100% free intel marketplace from Critical Stack, Inc.
+
+## Getting Help
+
+- [API Reference](https://gowalker.org/gopkg.in/macaron.v1)
+- [Documentation](https://go-macaron.com)
+- [FAQs](https://go-macaron.com/docs/faqs)
+- [![Join the chat at https://gitter.im/Unknwon/macaron](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-macaron/macaron?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## Credits
+
+- Basic design inspired by [Martini](https://github.com/go-martini/martini).
+- Logo is modified by [@insionng](https://github.com/insionng) based on [Tribal Dragon](http://xtremeyamazaki.deviantart.com/art/Tribal-Dragon-27005087).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/gopkg.in/macaron.v1/context.go b/vendor/gopkg.in/macaron.v1/context.go
new file mode 100644
index 0000000000..4ac4e49465
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/context.go
@@ -0,0 +1,520 @@
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "html/template"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Unknwon/com"
+
+ "github.com/go-macaron/inject"
+)
+
+// Locale represents a localization interface.
+type Locale interface {
+ Language() string
+ Tr(string, ...interface{}) string
+}
+
+// RequestBody represents a request body.
+type RequestBody struct {
+ reader io.ReadCloser
+}
+
+// Bytes reads and returns content of request body in bytes.
+func (rb *RequestBody) Bytes() ([]byte, error) {
+ return ioutil.ReadAll(rb.reader)
+}
+
+// String reads and returns content of request body in string.
+func (rb *RequestBody) String() (string, error) {
+ data, err := rb.Bytes()
+ return string(data), err
+}
+
+// ReadCloser returns a ReadCloser for request body.
+func (rb *RequestBody) ReadCloser() io.ReadCloser {
+ return rb.reader
+}
+
+// Request represents an HTTP request received by a server or to be sent by a client.
+type Request struct {
+ *http.Request
+}
+
+func (r *Request) Body() *RequestBody {
+ return &RequestBody{r.Request.Body}
+}
+
+// Context represents the runtime context of current request of Macaron instance.
+// It is the integration of most frequently used middlewares and helper methods.
+type Context struct {
+ inject.Injector
+ handlers []Handler
+ action Handler
+ index int
+
+ *Router
+ Req Request
+ Resp ResponseWriter
+ params Params
+ Render
+ Locale
+ Data map[string]interface{}
+}
+
+func (c *Context) handler() Handler {
+ if c.index < len(c.handlers) {
+ return c.handlers[c.index]
+ }
+ if c.index == len(c.handlers) {
+ return c.action
+ }
+ panic("invalid index for context handler")
+}
+
+func (c *Context) Next() {
+ c.index += 1
+ c.run()
+}
+
+func (c *Context) Written() bool {
+ return c.Resp.Written()
+}
+
+func (c *Context) run() {
+ for c.index <= len(c.handlers) {
+ vals, err := c.Invoke(c.handler())
+ if err != nil {
+ panic(err)
+ }
+ c.index += 1
+
+ // if the handler returned something, write it to the http response
+ if len(vals) > 0 {
+ ev := c.GetVal(reflect.TypeOf(ReturnHandler(nil)))
+ handleReturn := ev.Interface().(ReturnHandler)
+ handleReturn(c, vals)
+ }
+
+ if c.Written() {
+ return
+ }
+ }
+}
+
+// RemoteAddr returns the client IP address, preferring the X-Real-IP and X-Forwarded-For headers over the raw remote address.
+func (ctx *Context) RemoteAddr() string {
+ addr := ctx.Req.Header.Get("X-Real-IP")
+ if len(addr) == 0 {
+ addr = ctx.Req.Header.Get("X-Forwarded-For")
+ if addr == "" {
+ addr = ctx.Req.RemoteAddr
+ if i := strings.LastIndex(addr, ":"); i > -1 {
+ addr = addr[:i]
+ }
+ }
+ }
+ return addr
+}
+
+func (ctx *Context) renderHTML(status int, setName, tplName string, data ...interface{}) {
+ if len(data) <= 0 {
+ ctx.Render.HTMLSet(status, setName, tplName, ctx.Data)
+ } else if len(data) == 1 {
+ ctx.Render.HTMLSet(status, setName, tplName, data[0])
+ } else {
+ ctx.Render.HTMLSet(status, setName, tplName, data[0], data[1].(HTMLOptions))
+ }
+}
+
+// HTML calls Render.HTML but allows fewer arguments.
+func (ctx *Context) HTML(status int, name string, data ...interface{}) {
+ ctx.renderHTML(status, DEFAULT_TPL_SET_NAME, name, data...)
+}
+
+// HTMLSet calls Render.HTMLSet but allows fewer arguments.
+func (ctx *Context) HTMLSet(status int, setName, tplName string, data ...interface{}) {
+ ctx.renderHTML(status, setName, tplName, data...)
+}
+
+func (ctx *Context) Redirect(location string, status ...int) {
+ code := http.StatusFound
+ if len(status) == 1 {
+ code = status[0]
+ }
+
+ http.Redirect(ctx.Resp, ctx.Req.Request, location, code)
+}
+
+// Maximum amount of memory to use when parsing a multipart form.
+// Set this to whatever value you prefer; default is 10 MB.
+var MaxMemory = int64(1024 * 1024 * 10)
+
+func (ctx *Context) parseForm() {
+ if ctx.Req.Form != nil {
+ return
+ }
+
+ contentType := ctx.Req.Header.Get(_CONTENT_TYPE)
+ if (ctx.Req.Method == "POST" || ctx.Req.Method == "PUT") &&
+ len(contentType) > 0 && strings.Contains(contentType, "multipart/form-data") {
+ ctx.Req.ParseMultipartForm(MaxMemory)
+ } else {
+ ctx.Req.ParseForm()
+ }
+}
+
+// Query returns the value of the named form parameter.
+func (ctx *Context) Query(name string) string {
+ ctx.parseForm()
+ return ctx.Req.Form.Get(name)
+}
+
+// QueryTrim returns the named form parameter with surrounding spaces trimmed.
+func (ctx *Context) QueryTrim(name string) string {
+ return strings.TrimSpace(ctx.Query(name))
+}
+
+// QueryStrings returns a list of results by given query name.
+func (ctx *Context) QueryStrings(name string) []string {
+ ctx.parseForm()
+
+ vals, ok := ctx.Req.Form[name]
+ if !ok {
+ return []string{}
+ }
+ return vals
+}
+
+// QueryEscape returns the HTML-escaped query result.
+func (ctx *Context) QueryEscape(name string) string {
+ return template.HTMLEscapeString(ctx.Query(name))
+}
+
+// QueryBool returns query result in bool type.
+func (ctx *Context) QueryBool(name string) bool {
+ v, _ := strconv.ParseBool(ctx.Query(name))
+ return v
+}
+
+// QueryInt returns query result in int type.
+func (ctx *Context) QueryInt(name string) int {
+ return com.StrTo(ctx.Query(name)).MustInt()
+}
+
+// QueryInt64 returns query result in int64 type.
+func (ctx *Context) QueryInt64(name string) int64 {
+ return com.StrTo(ctx.Query(name)).MustInt64()
+}
+
+// QueryFloat64 returns query result in float64 type.
+func (ctx *Context) QueryFloat64(name string) float64 {
+ v, _ := strconv.ParseFloat(ctx.Query(name), 64)
+ return v
+}
+
+// Params returns value of given param name.
+// e.g. ctx.Params(":uid") or ctx.Params("uid")
+func (ctx *Context) Params(name string) string {
+ if len(name) == 0 {
+ return ""
+ }
+ if len(name) > 1 && name[0] != ':' {
+ name = ":" + name
+ }
+ return ctx.params[name]
+}
+
+// SetParams sets value of param with given name.
+func (ctx *Context) SetParams(name, val string) {
+ if !strings.HasPrefix(name, ":") {
+ name = ":" + name
+ }
+ ctx.params[name] = val
+}
+
+// ParamsEscape returns the HTML-escaped params result.
+// e.g. ctx.ParamsEscape(":uname")
+func (ctx *Context) ParamsEscape(name string) string {
+ return template.HTMLEscapeString(ctx.Params(name))
+}
+
+// ParamsInt returns params result in int type.
+// e.g. ctx.ParamsInt(":uid")
+func (ctx *Context) ParamsInt(name string) int {
+ return com.StrTo(ctx.Params(name)).MustInt()
+}
+
+// ParamsInt64 returns params result in int64 type.
+// e.g. ctx.ParamsInt64(":uid")
+func (ctx *Context) ParamsInt64(name string) int64 {
+ return com.StrTo(ctx.Params(name)).MustInt64()
+}
+
+// ParamsFloat64 returns params result in float64 type.
+// e.g. ctx.ParamsFloat64(":uid")
+func (ctx *Context) ParamsFloat64(name string) float64 {
+ v, _ := strconv.ParseFloat(ctx.Params(name), 64)
+ return v
+}
+
+// GetFile returns information about user upload file by given form field name.
+func (ctx *Context) GetFile(name string) (multipart.File, *multipart.FileHeader, error) {
+ return ctx.Req.FormFile(name)
+}
+
+// SaveToFile reads a file from request by field name and saves to given path.
+func (ctx *Context) SaveToFile(name, savePath string) error {
+ fr, _, err := ctx.GetFile(name)
+ if err != nil {
+ return err
+ }
+ defer fr.Close()
+
+ fw, err := os.OpenFile(savePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ defer fw.Close()
+
+ _, err = io.Copy(fw, fr)
+ return err
+}
+
+// SetCookie sets given cookie value to response header.
+// FIXME: IE support? http://golanghome.com/post/620#reply2
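+//
+// The optional arguments are interpreted positionally, as the switch below
+// shows: max age (int), path (string), domain (string), secure (bool),
+// HttpOnly (bool) and expires (time.Time).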
+func (ctx *Context) SetCookie(name string, value string, others ...interface{}) {
+ cookie := http.Cookie{}
+ cookie.Name = name
+ cookie.Value = url.QueryEscape(value)
+
+ if len(others) > 0 {
+ switch v := others[0].(type) {
+ case int:
+ cookie.MaxAge = v
+ case int64:
+ cookie.MaxAge = int(v)
+ case int32:
+ cookie.MaxAge = int(v)
+ }
+ }
+
+ cookie.Path = "/"
+ if len(others) > 1 {
+ if v, ok := others[1].(string); ok && len(v) > 0 {
+ cookie.Path = v
+ }
+ }
+
+ if len(others) > 2 {
+ if v, ok := others[2].(string); ok && len(v) > 0 {
+ cookie.Domain = v
+ }
+ }
+
+ if len(others) > 3 {
+ switch v := others[3].(type) {
+ case bool:
+ cookie.Secure = v
+ default:
+ if others[3] != nil {
+ cookie.Secure = true
+ }
+ }
+ }
+
+ if len(others) > 4 {
+ if v, ok := others[4].(bool); ok && v {
+ cookie.HttpOnly = true
+ }
+ }
+
+ if len(others) > 5 {
+ if v, ok := others[5].(time.Time); ok {
+ cookie.Expires = v
+ cookie.RawExpires = v.Format(time.UnixDate)
+ }
+ }
+
+ ctx.Resp.Header().Add("Set-Cookie", cookie.String())
+}
+
+// GetCookie returns given cookie value from request header.
+func (ctx *Context) GetCookie(name string) string {
+ cookie, err := ctx.Req.Cookie(name)
+ if err != nil {
+ return ""
+ }
+ val, _ := url.QueryUnescape(cookie.Value)
+ return val
+}
+
+// GetCookieInt returns cookie result in int type.
+func (ctx *Context) GetCookieInt(name string) int {
+ return com.StrTo(ctx.GetCookie(name)).MustInt()
+}
+
+// GetCookieInt64 returns cookie result in int64 type.
+func (ctx *Context) GetCookieInt64(name string) int64 {
+ return com.StrTo(ctx.GetCookie(name)).MustInt64()
+}
+
+// GetCookieFloat64 returns cookie result in float64 type.
+func (ctx *Context) GetCookieFloat64(name string) float64 {
+ v, _ := strconv.ParseFloat(ctx.GetCookie(name), 64)
+ return v
+}
+
+var defaultCookieSecret string
+
+// SetDefaultCookieSecret sets global default secure cookie secret.
+func (m *Macaron) SetDefaultCookieSecret(secret string) {
+ defaultCookieSecret = secret
+}
+
+// SetSecureCookie sets given cookie value to response header with default secret string.
+func (ctx *Context) SetSecureCookie(name, value string, others ...interface{}) {
+ ctx.SetSuperSecureCookie(defaultCookieSecret, name, value, others...)
+}
+
+// GetSecureCookie returns given cookie value from request header with default secret string.
+func (ctx *Context) GetSecureCookie(key string) (string, bool) {
+ return ctx.GetSuperSecureCookie(defaultCookieSecret, key)
+}
+
+// SetSuperSecureCookie sets given cookie value to response header with secret string.
+func (ctx *Context) SetSuperSecureCookie(secret, name, value string, others ...interface{}) {
+ m := md5.Sum([]byte(secret))
+ secret = hex.EncodeToString(m[:])
+ text, err := com.AESEncrypt([]byte(secret), []byte(value))
+ if err != nil {
+ panic("error encrypting cookie: " + err.Error())
+ }
+ ctx.SetCookie(name, hex.EncodeToString(text), others...)
+}
+
+// GetSuperSecureCookie returns given cookie value from request header with secret string.
+func (ctx *Context) GetSuperSecureCookie(secret, key string) (string, bool) {
+ val := ctx.GetCookie(key)
+ if val == "" {
+ return "", false
+ }
+
+ data, err := hex.DecodeString(val)
+ if err != nil {
+ return "", false
+ }
+
+ m := md5.Sum([]byte(secret))
+ secret = hex.EncodeToString(m[:])
+ text, err := com.AESDecrypt([]byte(secret), data)
+ return string(text), err == nil
+}
+
+func (ctx *Context) setRawContentHeader() {
+ ctx.Resp.Header().Set("Content-Description", "Raw content")
+ ctx.Resp.Header().Set("Content-Type", "text/plain")
+ ctx.Resp.Header().Set("Expires", "0")
+ ctx.Resp.Header().Set("Cache-Control", "must-revalidate")
+ ctx.Resp.Header().Set("Pragma", "public")
+}
+
+// ServeContent serves given content to response.
+func (ctx *Context) ServeContent(name string, r io.ReadSeeker, params ...interface{}) {
+ modtime := time.Now()
+ for _, p := range params {
+ switch v := p.(type) {
+ case time.Time:
+ modtime = v
+ }
+ }
+
+ ctx.setRawContentHeader()
+ http.ServeContent(ctx.Resp, ctx.Req.Request, name, modtime, r)
+}
+
+// ServeFileContent serves given file as content to response.
+func (ctx *Context) ServeFileContent(file string, names ...string) {
+ var name string
+ if len(names) > 0 {
+ name = names[0]
+ } else {
+ name = path.Base(file)
+ }
+
+ f, err := os.Open(file)
+ if err != nil {
+ if Env == PROD {
+ http.Error(ctx.Resp, "Internal Server Error", 500)
+ } else {
+ http.Error(ctx.Resp, err.Error(), 500)
+ }
+ return
+ }
+ defer f.Close()
+
+ ctx.setRawContentHeader()
+ http.ServeContent(ctx.Resp, ctx.Req.Request, name, time.Now(), f)
+}
+
+// ServeFile serves given file to response.
+func (ctx *Context) ServeFile(file string, names ...string) {
+ var name string
+ if len(names) > 0 {
+ name = names[0]
+ } else {
+ name = path.Base(file)
+ }
+ ctx.Resp.Header().Set("Content-Description", "File Transfer")
+ ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
+ ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+name)
+ ctx.Resp.Header().Set("Content-Transfer-Encoding", "binary")
+ ctx.Resp.Header().Set("Expires", "0")
+ ctx.Resp.Header().Set("Cache-Control", "must-revalidate")
+ ctx.Resp.Header().Set("Pragma", "public")
+ http.ServeFile(ctx.Resp, ctx.Req.Request, file)
+}
+
+// ChangeStaticPath changes the static path from the old one to the new one.
+func (ctx *Context) ChangeStaticPath(oldPath, newPath string) {
+ if !filepath.IsAbs(oldPath) {
+ oldPath = filepath.Join(Root, oldPath)
+ }
+ dir := statics.Get(oldPath)
+ if dir != nil {
+ statics.Delete(oldPath)
+
+ if !filepath.IsAbs(newPath) {
+ newPath = filepath.Join(Root, newPath)
+ }
+ *dir = http.Dir(newPath)
+ statics.Set(dir)
+ }
+}
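
The secure-cookie helpers above derive an AES key by MD5-hashing the secret, hex-encoding the digest, and storing the hex-encoded ciphertext in an ordinary cookie. A minimal usage sketch follows; the routes, handler bodies, and the "uid" cookie name are illustrative only, not part of the vendored code:

    m := macaron.New()
    m.SetDefaultCookieSecret("change-me")            // used by SetSecureCookie/GetSecureCookie
    m.Get("/login", func(ctx *macaron.Context) {
        ctx.SetSecureCookie("uid", "42")             // value is encrypted before it is set
    })
    m.Get("/me", func(ctx *macaron.Context) string {
        uid, ok := ctx.GetSecureCookie("uid")        // ok is false if the cookie is missing or tampered with
        if !ok {
            return "anonymous"
        }
        return "user " + uid
    })
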
diff --git a/vendor/gopkg.in/macaron.v1/logger.go b/vendor/gopkg.in/macaron.v1/logger.go
new file mode 100644
index 0000000000..add6049072
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/logger.go
@@ -0,0 +1,64 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+var (
+ ColorLog = true
+ LogTimeFormat = "2006-01-02 15:04:05"
+)
+
+func init() {
+ ColorLog = runtime.GOOS != "windows"
+}
+
+// Logger returns a middleware handler that logs the request as it goes in and the response as it goes out.
+func Logger() Handler {
+ return func(ctx *Context, log *log.Logger) {
+ start := time.Now()
+
+ log.Printf("%s: Started %s %s for %s", time.Now().Format(LogTimeFormat), ctx.Req.Method, ctx.Req.RequestURI, ctx.RemoteAddr())
+
+ rw := ctx.Resp.(ResponseWriter)
+ ctx.Next()
+
+ content := fmt.Sprintf("%s: Completed %s %v %s in %v", time.Now().Format(LogTimeFormat), ctx.Req.RequestURI, rw.Status(), http.StatusText(rw.Status()), time.Since(start))
+ if ColorLog {
+ switch rw.Status() {
+ case 200, 201, 202:
+ content = fmt.Sprintf("\033[1;32m%s\033[0m", content)
+ case 301, 302:
+ content = fmt.Sprintf("\033[1;37m%s\033[0m", content)
+ case 304:
+ content = fmt.Sprintf("\033[1;33m%s\033[0m", content)
+ case 401, 403:
+ content = fmt.Sprintf("\033[4;31m%s\033[0m", content)
+ case 404:
+ content = fmt.Sprintf("\033[1;31m%s\033[0m", content)
+ case 500:
+ content = fmt.Sprintf("\033[1;36m%s\033[0m", content)
+ }
+ }
+ log.Println(content)
+ }
+}
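
Logger receives the request context and the *log.Logger that NewWithLogger maps into the injector, so enabling request logging is a single Use call. A minimal sketch, with an illustrative route:

    m := macaron.New()
    m.Use(macaron.Logger())                    // prints "Started ..." / "Completed ..." lines per request
    m.Get("/", func() string { return "ok" })
    m.Run()
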
diff --git a/vendor/gopkg.in/macaron.v1/macaron.go b/vendor/gopkg.in/macaron.v1/macaron.go
new file mode 100644
index 0000000000..442402ead7
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/macaron.go
@@ -0,0 +1,291 @@
+// +build go1.3
+
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package macaron is a highly productive and modular web framework in Go.
+package macaron
+
+import (
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/Unknwon/com"
+ "gopkg.in/ini.v1"
+
+ "github.com/go-macaron/inject"
+)
+
+const _VERSION = "1.1.8.0826"
+
+func Version() string {
+ return _VERSION
+}
+
+// Handler can be any callable function.
+// Macaron attempts to inject services into the handler's argument list,
+// and panics if an argument could not be fulfilled via dependency injection.
+type Handler interface{}
+
+// validateHandler makes sure a handler is a callable function,
+// and panics if it is not.
+func validateHandler(h Handler) {
+ if reflect.TypeOf(h).Kind() != reflect.Func {
+ panic("Macaron handler must be a callable function")
+ }
+}
+
+// validateHandlers makes sure handlers are callable functions,
+// and panics if any of them is not.
+func validateHandlers(handlers []Handler) {
+ for _, h := range handlers {
+ validateHandler(h)
+ }
+}
+
+// Macaron represents the top level web application.
+// inject.Injector methods can be invoked to map services on a global level.
+type Macaron struct {
+ inject.Injector
+ befores []BeforeHandler
+ handlers []Handler
+ action Handler
+
+ hasURLPrefix bool
+ urlPrefix string // For suburl support.
+ *Router
+
+ logger *log.Logger
+}
+
+// NewWithLogger creates a bare bones Macaron instance.
+// Use this method if you want to have full control over the middleware that is used.
+// You can specify logger output writer with this function.
+func NewWithLogger(out io.Writer) *Macaron {
+ m := &Macaron{
+ Injector: inject.New(),
+ action: func() {},
+ Router: NewRouter(),
+ logger: log.New(out, "[Macaron] ", 0),
+ }
+ m.Router.m = m
+ m.Map(m.logger)
+ m.Map(defaultReturnHandler())
+ m.NotFound(http.NotFound)
+ m.InternalServerError(func(rw http.ResponseWriter, err error) {
+ http.Error(rw, err.Error(), 500)
+ })
+ return m
+}
+
+// New creates a bare bones Macaron instance.
+// Use this method if you want to have full control over the middleware that is used.
+func New() *Macaron {
+ return NewWithLogger(os.Stdout)
+}
+
+// Classic creates a classic Macaron with some basic default middleware:
+// macaron.Logger, macaron.Recovery and macaron.Static.
+func Classic() *Macaron {
+ m := New()
+ m.Use(Logger())
+ m.Use(Recovery())
+ m.Use(Static("public"))
+ return m
+}
+
+// Handlers sets the entire middleware stack with the given Handlers.
+// This will clear any current middleware handlers,
+// and panics if any of the handlers is not a callable function.
+func (m *Macaron) Handlers(handlers ...Handler) {
+ m.handlers = make([]Handler, 0)
+ for _, handler := range handlers {
+ m.Use(handler)
+ }
+}
+
+// Action sets the handler that will be called after all the middleware has been invoked.
+// This is set to macaron.Router in a macaron.Classic().
+func (m *Macaron) Action(handler Handler) {
+ validateHandler(handler)
+ m.action = handler
+}
+
+// BeforeHandler represents a handler that executes at the beginning of every request.
+// Macaron stops further processing when it returns true.
+type BeforeHandler func(rw http.ResponseWriter, req *http.Request) bool
+
+func (m *Macaron) Before(handler BeforeHandler) {
+ m.befores = append(m.befores, handler)
+}
+
+// Use adds a middleware Handler to the stack,
+// and panics if the handler is not a callable func.
+// Middleware Handlers are invoked in the order that they are added.
+func (m *Macaron) Use(handler Handler) {
+ validateHandler(handler)
+ m.handlers = append(m.handlers, handler)
+}
+
+func (m *Macaron) createContext(rw http.ResponseWriter, req *http.Request) *Context {
+ c := &Context{
+ Injector: inject.New(),
+ handlers: m.handlers,
+ action: m.action,
+ index: 0,
+ Router: m.Router,
+ Req: Request{req},
+ Resp: NewResponseWriter(rw),
+ Render: &DummyRender{rw},
+ Data: make(map[string]interface{}),
+ }
+ c.SetParent(m)
+ c.Map(c)
+ c.MapTo(c.Resp, (*http.ResponseWriter)(nil))
+ c.Map(req)
+ return c
+}
+
+// ServeHTTP is the HTTP Entry point for a Macaron instance.
+// Useful if you want to control your own HTTP server.
+// Be aware that none of the middleware will run if no router is registered.
+func (m *Macaron) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if m.hasURLPrefix {
+ req.URL.Path = strings.TrimPrefix(req.URL.Path, m.urlPrefix)
+ }
+ for _, h := range m.befores {
+ if h(rw, req) {
+ return
+ }
+ }
+ m.Router.ServeHTTP(rw, req)
+}
+
+func GetDefaultListenInfo() (string, int) {
+ host := os.Getenv("HOST")
+ if len(host) == 0 {
+ host = "0.0.0.0"
+ }
+ port := com.StrTo(os.Getenv("PORT")).MustInt()
+ if port == 0 {
+ port = 4000
+ }
+ return host, port
+}
+
+// Run runs the HTTP server. It listens on os.Getenv("PORT") or port 4000 by default.
+func (m *Macaron) Run(args ...interface{}) {
+ host, port := GetDefaultListenInfo()
+ if len(args) == 1 {
+ switch arg := args[0].(type) {
+ case string:
+ host = arg
+ case int:
+ port = arg
+ }
+ } else if len(args) >= 2 {
+ if arg, ok := args[0].(string); ok {
+ host = arg
+ }
+ if arg, ok := args[1].(int); ok {
+ port = arg
+ }
+ }
+
+ addr := host + ":" + com.ToStr(port)
+ logger := m.GetVal(reflect.TypeOf(m.logger)).Interface().(*log.Logger)
+ logger.Printf("listening on %s (%s)\n", addr, safeEnv())
+ logger.Fatalln(http.ListenAndServe(addr, m))
+}
+
+// SetURLPrefix sets the URL prefix of the router layer so that it supports sub-URLs.
+func (m *Macaron) SetURLPrefix(prefix string) {
+ m.urlPrefix = prefix
+ m.hasURLPrefix = len(m.urlPrefix) > 0
+}
+
+// ____ ____ .__ ___. .__
+// \ \ / /____ _______|__|____ \_ |__ | | ____ ______
+// \ Y /\__ \\_ __ \ \__ \ | __ \| | _/ __ \ / ___/
+// \ / / __ \| | \/ |/ __ \| \_\ \ |_\ ___/ \___ \
+// \___/ (____ /__| |__(____ /___ /____/\___ >____ >
+// \/ \/ \/ \/ \/
+
+const (
+ DEV = "development"
+ PROD = "production"
+ TEST = "test"
+)
+
+var (
+ // Env is the environment that Macaron is executing in.
+ // The MACARON_ENV environment variable is read on initialization to set this variable.
+ Env = DEV
+ envLock sync.Mutex
+
+ // Path of the working directory.
+ Root string
+
+ // Flash applies to the current request.
+ FlashNow bool
+
+ // Configuration convention object.
+ cfg *ini.File
+)
+
+func setENV(e string) {
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ if len(e) > 0 {
+ Env = e
+ }
+}
+
+func safeEnv() string {
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ return Env
+}
+
+func init() {
+ setENV(os.Getenv("MACARON_ENV"))
+
+ var err error
+ Root, err = os.Getwd()
+ if err != nil {
+ panic("error getting work directory: " + err.Error())
+ }
+}
+
+// SetConfig sets data sources for configuration.
+func SetConfig(source interface{}, others ...interface{}) (_ *ini.File, err error) {
+ cfg, err = ini.Load(source, others...)
+ return Config(), err
+}
+
+// Config returns configuration convention object.
+// It returns an empty object if there is no one available.
+func Config() *ini.File {
+ if cfg == nil {
+ return ini.Empty()
+ }
+ return cfg
+}
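
Run accepts an optional host and/or port, falling back to the HOST and PORT environment variables and finally to 0.0.0.0:4000, while Classic wires up the Logger, Recovery and Static middleware. A sketch of both styles of serving; the address is illustrative, and the commented-out line assumes the standard net/http package:

    m := macaron.Classic()
    m.Get("/", func() string { return "hello" })

    // Let Macaron manage the listener ...
    m.Run("127.0.0.1", 8080)

    // ... or mount it on your own server, since *Macaron implements http.Handler:
    // http.ListenAndServe(":8080", m)
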
diff --git a/vendor/gopkg.in/macaron.v1/macaronlogo.png b/vendor/gopkg.in/macaron.v1/macaronlogo.png
new file mode 100644
index 0000000000..399759769a
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/macaronlogo.png
Binary files differ
diff --git a/vendor/gopkg.in/macaron.v1/recovery.go b/vendor/gopkg.in/macaron.v1/recovery.go
new file mode 100644
index 0000000000..ea3bdac045
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/recovery.go
@@ -0,0 +1,163 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "runtime"
+
+ "github.com/go-macaron/inject"
+)
+
+const (
+ panicHtml = `<html>
+<head><title>PANIC: %s</title>
+<meta charset="utf-8" />
+<style type="text/css">
+html, body {
+ font-family: "Roboto", sans-serif;
+ color: #333333;
+ background-color: #ea5343;
+ margin: 0px;
+}
+h1 {
+ color: #d04526;
+ background-color: #ffffff;
+ padding: 20px;
+ border-bottom: 1px dashed #2b3848;
+}
+pre {
+ margin: 20px;
+ padding: 20px;
+ border: 2px solid #2b3848;
+ background-color: #ffffff;
+ white-space: pre-wrap; /* css-3 */
+ white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
+ white-space: -pre-wrap; /* Opera 4-6 */
+ white-space: -o-pre-wrap; /* Opera 7 */
+ word-wrap: break-word; /* Internet Explorer 5.5+ */
+}
+</style>
+</head><body>
+<h1>PANIC</h1>
+<pre style="font-weight: bold;">%s</pre>
+<pre>%s</pre>
+</body>
+</html>`
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+ slash = []byte("/")
+)
+
+// stack returns a nicely formatted stack frame, skipping skip frames.
+func stack(skip int) []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := skip; ; i++ { // Skip the expected number of frames
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ // Also the package path might contain dots (e.g. code.google.com/...),
+ // so first eliminate the path prefix
+ if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+// While Macaron is in development mode, Recovery will also output the panic as HTML.
+func Recovery() Handler {
+ return func(c *Context, log *log.Logger) {
+ defer func() {
+ if err := recover(); err != nil {
+ stack := stack(3)
+ log.Printf("PANIC: %s\n%s", err, stack)
+
+ // Lookup the current responsewriter
+ val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
+ res := val.Interface().(http.ResponseWriter)
+
+ // respond with panic message while in development mode
+ var body []byte
+ if Env == DEV {
+ res.Header().Set("Content-Type", "text/html")
+ body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
+ }
+
+ res.WriteHeader(http.StatusInternalServerError)
+ if nil != body {
+ res.Write(body)
+ }
+ }
+ }()
+
+ c.Next()
+ }
+}
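
Recovery wraps the rest of the handler chain in a deferred recover, logs the stack trace, and answers with a 500 (plus the HTML panic page when Env is DEV). A sketch of a handler it would catch; the route and panic message are illustrative:

    m := macaron.New()
    m.Use(macaron.Recovery())
    m.Get("/boom", func() {
        panic("something went wrong") // caught by Recovery; the client receives a 500
    })
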
diff --git a/vendor/gopkg.in/macaron.v1/render.go b/vendor/gopkg.in/macaron.v1/render.go
new file mode 100644
index 0000000000..ff2dcaacdd
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/render.go
@@ -0,0 +1,714 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "html/template"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Unknwon/com"
+)
+
+const (
+ _CONTENT_TYPE = "Content-Type"
+ _CONTENT_LENGTH = "Content-Length"
+ _CONTENT_BINARY = "application/octet-stream"
+ _CONTENT_JSON = "application/json"
+ _CONTENT_HTML = "text/html"
+ _CONTENT_PLAIN = "text/plain"
+ _CONTENT_XHTML = "application/xhtml+xml"
+ _CONTENT_XML = "text/xml"
+ _DEFAULT_CHARSET = "UTF-8"
+)
+
+var (
+ // Provides a temporary buffer to execute templates into and catch errors.
+ bufpool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+ }
+
+ // Included helper functions for use when rendering html
+ helperFuncs = template.FuncMap{
+ "yield": func() (string, error) {
+ return "", fmt.Errorf("yield called with no layout defined")
+ },
+ "current": func() (string, error) {
+ return "", nil
+ },
+ }
+)
+
+type (
+ // TemplateFile represents an interface of a template file that has a name and can be read.
+ TemplateFile interface {
+ Name() string
+ Data() []byte
+ Ext() string
+ }
+ // TemplateFileSystem represents an interface of a template file system that is able to list all files.
+ TemplateFileSystem interface {
+ ListFiles() []TemplateFile
+ }
+
+ // Delims represents a set of Left and Right delimiters for HTML template rendering
+ Delims struct {
+ // Left delimiter, defaults to {{
+ Left string
+ // Right delimiter, defaults to }}
+ Right string
+ }
+
+ // RenderOptions represents a struct for specifying configuration options for the Render middleware.
+ RenderOptions struct {
+ // Directory to load templates. Default is "templates".
+ Directory string
+ // Additional directories to overwrite templates.
+ AppendDirectories []string
+ // Layout template name. Will not render a layout if "". Default is "".
+ Layout string
+ // Extensions to parse template files from. Defaults are [".tmpl", ".html"].
+ Extensions []string
+ // Funcs is a slice of FuncMaps to apply to the template upon compilation. This is useful for helper functions. Default is [].
+ Funcs []template.FuncMap
+ // Delims sets the action delimiters to the specified strings in the Delims struct.
+ Delims Delims
+ // Appends the given charset to the Content-Type header. Default is "UTF-8".
+ Charset string
+ // Outputs human readable JSON.
+ IndentJSON bool
+ // Outputs human readable XML.
+ IndentXML bool
+ // Prefixes the JSON output with the given bytes.
+ PrefixJSON []byte
+ // Prefixes the XML output with the given bytes.
+ PrefixXML []byte
+ // Allows changing the output to XHTML instead of HTML. Default is "text/html".
+ HTMLContentType string
+ // TemplateFileSystem is the interface for supporting any implementation of a template file system.
+ TemplateFileSystem
+ }
+
+ // HTMLOptions is a struct for overriding some rendering Options for specific HTML call
+ HTMLOptions struct {
+ // Layout template name. Overrides Options.Layout.
+ Layout string
+ }
+
+ Render interface {
+ http.ResponseWriter
+ SetResponseWriter(http.ResponseWriter)
+
+ JSON(int, interface{})
+ JSONString(interface{}) (string, error)
+ RawData(int, []byte) // Serve content as binary
+ PlainText(int, []byte) // Serve content as plain text
+ HTML(int, string, interface{}, ...HTMLOptions)
+ HTMLSet(int, string, string, interface{}, ...HTMLOptions)
+ HTMLSetString(string, string, interface{}, ...HTMLOptions) (string, error)
+ HTMLString(string, interface{}, ...HTMLOptions) (string, error)
+ HTMLSetBytes(string, string, interface{}, ...HTMLOptions) ([]byte, error)
+ HTMLBytes(string, interface{}, ...HTMLOptions) ([]byte, error)
+ XML(int, interface{})
+ Error(int, ...string)
+ Status(int)
+ SetTemplatePath(string, string)
+ HasTemplateSet(string) bool
+ }
+)
+
+// TplFile implements TemplateFile interface.
+type TplFile struct {
+ name string
+ data []byte
+ ext string
+}
+
+// NewTplFile creates a new template file with the given name and data.
+func NewTplFile(name string, data []byte, ext string) *TplFile {
+ return &TplFile{name, data, ext}
+}
+
+func (f *TplFile) Name() string {
+ return f.name
+}
+
+func (f *TplFile) Data() []byte {
+ return f.data
+}
+
+func (f *TplFile) Ext() string {
+ return f.ext
+}
+
+// TplFileSystem implements TemplateFileSystem interface.
+type TplFileSystem struct {
+ files []TemplateFile
+}
+
+// NewTemplateFileSystem creates a new template file system with the given options.
+func NewTemplateFileSystem(opt RenderOptions, omitData bool) TplFileSystem {
+ fs := TplFileSystem{}
+ fs.files = make([]TemplateFile, 0, 10)
+
+ // Directories are composed in reverse order because later ones overwrite previous ones,
+ // so once found, we can directly jump out of the loop.
+ dirs := make([]string, 0, len(opt.AppendDirectories)+1)
+ for i := len(opt.AppendDirectories) - 1; i >= 0; i-- {
+ dirs = append(dirs, opt.AppendDirectories[i])
+ }
+ dirs = append(dirs, opt.Directory)
+
+ var err error
+ for i := range dirs {
+ // Skip ones that do not exist for the symlink test,
+ // but allow non-symlink ones added after start.
+ if !com.IsExist(dirs[i]) {
+ continue
+ }
+
+ dirs[i], err = filepath.EvalSymlinks(dirs[i])
+ if err != nil {
+ panic("EvalSymlinks(" + dirs[i] + "): " + err.Error())
+ }
+ }
+ lastDir := dirs[len(dirs)-1]
+
+ // We still walk the last (original) directory because it makes no sense to load templates that do not exist in the original directory.
+ if err = filepath.Walk(lastDir, func(path string, info os.FileInfo, err error) error {
+ r, err := filepath.Rel(lastDir, path)
+ if err != nil {
+ return err
+ }
+
+ ext := GetExt(r)
+
+ for _, extension := range opt.Extensions {
+ if ext != extension {
+ continue
+ }
+
+ var data []byte
+ if !omitData {
+ // Loop over the candidate directories and break out once the file is found.
+ // The file always exists because we are inside the walk function,
+ // and reading the original file is the worst case.
+ for i := range dirs {
+ path = filepath.Join(dirs[i], r)
+ if !com.IsFile(path) {
+ continue
+ }
+
+ data, err = ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ break
+ }
+ }
+
+ name := filepath.ToSlash((r[0 : len(r)-len(ext)]))
+ fs.files = append(fs.files, NewTplFile(name, data, ext))
+ }
+
+ return nil
+ }); err != nil {
+ panic("NewTemplateFileSystem: " + err.Error())
+ }
+
+ return fs
+}
+
+func (fs TplFileSystem) ListFiles() []TemplateFile {
+ return fs.files
+}
+
+func PrepareCharset(charset string) string {
+ if len(charset) != 0 {
+ return "; charset=" + charset
+ }
+
+ return "; charset=" + _DEFAULT_CHARSET
+}
+
+func GetExt(s string) string {
+ index := strings.Index(s, ".")
+ if index == -1 {
+ return ""
+ }
+ return s[index:]
+}
+
+func compile(opt RenderOptions) *template.Template {
+ t := template.New(opt.Directory)
+ t.Delims(opt.Delims.Left, opt.Delims.Right)
+ // Parse an initial template in case we don't have any.
+ template.Must(t.Parse("Macaron"))
+
+ if opt.TemplateFileSystem == nil {
+ opt.TemplateFileSystem = NewTemplateFileSystem(opt, false)
+ }
+
+ for _, f := range opt.TemplateFileSystem.ListFiles() {
+ tmpl := t.New(f.Name())
+ for _, funcs := range opt.Funcs {
+ tmpl.Funcs(funcs)
+ }
+ // Bomb out if parse fails. We don't want any silent server starts.
+ template.Must(tmpl.Funcs(helperFuncs).Parse(string(f.Data())))
+ }
+
+ return t
+}
+
+const (
+ DEFAULT_TPL_SET_NAME = "DEFAULT"
+)
+
+// TemplateSet represents a template set of type *template.Template.
+type TemplateSet struct {
+ lock sync.RWMutex
+ sets map[string]*template.Template
+ dirs map[string]string
+}
+
+// NewTemplateSet initializes a new empty template set.
+func NewTemplateSet() *TemplateSet {
+ return &TemplateSet{
+ sets: make(map[string]*template.Template),
+ dirs: make(map[string]string),
+ }
+}
+
+func (ts *TemplateSet) Set(name string, opt *RenderOptions) *template.Template {
+ t := compile(*opt)
+
+ ts.lock.Lock()
+ defer ts.lock.Unlock()
+
+ ts.sets[name] = t
+ ts.dirs[name] = opt.Directory
+ return t
+}
+
+func (ts *TemplateSet) Get(name string) *template.Template {
+ ts.lock.RLock()
+ defer ts.lock.RUnlock()
+
+ return ts.sets[name]
+}
+
+func (ts *TemplateSet) GetDir(name string) string {
+ ts.lock.RLock()
+ defer ts.lock.RUnlock()
+
+ return ts.dirs[name]
+}
+
+func prepareRenderOptions(options []RenderOptions) RenderOptions {
+ var opt RenderOptions
+ if len(options) > 0 {
+ opt = options[0]
+ }
+
+ // Defaults.
+ if len(opt.Directory) == 0 {
+ opt.Directory = "templates"
+ }
+ if len(opt.Extensions) == 0 {
+ opt.Extensions = []string{".tmpl", ".html"}
+ }
+ if len(opt.HTMLContentType) == 0 {
+ opt.HTMLContentType = _CONTENT_HTML
+ }
+
+ return opt
+}
+
+func ParseTplSet(tplSet string) (tplName string, tplDir string) {
+ tplSet = strings.TrimSpace(tplSet)
+ if len(tplSet) == 0 {
+ panic("empty template set argument")
+ }
+ infos := strings.Split(tplSet, ":")
+ if len(infos) == 1 {
+ tplDir = infos[0]
+ tplName = path.Base(tplDir)
+ } else {
+ tplName = infos[0]
+ tplDir = infos[1]
+ }
+
+ if !com.IsDir(tplDir) {
+ panic("template set path does not exist or is not a directory")
+ }
+ return tplName, tplDir
+}
+
+func renderHandler(opt RenderOptions, tplSets []string) Handler {
+ cs := PrepareCharset(opt.Charset)
+ ts := NewTemplateSet()
+ ts.Set(DEFAULT_TPL_SET_NAME, &opt)
+
+ var tmpOpt RenderOptions
+ for _, tplSet := range tplSets {
+ tplName, tplDir := ParseTplSet(tplSet)
+ tmpOpt = opt
+ tmpOpt.Directory = tplDir
+ ts.Set(tplName, &tmpOpt)
+ }
+
+ return func(ctx *Context) {
+ r := &TplRender{
+ ResponseWriter: ctx.Resp,
+ TemplateSet: ts,
+ Opt: &opt,
+ CompiledCharset: cs,
+ }
+ ctx.Data["TmplLoadTimes"] = func() string {
+ if r.startTime.IsZero() {
+ return ""
+ }
+ return fmt.Sprint(time.Since(r.startTime).Nanoseconds()/1e6) + "ms"
+ }
+
+ ctx.Render = r
+ ctx.MapTo(r, (*Render)(nil))
+ }
+}
+
+// Renderer is a Middleware that maps a macaron.Render service into the Macaron handler chain.
+// A single variadic macaron.RenderOptions struct can be optionally provided to configure
+// HTML rendering. The default directory for templates is "templates" and the default
+// file extensions are ".tmpl" and ".html".
+//
+// If MACARON_ENV is set to "" or "development" then templates will be recompiled on every request. For more performance, set the
+// MACARON_ENV environment variable to "production".
+func Renderer(options ...RenderOptions) Handler {
+ return renderHandler(prepareRenderOptions(options), []string{})
+}
+
+func Renderers(options RenderOptions, tplSets ...string) Handler {
+ return renderHandler(prepareRenderOptions([]RenderOptions{options}), tplSets)
+}
+
+type TplRender struct {
+ http.ResponseWriter
+ *TemplateSet
+ Opt *RenderOptions
+ CompiledCharset string
+
+ startTime time.Time
+}
+
+func (r *TplRender) SetResponseWriter(rw http.ResponseWriter) {
+ r.ResponseWriter = rw
+}
+
+func (r *TplRender) JSON(status int, v interface{}) {
+ var (
+ result []byte
+ err error
+ )
+ if r.Opt.IndentJSON {
+ result, err = json.MarshalIndent(v, "", " ")
+ } else {
+ result, err = json.Marshal(v)
+ }
+ if err != nil {
+ http.Error(r, err.Error(), 500)
+ return
+ }
+
+ // json rendered fine, write out the result
+ r.Header().Set(_CONTENT_TYPE, _CONTENT_JSON+r.CompiledCharset)
+ r.WriteHeader(status)
+ if len(r.Opt.PrefixJSON) > 0 {
+ r.Write(r.Opt.PrefixJSON)
+ }
+ r.Write(result)
+}
+
+func (r *TplRender) JSONString(v interface{}) (string, error) {
+ var result []byte
+ var err error
+ if r.Opt.IndentJSON {
+ result, err = json.MarshalIndent(v, "", " ")
+ } else {
+ result, err = json.Marshal(v)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(result), nil
+}
+
+func (r *TplRender) XML(status int, v interface{}) {
+ var result []byte
+ var err error
+ if r.Opt.IndentXML {
+ result, err = xml.MarshalIndent(v, "", " ")
+ } else {
+ result, err = xml.Marshal(v)
+ }
+ if err != nil {
+ http.Error(r, err.Error(), 500)
+ return
+ }
+
+ // XML rendered fine, write out the result
+ r.Header().Set(_CONTENT_TYPE, _CONTENT_XML+r.CompiledCharset)
+ r.WriteHeader(status)
+ if len(r.Opt.PrefixXML) > 0 {
+ r.Write(r.Opt.PrefixXML)
+ }
+ r.Write(result)
+}
+
+func (r *TplRender) data(status int, contentType string, v []byte) {
+ if r.Header().Get(_CONTENT_TYPE) == "" {
+ r.Header().Set(_CONTENT_TYPE, contentType)
+ }
+ r.WriteHeader(status)
+ r.Write(v)
+}
+
+func (r *TplRender) RawData(status int, v []byte) {
+ r.data(status, _CONTENT_BINARY, v)
+}
+
+func (r *TplRender) PlainText(status int, v []byte) {
+ r.data(status, _CONTENT_PLAIN, v)
+}
+
+func (r *TplRender) execute(t *template.Template, name string, data interface{}) (*bytes.Buffer, error) {
+ buf := bufpool.Get().(*bytes.Buffer)
+ return buf, t.ExecuteTemplate(buf, name, data)
+}
+
+func (r *TplRender) addYield(t *template.Template, tplName string, data interface{}) {
+ funcs := template.FuncMap{
+ "yield": func() (template.HTML, error) {
+ buf, err := r.execute(t, tplName, data)
+ // return safe html here since we are rendering our own template
+ return template.HTML(buf.String()), err
+ },
+ "current": func() (string, error) {
+ return tplName, nil
+ },
+ }
+ t.Funcs(funcs)
+}
+
+func (r *TplRender) renderBytes(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) (*bytes.Buffer, error) {
+ t := r.TemplateSet.Get(setName)
+ if Env == DEV {
+ opt := *r.Opt
+ opt.Directory = r.TemplateSet.GetDir(setName)
+ t = r.TemplateSet.Set(setName, &opt)
+ }
+ if t == nil {
+ return nil, fmt.Errorf("html/template: template \"%s\" is undefined", tplName)
+ }
+
+ opt := r.prepareHTMLOptions(htmlOpt)
+
+ if len(opt.Layout) > 0 {
+ r.addYield(t, tplName, data)
+ tplName = opt.Layout
+ }
+
+ out, err := r.execute(t, tplName, data)
+ if err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+func (r *TplRender) renderHTML(status int, setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) {
+ r.startTime = time.Now()
+
+ out, err := r.renderBytes(setName, tplName, data, htmlOpt...)
+ if err != nil {
+ http.Error(r, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ r.Header().Set(_CONTENT_TYPE, r.Opt.HTMLContentType+r.CompiledCharset)
+ r.WriteHeader(status)
+
+ if _, err := out.WriteTo(r); err != nil {
+ out.Reset()
+ }
+ bufpool.Put(out)
+}
+
+func (r *TplRender) HTML(status int, name string, data interface{}, htmlOpt ...HTMLOptions) {
+ r.renderHTML(status, DEFAULT_TPL_SET_NAME, name, data, htmlOpt...)
+}
+
+func (r *TplRender) HTMLSet(status int, setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) {
+ r.renderHTML(status, setName, tplName, data, htmlOpt...)
+}
+
+func (r *TplRender) HTMLSetBytes(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) ([]byte, error) {
+ out, err := r.renderBytes(setName, tplName, data, htmlOpt...)
+ if err != nil {
+ return []byte(""), err
+ }
+ return out.Bytes(), nil
+}
+
+func (r *TplRender) HTMLBytes(name string, data interface{}, htmlOpt ...HTMLOptions) ([]byte, error) {
+ return r.HTMLSetBytes(DEFAULT_TPL_SET_NAME, name, data, htmlOpt...)
+}
+
+func (r *TplRender) HTMLSetString(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) (string, error) {
+ p, err := r.HTMLSetBytes(setName, tplName, data, htmlOpt...)
+ return string(p), err
+}
+
+func (r *TplRender) HTMLString(name string, data interface{}, htmlOpt ...HTMLOptions) (string, error) {
+ p, err := r.HTMLBytes(name, data, htmlOpt...)
+ return string(p), err
+}
+
+// Error writes the given HTTP status to the current ResponseWriter.
+func (r *TplRender) Error(status int, message ...string) {
+ r.WriteHeader(status)
+ if len(message) > 0 {
+ r.Write([]byte(message[0]))
+ }
+}
+
+func (r *TplRender) Status(status int) {
+ r.WriteHeader(status)
+}
+
+func (r *TplRender) prepareHTMLOptions(htmlOpt []HTMLOptions) HTMLOptions {
+ if len(htmlOpt) > 0 {
+ return htmlOpt[0]
+ }
+
+ return HTMLOptions{
+ Layout: r.Opt.Layout,
+ }
+}
+
+func (r *TplRender) SetTemplatePath(setName, dir string) {
+ if len(setName) == 0 {
+ setName = DEFAULT_TPL_SET_NAME
+ }
+ opt := *r.Opt
+ opt.Directory = dir
+ r.TemplateSet.Set(setName, &opt)
+}
+
+func (r *TplRender) HasTemplateSet(name string) bool {
+ return r.TemplateSet.Get(name) != nil
+}
+
+// DummyRender is used when the user does not choose any real render to use.
+// This way, we can print out a friendly message asking them to register one,
+// instead of an ugly and confusing 'nil pointer' panic.
+type DummyRender struct {
+ http.ResponseWriter
+}
+
+func renderNotRegistered() {
+ panic("middleware render hasn't been registered")
+}
+
+func (r *DummyRender) SetResponseWriter(http.ResponseWriter) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) JSON(int, interface{}) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) JSONString(interface{}) (string, error) {
+ renderNotRegistered()
+ return "", nil
+}
+
+func (r *DummyRender) RawData(int, []byte) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) PlainText(int, []byte) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) HTML(int, string, interface{}, ...HTMLOptions) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) HTMLSet(int, string, string, interface{}, ...HTMLOptions) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) HTMLSetString(string, string, interface{}, ...HTMLOptions) (string, error) {
+ renderNotRegistered()
+ return "", nil
+}
+
+func (r *DummyRender) HTMLString(string, interface{}, ...HTMLOptions) (string, error) {
+ renderNotRegistered()
+ return "", nil
+}
+
+func (r *DummyRender) HTMLSetBytes(string, string, interface{}, ...HTMLOptions) ([]byte, error) {
+ renderNotRegistered()
+ return nil, nil
+}
+
+func (r *DummyRender) HTMLBytes(string, interface{}, ...HTMLOptions) ([]byte, error) {
+ renderNotRegistered()
+ return nil, nil
+}
+
+func (r *DummyRender) XML(int, interface{}) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) Error(int, ...string) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) Status(int) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) SetTemplatePath(string, string) {
+ renderNotRegistered()
+}
+
+func (r *DummyRender) HasTemplateSet(string) bool {
+ renderNotRegistered()
+ return false
+}
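
Renderer maps a TplRender into each request context, so handlers can call ctx.HTML, ctx.JSON and friends directly. A sketch under the assumption that a templates/index.tmpl (or .html) file exists; the data keys are illustrative:

    m := macaron.New()
    m.Use(macaron.Renderer(macaron.RenderOptions{
        IndentJSON: true, // pretty-print JSON responses
    }))
    m.Get("/", func(ctx *macaron.Context) {
        ctx.Data["Name"] = "world"
        ctx.HTML(200, "index", ctx.Data) // renders templates/index.tmpl
    })
    m.Get("/api", func(ctx *macaron.Context) {
        ctx.JSON(200, map[string]string{"status": "ok"})
    })
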
diff --git a/vendor/gopkg.in/macaron.v1/response_writer.go b/vendor/gopkg.in/macaron.v1/response_writer.go
new file mode 100644
index 0000000000..ab54f56c03
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/response_writer.go
@@ -0,0 +1,111 @@
+// Copyright 2013 Martini Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "net/http"
+)
+
+// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about
+// the response. It is recommended that middleware handlers use this construct to wrap a ResponseWriter
+// if the functionality calls for it.
+type ResponseWriter interface {
+ http.ResponseWriter
+ http.Flusher
+ // Status returns the status code of the response or 0 if the response has not been written.
+ Status() int
+ // Written returns whether or not the ResponseWriter has been written to.
+ Written() bool
+ // Size returns the size of the response body.
+ Size() int
+ // Before allows for a function to be called before the ResponseWriter has been written to. This is
+ // useful for setting headers or any other operations that must happen before a response has been written.
+ Before(BeforeFunc)
+}
+
+// BeforeFunc is a function that is called before the ResponseWriter has been written to.
+type BeforeFunc func(ResponseWriter)
+
+// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
+func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
+ return &responseWriter{rw, 0, 0, nil}
+}
+
+type responseWriter struct {
+ http.ResponseWriter
+ status int
+ size int
+ beforeFuncs []BeforeFunc
+}
+
+func (rw *responseWriter) WriteHeader(s int) {
+ rw.callBefore()
+ rw.ResponseWriter.WriteHeader(s)
+ rw.status = s
+}
+
+func (rw *responseWriter) Write(b []byte) (int, error) {
+ if !rw.Written() {
+ // The status will be StatusOK if WriteHeader has not been called yet
+ rw.WriteHeader(http.StatusOK)
+ }
+ size, err := rw.ResponseWriter.Write(b)
+ rw.size += size
+ return size, err
+}
+
+func (rw *responseWriter) Status() int {
+ return rw.status
+}
+
+func (rw *responseWriter) Size() int {
+ return rw.size
+}
+
+func (rw *responseWriter) Written() bool {
+ return rw.status != 0
+}
+
+func (rw *responseWriter) Before(before BeforeFunc) {
+ rw.beforeFuncs = append(rw.beforeFuncs, before)
+}
+
+func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hijacker, ok := rw.ResponseWriter.(http.Hijacker)
+ if !ok {
+ return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+func (rw *responseWriter) CloseNotify() <-chan bool {
+ return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (rw *responseWriter) callBefore() {
+ for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
+ rw.beforeFuncs[i](rw)
+ }
+}
+
+func (rw *responseWriter) Flush() {
+ flusher, ok := rw.ResponseWriter.(http.Flusher)
+ if ok {
+ flusher.Flush()
+ }
+}
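
Before callbacks are run in reverse registration order immediately before the first WriteHeader call, which makes them a good place for last-moment header tweaks. A sketch inside a handler; the header name is illustrative:

    m.Get("/", func(ctx *macaron.Context) string {
        ctx.Resp.Before(func(rw macaron.ResponseWriter) {
            rw.Header().Set("X-Served-By", "macaron") // runs just before the status line is written
        })
        return "ok"
    })
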
diff --git a/vendor/gopkg.in/macaron.v1/return_handler.go b/vendor/gopkg.in/macaron.v1/return_handler.go
new file mode 100644
index 0000000000..db6eec3e9f
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/return_handler.go
@@ -0,0 +1,76 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "net/http"
+ "reflect"
+
+ "github.com/go-macaron/inject"
+)
+
+// ReturnHandler is a service that Macaron provides that is called
+// when a route handler returns something. The ReturnHandler is
+// responsible for writing to the ResponseWriter based on the values
+// that are passed into this function.
+type ReturnHandler func(*Context, []reflect.Value)
+
+func canDeref(val reflect.Value) bool {
+ return val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr
+}
+
+func isError(val reflect.Value) bool {
+ _, ok := val.Interface().(error)
+ return ok
+}
+
+func isByteSlice(val reflect.Value) bool {
+ return val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8
+}
+
+func defaultReturnHandler() ReturnHandler {
+ return func(ctx *Context, vals []reflect.Value) {
+ rv := ctx.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
+ resp := rv.Interface().(http.ResponseWriter)
+ var respVal reflect.Value
+ if len(vals) > 1 && vals[0].Kind() == reflect.Int {
+ resp.WriteHeader(int(vals[0].Int()))
+ respVal = vals[1]
+ } else if len(vals) > 0 {
+ respVal = vals[0]
+
+ if isError(respVal) {
+ err := respVal.Interface().(error)
+ if err != nil {
+ ctx.internalServerError(ctx, err)
+ }
+ return
+ } else if canDeref(respVal) {
+ if respVal.IsNil() {
+ return // Ignore nil error
+ }
+ }
+ }
+ if canDeref(respVal) {
+ respVal = respVal.Elem()
+ }
+ if isByteSlice(respVal) {
+ resp.Write(respVal.Bytes())
+ } else {
+ resp.Write([]byte(respVal.String()))
+ }
+ }
+}
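
The default return handler inspects a handler's return values: a leading int becomes the status code, a non-nil error is routed to the internal-server-error handler, and a string or []byte is written as the response body. A sketch of the supported shapes, assuming the standard errors package is imported:

    m.Get("/text", func() string { return "plain body" })             // 200 with body
    m.Get("/created", func() (int, string) { return 201, "created" }) // explicit status plus body
    m.Get("/fail", func() error { return errors.New("boom") })        // handled by InternalServerError
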
diff --git a/vendor/gopkg.in/macaron.v1/router.go b/vendor/gopkg.in/macaron.v1/router.go
new file mode 100644
index 0000000000..f9b421a330
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/router.go
@@ -0,0 +1,360 @@
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var (
+ // Known HTTP methods.
+ _HTTP_METHODS = map[string]bool{
+ "GET": true,
+ "POST": true,
+ "PUT": true,
+ "DELETE": true,
+ "PATCH": true,
+ "OPTIONS": true,
+ "HEAD": true,
+ }
+)
+
+// routeMap represents a thread-safe map for route tree.
+type routeMap struct {
+ lock sync.RWMutex
+ routes map[string]map[string]*Leaf
+}
+
+// NewRouteMap initializes and returns a new routeMap.
+func NewRouteMap() *routeMap {
+ rm := &routeMap{
+ routes: make(map[string]map[string]*Leaf),
+ }
+ for m := range _HTTP_METHODS {
+ rm.routes[m] = make(map[string]*Leaf)
+ }
+ return rm
+}
+
+// getLeaf returns Leaf object if a route has been registered.
+func (rm *routeMap) getLeaf(method, pattern string) *Leaf {
+ rm.lock.RLock()
+ defer rm.lock.RUnlock()
+
+ return rm.routes[method][pattern]
+}
+
+// add adds new route to route tree map.
+func (rm *routeMap) add(method, pattern string, leaf *Leaf) {
+ rm.lock.Lock()
+ defer rm.lock.Unlock()
+
+ rm.routes[method][pattern] = leaf
+}
+
+type group struct {
+ pattern string
+ handlers []Handler
+}
+
+// Router represents a Macaron router layer.
+type Router struct {
+ m *Macaron
+ autoHead bool
+ routers map[string]*Tree
+ *routeMap
+ namedRoutes map[string]*Leaf
+
+ groups []group
+ notFound http.HandlerFunc
+ internalServerError func(*Context, error)
+}
+
+func NewRouter() *Router {
+ return &Router{
+ routers: make(map[string]*Tree),
+ routeMap: NewRouteMap(),
+ namedRoutes: make(map[string]*Leaf),
+ }
+}
+
+// SetAutoHead sets the value that determines whether to add the HEAD method automatically
+// when the GET method is added. Combo routers are not affected by this value.
+func (r *Router) SetAutoHead(v bool) {
+ r.autoHead = v
+}
+
+type Params map[string]string
+
+// Handle is a function that can be registered to a route to handle HTTP requests.
+// Like http.HandlerFunc, but has a third parameter for the values of wildcards (variables).
+type Handle func(http.ResponseWriter, *http.Request, Params)
+
+// Route represents a wrapper of leaf route and upper level router.
+type Route struct {
+ router *Router
+ leaf *Leaf
+}
+
+// Name sets the name of the route.
+func (r *Route) Name(name string) {
+ if len(name) == 0 {
+ panic("route name cannot be empty")
+ } else if r.router.namedRoutes[name] != nil {
+ panic("route with given name already exists")
+ }
+ r.router.namedRoutes[name] = r.leaf
+}
+
+// handle adds new route to the router tree.
+func (r *Router) handle(method, pattern string, handle Handle) *Route {
+ method = strings.ToUpper(method)
+
+ var leaf *Leaf
+ // Prevent duplicate routes.
+ if leaf = r.getLeaf(method, pattern); leaf != nil {
+ return &Route{r, leaf}
+ }
+
+ // Validate HTTP methods.
+ if !_HTTP_METHODS[method] && method != "*" {
+ panic("unknown HTTP method: " + method)
+ }
+
+ // Generate the methods that need to be registered.
+ methods := make(map[string]bool)
+ if method == "*" {
+ for m := range _HTTP_METHODS {
+ methods[m] = true
+ }
+ } else {
+ methods[method] = true
+ }
+
+ // Add to router tree.
+ for m := range methods {
+ if t, ok := r.routers[m]; ok {
+ leaf = t.Add(pattern, handle)
+ } else {
+ t := NewTree()
+ leaf = t.Add(pattern, handle)
+ r.routers[m] = t
+ }
+ r.add(m, pattern, leaf)
+ }
+ return &Route{r, leaf}
+}
+
+// Handle registers a new request handle with the given pattern, method and handlers.
+func (r *Router) Handle(method string, pattern string, handlers []Handler) *Route {
+ if len(r.groups) > 0 {
+ groupPattern := ""
+ h := make([]Handler, 0)
+ for _, g := range r.groups {
+ groupPattern += g.pattern
+ h = append(h, g.handlers...)
+ }
+
+ pattern = groupPattern + pattern
+ h = append(h, handlers...)
+ handlers = h
+ }
+ validateHandlers(handlers)
+
+ return r.handle(method, pattern, func(resp http.ResponseWriter, req *http.Request, params Params) {
+ c := r.m.createContext(resp, req)
+ c.params = params
+ c.handlers = make([]Handler, 0, len(r.m.handlers)+len(handlers))
+ c.handlers = append(c.handlers, r.m.handlers...)
+ c.handlers = append(c.handlers, handlers...)
+ c.run()
+ })
+}
+
+func (r *Router) Group(pattern string, fn func(), h ...Handler) {
+ r.groups = append(r.groups, group{pattern, h})
+ fn()
+ r.groups = r.groups[:len(r.groups)-1]
+}
+
+// Get is a shortcut for r.Handle("GET", pattern, handlers)
+func (r *Router) Get(pattern string, h ...Handler) (leaf *Route) {
+ leaf = r.Handle("GET", pattern, h)
+ if r.autoHead {
+ r.Head(pattern, h...)
+ }
+ return leaf
+}
+
+// Patch is a shortcut for r.Handle("PATCH", pattern, handlers)
+func (r *Router) Patch(pattern string, h ...Handler) *Route {
+ return r.Handle("PATCH", pattern, h)
+}
+
+// Post is a shortcut for r.Handle("POST", pattern, handlers)
+func (r *Router) Post(pattern string, h ...Handler) *Route {
+ return r.Handle("POST", pattern, h)
+}
+
+// Put is a shortcut for r.Handle("PUT", pattern, handlers)
+func (r *Router) Put(pattern string, h ...Handler) *Route {
+ return r.Handle("PUT", pattern, h)
+}
+
+// Delete is a shortcut for r.Handle("DELETE", pattern, handlers)
+func (r *Router) Delete(pattern string, h ...Handler) *Route {
+ return r.Handle("DELETE", pattern, h)
+}
+
+// Options is a shortcut for r.Handle("OPTIONS", pattern, handlers)
+func (r *Router) Options(pattern string, h ...Handler) *Route {
+ return r.Handle("OPTIONS", pattern, h)
+}
+
+// Head is a shortcut for r.Handle("HEAD", pattern, handlers)
+func (r *Router) Head(pattern string, h ...Handler) *Route {
+ return r.Handle("HEAD", pattern, h)
+}
+
+// Any is a shortcut for r.Handle("*", pattern, handlers)
+func (r *Router) Any(pattern string, h ...Handler) *Route {
+ return r.Handle("*", pattern, h)
+}
+
+// Route is a shortcut for registering the same handlers for multiple HTTP methods.
+//
+// Example:
+// m.Route("/", "GET,POST", h)
+func (r *Router) Route(pattern, methods string, h ...Handler) (route *Route) {
+ for _, m := range strings.Split(methods, ",") {
+ route = r.Handle(strings.TrimSpace(m), pattern, h)
+ }
+ return route
+}
+
+// Combo returns a combo router.
+func (r *Router) Combo(pattern string, h ...Handler) *ComboRouter {
+ return &ComboRouter{r, pattern, h, map[string]bool{}, nil}
+}
+
+// NotFound sets the configurable http.HandlerFunc which is called when no matching route is
+// found. If it is not set, http.NotFound is used.
+// Be sure to set the 404 response code in your handler.
+func (r *Router) NotFound(handlers ...Handler) {
+ validateHandlers(handlers)
+ r.notFound = func(rw http.ResponseWriter, req *http.Request) {
+ c := r.m.createContext(rw, req)
+ c.handlers = append(r.m.handlers, handlers...)
+ c.run()
+ }
+}
+
+// InternalServerError sets the configurable handler which is called when a route handler returns
+// an error. If it is not set, the default handler is used.
+// Be sure to set the 500 response code in your handler.
+func (r *Router) InternalServerError(handlers ...Handler) {
+ validateHandlers(handlers)
+ r.internalServerError = func(c *Context, err error) {
+ c.index = 0
+ c.handlers = handlers
+ c.Map(err)
+ c.run()
+ }
+}
+
+func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if t, ok := r.routers[req.Method]; ok {
+ h, p, ok := t.Match(req.URL.Path)
+ if ok {
+ if splat, ok := p["*0"]; ok {
+ p["*"] = splat // Easy name.
+ }
+ h(rw, req, p)
+ return
+ }
+ }
+
+ r.notFound(rw, req)
+}
+
+// URLFor builds the path part of a URL from the given pair values.
+func (r *Router) URLFor(name string, pairs ...string) string {
+ leaf, ok := r.namedRoutes[name]
+ if !ok {
+ panic("route with given name does not exist: " + name)
+ }
+ return leaf.URLPath(pairs...)
+}
+
+// ComboRouter represents a combo router.
+type ComboRouter struct {
+ router *Router
+ pattern string
+ handlers []Handler
+ methods map[string]bool // Registered methods.
+
+ lastRoute *Route
+}
+
+func (cr *ComboRouter) checkMethod(name string) {
+ if cr.methods[name] {
+ panic("method '" + name + "' has already been registered")
+ }
+ cr.methods[name] = true
+}
+
+func (cr *ComboRouter) route(fn func(string, ...Handler) *Route, method string, h ...Handler) *ComboRouter {
+ cr.checkMethod(method)
+ cr.lastRoute = fn(cr.pattern, append(cr.handlers, h...)...)
+ return cr
+}
+
+func (cr *ComboRouter) Get(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Get, "GET", h...)
+}
+
+func (cr *ComboRouter) Patch(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Patch, "PATCH", h...)
+}
+
+func (cr *ComboRouter) Post(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Post, "POST", h...)
+}
+
+func (cr *ComboRouter) Put(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Put, "PUT", h...)
+}
+
+func (cr *ComboRouter) Delete(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Delete, "DELETE", h...)
+}
+
+func (cr *ComboRouter) Options(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Options, "OPTIONS", h...)
+}
+
+func (cr *ComboRouter) Head(h ...Handler) *ComboRouter {
+ return cr.route(cr.router.Head, "HEAD", h...)
+}
+
+// Name sets the name of the ComboRouter route.
+func (cr *ComboRouter) Name(name string) {
+ if cr.lastRoute == nil {
+ panic("no corresponding route to be named")
+ }
+ cr.lastRoute.Name(name)
+}
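
Group nests a pattern prefix and shared handlers around every route registered inside fn, and Combo registers several methods on a single pattern. A sketch; the handler names (listUsers, createUser, getUser, deleteUser, requireAuth) are hypothetical placeholders:

    m.Group("/api", func() {
        m.Get("/users", listUsers)   // GET  /api/users
        m.Post("/users", createUser) // POST /api/users

        m.Combo("/users/:id").
            Get(getUser).      // GET    /api/users/:id
            Delete(deleteUser) // DELETE /api/users/:id
    }, requireAuth) // runs before every handler registered inside the group
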
diff --git a/vendor/gopkg.in/macaron.v1/static.go b/vendor/gopkg.in/macaron.v1/static.go
new file mode 100644
index 0000000000..4ff8342fc5
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/static.go
@@ -0,0 +1,205 @@
+// Copyright 2013 Martini Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "log"
+ "net/http"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+// StaticOptions is a struct for specifying configuration options for the macaron.Static middleware.
+type StaticOptions struct {
+ // Prefix is the optional prefix used to serve the static directory content
+ Prefix string
+ // SkipLogging will disable [Static] log messages when a static file is served.
+ SkipLogging bool
+ // IndexFile defines which file to serve as index if it exists.
+ IndexFile string
+ // Expires defines which user-defined function to use for producing an HTTP Expires header
+ // https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
+ Expires func() string
+ // FileSystem is the interface for supporting any implementation of a file system.
+ FileSystem http.FileSystem
+}
+
+// FIXME: to be deleted.
+type staticMap struct {
+ lock sync.RWMutex
+ data map[string]*http.Dir
+}
+
+func (sm *staticMap) Set(dir *http.Dir) {
+ sm.lock.Lock()
+ defer sm.lock.Unlock()
+
+ sm.data[string(*dir)] = dir
+}
+
+func (sm *staticMap) Get(name string) *http.Dir {
+ sm.lock.RLock()
+ defer sm.lock.RUnlock()
+
+ return sm.data[name]
+}
+
+func (sm *staticMap) Delete(name string) {
+ sm.lock.Lock()
+ defer sm.lock.Unlock()
+
+ delete(sm.data, name)
+}
+
+var statics = staticMap{sync.RWMutex{}, map[string]*http.Dir{}}
+
+// staticFileSystem implements http.FileSystem interface.
+type staticFileSystem struct {
+ dir *http.Dir
+}
+
+func newStaticFileSystem(directory string) staticFileSystem {
+ if !filepath.IsAbs(directory) {
+ directory = filepath.Join(Root, directory)
+ }
+ dir := http.Dir(directory)
+ statics.Set(&dir)
+ return staticFileSystem{&dir}
+}
+
+func (fs staticFileSystem) Open(name string) (http.File, error) {
+ return fs.dir.Open(name)
+}
+
+func prepareStaticOption(dir string, opt StaticOptions) StaticOptions {
+ // Defaults
+ if len(opt.IndexFile) == 0 {
+ opt.IndexFile = "index.html"
+ }
+ // Normalize the prefix if provided
+ if opt.Prefix != "" {
+ // Ensure we have a leading '/'
+ if opt.Prefix[0] != '/' {
+ opt.Prefix = "/" + opt.Prefix
+ }
+ // Remove any trailing '/'
+ opt.Prefix = strings.TrimRight(opt.Prefix, "/")
+ }
+ if opt.FileSystem == nil {
+ opt.FileSystem = newStaticFileSystem(dir)
+ }
+ return opt
+}
+
+func prepareStaticOptions(dir string, options []StaticOptions) StaticOptions {
+ var opt StaticOptions
+ if len(options) > 0 {
+ opt = options[0]
+ }
+ return prepareStaticOption(dir, opt)
+}
+
+func staticHandler(ctx *Context, log *log.Logger, opt StaticOptions) bool {
+ if ctx.Req.Method != "GET" && ctx.Req.Method != "HEAD" {
+ return false
+ }
+
+ file := ctx.Req.URL.Path
+ // if we have a prefix, filter requests by stripping the prefix
+ if opt.Prefix != "" {
+ if !strings.HasPrefix(file, opt.Prefix) {
+ return false
+ }
+ file = file[len(opt.Prefix):]
+ if file != "" && file[0] != '/' {
+ return false
+ }
+ }
+
+ f, err := opt.FileSystem.Open(file)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+
+ fi, err := f.Stat()
+ if err != nil {
+ return true // File exists but failed to open.
+ }
+
+ // Try to serve index file
+ if fi.IsDir() {
+ // Redirect if missing trailing slash.
+ if !strings.HasSuffix(ctx.Req.URL.Path, "/") {
+ http.Redirect(ctx.Resp, ctx.Req.Request, ctx.Req.URL.Path+"/", http.StatusFound)
+ return true
+ }
+
+ file = path.Join(file, opt.IndexFile)
+ f, err = opt.FileSystem.Open(file)
+ if err != nil {
+ return false // Discard error.
+ }
+ defer f.Close()
+
+ fi, err = f.Stat()
+ if err != nil || fi.IsDir() {
+ return true
+ }
+ }
+
+ if !opt.SkipLogging {
+ log.Println("[Static] Serving " + file)
+ }
+
+ // Add an Expires header to the static content
+ if opt.Expires != nil {
+ ctx.Resp.Header().Set("Expires", opt.Expires())
+ }
+
+ http.ServeContent(ctx.Resp, ctx.Req.Request, file, fi.ModTime(), f)
+ return true
+}
+
+// Static returns a middleware handler that serves static files in the given directory.
+func Static(directory string, staticOpt ...StaticOptions) Handler {
+ opt := prepareStaticOptions(directory, staticOpt)
+
+ return func(ctx *Context, log *log.Logger) {
+ staticHandler(ctx, log, opt)
+ }
+}
+
+// Statics registers multiple static middleware handlers all at once.
+func Statics(opt StaticOptions, dirs ...string) Handler {
+ if len(dirs) == 0 {
+ panic("no static directory is given")
+ }
+ opts := make([]StaticOptions, len(dirs))
+ for i := range dirs {
+ opts[i] = prepareStaticOption(dirs[i], opt)
+ }
+
+ return func(ctx *Context, log *log.Logger) {
+ for i := range opts {
+ if staticHandler(ctx, log, opts[i]) {
+ return
+ }
+ }
+ }
+}
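
Static serves files from the given directory for GET and HEAD requests, optionally behind a URL prefix and with a computed Expires header, while Statics registers several directories at once. A sketch; the directory and prefix names are illustrative:

    m.Use(macaron.Static("public",
        macaron.StaticOptions{
            Prefix:      "assets",     // serve the directory under /assets/...
            SkipLogging: true,         // silence "[Static] Serving ..." log lines
            IndexFile:   "index.html", // already the default
        },
    ))
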
diff --git a/vendor/gopkg.in/macaron.v1/tree.go b/vendor/gopkg.in/macaron.v1/tree.go
new file mode 100644
index 0000000000..8bba72fb34
--- /dev/null
+++ b/vendor/gopkg.in/macaron.v1/tree.go
@@ -0,0 +1,379 @@
+// Copyright 2015 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package macaron
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/Unknwon/com"
+)
+
+type patternType int8
+
+const (
+ _PATTERN_STATIC patternType = iota // /home
+ _PATTERN_REGEXP // /:id([0-9]+)
+ _PATTERN_PATH_EXT // /*.*
+ _PATTERN_HOLDER // /:user
+ _PATTERN_MATCH_ALL // /*
+)
+
+// Leaf represents a leaf route information.
+type Leaf struct {
+ parent *Tree
+
+ typ patternType
+ pattern string
+ rawPattern string // Contains wildcard instead of regexp
+ wildcards []string
+ reg *regexp.Regexp
+ optional bool
+
+ handle Handle
+}
+
+var wildcardPattern = regexp.MustCompile(`:[a-zA-Z0-9]+`)
+
+func isSpecialRegexp(pattern, regStr string, pos []int) bool {
+ return len(pattern) >= pos[1]+len(regStr) && pattern[pos[1]:pos[1]+len(regStr)] == regStr
+}
+
+// getNextWildcard tries to find the next wildcard and updates the pattern with the corresponding regexp.
+func getNextWildcard(pattern string) (wildcard, _ string) {
+ pos := wildcardPattern.FindStringIndex(pattern)
+ if pos == nil {
+ return "", pattern
+ }
+ wildcard = pattern[pos[0]:pos[1]]
+
+ // Reach last character or no regexp is given.
+ if len(pattern) == pos[1] {
+ return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1)
+ } else if pattern[pos[1]] != '(' {
+ switch {
+ case isSpecialRegexp(pattern, ":int", pos):
+ pattern = strings.Replace(pattern, ":int", "([0-9]+)", 1)
+ case isSpecialRegexp(pattern, ":string", pos):
+ pattern = strings.Replace(pattern, ":string", "([\\w]+)", 1)
+ default:
+ return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1)
+ }
+ }
+
+ // Cut out placeholder directly.
+ return wildcard, pattern[:pos[0]] + pattern[pos[1]:]
+}
+
+func getWildcards(pattern string) (string, []string) {
+ wildcards := make([]string, 0, 2)
+
+ // Keep getting next wildcard until nothing is left.
+ var wildcard string
+ for {
+ wildcard, pattern = getNextWildcard(pattern)
+ if len(wildcard) > 0 {
+ wildcards = append(wildcards, wildcard)
+ } else {
+ break
+ }
+ }
+
+ return pattern, wildcards
+}
+
+// getRawPattern removes all regexps but keeps wildcards for building the URL path.
+func getRawPattern(rawPattern string) string {
+ rawPattern = strings.Replace(rawPattern, ":int", "", -1)
+ rawPattern = strings.Replace(rawPattern, ":string", "", -1)
+
+ for {
+ startIdx := strings.Index(rawPattern, "(")
+ if startIdx == -1 {
+ break
+ }
+
+ closeIdx := strings.Index(rawPattern, ")")
+ if closeIdx > -1 {
+ rawPattern = rawPattern[:startIdx] + rawPattern[closeIdx+1:]
+ }
+ }
+ return rawPattern
+}
+
+func checkPattern(pattern string) (typ patternType, rawPattern string, wildcards []string, reg *regexp.Regexp) {
+ pattern = strings.TrimLeft(pattern, "?")
+ rawPattern = getRawPattern(pattern)
+
+ if pattern == "*" {
+ typ = _PATTERN_MATCH_ALL
+ } else if pattern == "*.*" {
+ typ = _PATTERN_PATH_EXT
+ } else if strings.Contains(pattern, ":") {
+ typ = _PATTERN_REGEXP
+ pattern, wildcards = getWildcards(pattern)
+ if pattern == "(.+)" {
+ typ = _PATTERN_HOLDER
+ } else {
+ reg = regexp.MustCompile(pattern)
+ }
+ }
+ return typ, rawPattern, wildcards, reg
+}
+
+func NewLeaf(parent *Tree, pattern string, handle Handle) *Leaf {
+ typ, rawPattern, wildcards, reg := checkPattern(pattern)
+ optional := false
+ if len(pattern) > 0 && pattern[0] == '?' {
+ optional = true
+ }
+ return &Leaf{parent, typ, pattern, rawPattern, wildcards, reg, optional, handle}
+}
+
+// URLPath builds the path part of a URL from the given pair values.
+func (l *Leaf) URLPath(pairs ...string) string {
+ if len(pairs)%2 != 0 {
+ panic("number of pairs does not match")
+ }
+
+ urlPath := l.rawPattern
+ parent := l.parent
+ for parent != nil {
+ urlPath = parent.rawPattern + "/" + urlPath
+ parent = parent.parent
+ }
+ for i := 0; i < len(pairs); i += 2 {
+ if len(pairs[i]) == 0 {
+ panic("pair value cannot be empty: " + com.ToStr(i))
+ } else if pairs[i][0] != ':' && pairs[i] != "*" && pairs[i] != "*.*" {
+ pairs[i] = ":" + pairs[i]
+ }
+ urlPath = strings.Replace(urlPath, pairs[i], pairs[i+1], 1)
+ }
+ return urlPath
+}
+
+// Tree represents a router tree in Macaron.
+type Tree struct {
+ parent *Tree
+
+ typ patternType
+ pattern string
+ rawPattern string
+ wildcards []string
+ reg *regexp.Regexp
+
+ subtrees []*Tree
+ leaves []*Leaf
+}
+
+func NewSubtree(parent *Tree, pattern string) *Tree {
+ typ, rawPattern, wildcards, reg := checkPattern(pattern)
+ return &Tree{parent, typ, pattern, rawPattern, wildcards, reg, make([]*Tree, 0, 5), make([]*Leaf, 0, 5)}
+}
+
+func NewTree() *Tree {
+ return NewSubtree(nil, "")
+}
+
+func (t *Tree) addLeaf(pattern string, handle Handle) *Leaf {
+ for i := 0; i < len(t.leaves); i++ {
+ if t.leaves[i].pattern == pattern {
+ return t.leaves[i]
+ }
+ }
+
+ leaf := NewLeaf(t, pattern, handle)
+
+ // Add the exact same leaf to the grandparent/parent level without the optional mark.
+ if leaf.optional {
+ parent := leaf.parent
+ if parent.parent != nil {
+ parent.parent.addLeaf(parent.pattern, handle)
+ } else {
+ parent.addLeaf("", handle) // Root tree can add as empty pattern.
+ }
+ }
+
+ i := 0
+ for ; i < len(t.leaves); i++ {
+ if leaf.typ < t.leaves[i].typ {
+ break
+ }
+ }
+
+ if i == len(t.leaves) {
+ t.leaves = append(t.leaves, leaf)
+ } else {
+ t.leaves = append(t.leaves[:i], append([]*Leaf{leaf}, t.leaves[i:]...)...)
+ }
+ return leaf
+}
+
+func (t *Tree) addSubtree(segment, pattern string, handle Handle) *Leaf {
+ for i := 0; i < len(t.subtrees); i++ {
+ if t.subtrees[i].pattern == segment {
+ return t.subtrees[i].addNextSegment(pattern, handle)
+ }
+ }
+
+ subtree := NewSubtree(t, segment)
+ i := 0
+ for ; i < len(t.subtrees); i++ {
+ if subtree.typ < t.subtrees[i].typ {
+ break
+ }
+ }
+
+ if i == len(t.subtrees) {
+ t.subtrees = append(t.subtrees, subtree)
+ } else {
+ t.subtrees = append(t.subtrees[:i], append([]*Tree{subtree}, t.subtrees[i:]...)...)
+ }
+ return subtree.addNextSegment(pattern, handle)
+}
+
+func (t *Tree) addNextSegment(pattern string, handle Handle) *Leaf {
+ pattern = strings.TrimPrefix(pattern, "/")
+
+ i := strings.Index(pattern, "/")
+ if i == -1 {
+ return t.addLeaf(pattern, handle)
+ }
+ return t.addSubtree(pattern[:i], pattern[i+1:], handle)
+}
+
+func (t *Tree) Add(pattern string, handle Handle) *Leaf {
+ pattern = strings.TrimSuffix(pattern, "/")
+ return t.addNextSegment(pattern, handle)
+}
+
+func (t *Tree) matchLeaf(globLevel int, url string, params Params) (Handle, bool) {
+ for i := 0; i < len(t.leaves); i++ {
+ switch t.leaves[i].typ {
+ case _PATTERN_STATIC:
+ if t.leaves[i].pattern == url {
+ return t.leaves[i].handle, true
+ }
+ case _PATTERN_REGEXP:
+ results := t.leaves[i].reg.FindStringSubmatch(url)
+ // Number of results and wildcards should be exactly the same.
+ if len(results)-1 != len(t.leaves[i].wildcards) {
+ break
+ }
+
+ for j := 0; j < len(t.leaves[i].wildcards); j++ {
+ params[t.leaves[i].wildcards[j]] = results[j+1]
+ }
+ return t.leaves[i].handle, true
+ case _PATTERN_PATH_EXT:
+ j := strings.LastIndex(url, ".")
+ if j > -1 {
+ params[":path"] = url[:j]
+ params[":ext"] = url[j+1:]
+ } else {
+ params[":path"] = url
+ }
+ return t.leaves[i].handle, true
+ case _PATTERN_HOLDER:
+ params[t.leaves[i].wildcards[0]] = url
+ return t.leaves[i].handle, true
+ case _PATTERN_MATCH_ALL:
+ params["*"] = url
+ params["*"+com.ToStr(globLevel)] = url
+ return t.leaves[i].handle, true
+ }
+ }
+ return nil, false
+}
+
+func (t *Tree) matchSubtree(globLevel int, segment, url string, params Params) (Handle, bool) {
+ for i := 0; i < len(t.subtrees); i++ {
+ switch t.subtrees[i].typ {
+ case _PATTERN_STATIC:
+ if t.subtrees[i].pattern == segment {
+ if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok {
+ return handle, true
+ }
+ }
+ case _PATTERN_REGEXP:
+ results := t.subtrees[i].reg.FindStringSubmatch(segment)
+ if len(results)-1 != len(t.subtrees[i].wildcards) {
+ break
+ }
+
+ for j := 0; j < len(t.subtrees[i].wildcards); j++ {
+ params[t.subtrees[i].wildcards[j]] = results[j+1]
+ }
+ if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok {
+ return handle, true
+ }
+ case _PATTERN_HOLDER:
+ if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok {
+ params[t.subtrees[i].wildcards[0]] = segment
+ return handle, true
+ }
+ case _PATTERN_MATCH_ALL:
+ if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok {
+ params["*"+com.ToStr(globLevel)] = segment
+ return handle, true
+ }
+ }
+ }
+
+ if len(t.leaves) > 0 {
+ leaf := t.leaves[len(t.leaves)-1]
+ if leaf.typ == _PATTERN_PATH_EXT {
+ url = segment + "/" + url
+ j := strings.LastIndex(url, ".")
+ if j > -1 {
+ params[":path"] = url[:j]
+ params[":ext"] = url[j+1:]
+ } else {
+ params[":path"] = url
+ }
+ return leaf.handle, true
+ } else if leaf.typ == _PATTERN_MATCH_ALL {
+ params["*"] = segment + "/" + url
+ params["*"+com.ToStr(globLevel)] = segment + "/" + url
+ return leaf.handle, true
+ }
+ }
+ return nil, false
+}
+
+func (t *Tree) matchNextSegment(globLevel int, url string, params Params) (Handle, bool) {
+ i := strings.Index(url, "/")
+ if i == -1 {
+ return t.matchLeaf(globLevel, url, params)
+ }
+ return t.matchSubtree(globLevel, url[:i], url[i+1:], params)
+}
+
+func (t *Tree) Match(url string) (Handle, Params, bool) {
+ url = strings.TrimPrefix(url, "/")
+ url = strings.TrimSuffix(url, "/")
+ params := make(Params)
+ handle, ok := t.matchNextSegment(0, url, params)
+ return handle, params, ok
+}
+
+// MatchTest returns true if given URL is matched by given pattern.
+func MatchTest(pattern, url string) bool {
+ t := NewTree()
+ t.Add(pattern, nil)
+ _, _, ok := t.Match(url)
+ return ok
+}
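
Editorial note: a quick illustration of the routing tree above, using only functions defined in this file (NewTree, Add, Match, MatchTest). As in matchLeaf, wildcard keys keep their leading colon in the Params map; the repository names are placeholders.

    package main

    import (
    	"fmt"

    	"gopkg.in/macaron.v1"
    )

    func main() {
    	// One-shot check of a pattern against a URL.
    	fmt.Println(macaron.MatchTest("/repos/:user/:repo", "/repos/go-gitea/gitea")) // true

    	// Using the tree directly also yields the captured wildcards.
    	t := macaron.NewTree()
    	t.Add("/repos/:user/:repo", nil)
    	_, params, ok := t.Match("/repos/go-gitea/gitea")
    	fmt.Println(ok, params[":user"], params[":repo"]) // true go-gitea gitea
    }
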
diff --git a/vendor/gopkg.in/redis.v2/LICENSE b/vendor/gopkg.in/redis.v2/LICENSE
new file mode 100644
index 0000000000..6855a95feb
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Redis Go Client Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/redis.v2/Makefile b/vendor/gopkg.in/redis.v2/Makefile
new file mode 100644
index 0000000000..b250d9bfa9
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/Makefile
@@ -0,0 +1,3 @@
+all:
+ go test gopkg.in/redis.v2 -cpu=1,2,4
+ go test gopkg.in/redis.v2 -short -race
diff --git a/vendor/gopkg.in/redis.v2/README.md b/vendor/gopkg.in/redis.v2/README.md
new file mode 100644
index 0000000000..ddf875f9a1
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/README.md
@@ -0,0 +1,46 @@
+Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+=======================
+
+Supports:
+
+- Redis 2.8 commands except QUIT, MONITOR, SLOWLOG and SYNC.
+- Pub/sub.
+- Transactions.
+- Pipelining.
+- Connection pool.
+- TLS connections.
+- Thread safety.
+- Timeouts.
+- Redis Sentinel.
+
+API docs: http://godoc.org/gopkg.in/redis.v2.
+Examples: http://godoc.org/gopkg.in/redis.v2#pkg-examples.
+
+Installation
+------------
+
+Install:
+
+ go get gopkg.in/redis.v2
+
+Look and feel
+-------------
+
+Some corner cases:
+
+ SORT list LIMIT 0 2 ASC
+ vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+ ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+ vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+ }).Result()
+
+ ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+ vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
+
+ EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+ vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()
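
Editorial note: a minimal connect/set/get sketch to complement the corner cases above. Set, Get and Result are defined in this commit; the NewTCPClient constructor, Options struct and Close come from the package's public API and are assumed here.

    package main

    import (
    	"fmt"

    	"gopkg.in/redis.v2"
    )

    func main() {
    	client := redis.NewTCPClient(&redis.Options{
    		Addr: "localhost:6379",
    	})
    	defer client.Close()

    	if err := client.Set("greeting", "hello").Err(); err != nil {
    		panic(err)
    	}
    	val, err := client.Get("greeting").Result()
    	fmt.Println(val, err) // hello <nil>
    }
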
diff --git a/vendor/gopkg.in/redis.v2/command.go b/vendor/gopkg.in/redis.v2/command.go
new file mode 100644
index 0000000000..d7c76cf92a
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/command.go
@@ -0,0 +1,597 @@
+package redis
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "gopkg.in/bufio.v1"
+)
+
+var (
+ _ Cmder = (*Cmd)(nil)
+ _ Cmder = (*SliceCmd)(nil)
+ _ Cmder = (*StatusCmd)(nil)
+ _ Cmder = (*IntCmd)(nil)
+ _ Cmder = (*DurationCmd)(nil)
+ _ Cmder = (*BoolCmd)(nil)
+ _ Cmder = (*StringCmd)(nil)
+ _ Cmder = (*FloatCmd)(nil)
+ _ Cmder = (*StringSliceCmd)(nil)
+ _ Cmder = (*BoolSliceCmd)(nil)
+ _ Cmder = (*StringStringMapCmd)(nil)
+ _ Cmder = (*ZSliceCmd)(nil)
+ _ Cmder = (*ScanCmd)(nil)
+)
+
+type Cmder interface {
+ args() []string
+ parseReply(*bufio.Reader) error
+ setErr(error)
+
+ writeTimeout() *time.Duration
+ readTimeout() *time.Duration
+
+ Err() error
+ String() string
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ cmd.setErr(e)
+ }
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ s := strings.Join(cmd.args(), " ")
+ if err := cmd.Err(); err != nil {
+ return s + ": " + err.Error()
+ }
+ if val != nil {
+ return s + ": " + fmt.Sprint(val)
+ }
+ return s
+
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ _args []string
+
+ err error
+
+ _writeTimeout, _readTimeout *time.Duration
+}
+
+func newBaseCmd(args ...string) *baseCmd {
+ return &baseCmd{
+ _args: args,
+ }
+}
+
+func (cmd *baseCmd) Err() error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return nil
+}
+
+func (cmd *baseCmd) args() []string {
+ return cmd._args
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) writeTimeout() *time.Duration {
+ return cmd._writeTimeout
+}
+
+func (cmd *baseCmd) setWriteTimeout(d time.Duration) {
+ cmd._writeTimeout = &d
+}
+
+func (cmd *baseCmd) setErr(e error) {
+ cmd.err = e
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ *baseCmd
+
+ val interface{}
+}
+
+func NewCmd(args ...string) *Cmd {
+ return &Cmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) parseReply(rd *bufio.Reader) error {
+ cmd.val, cmd.err = parseReply(rd, parseSlice)
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ *baseCmd
+
+ val []interface{}
+}
+
+func NewSliceCmd(args ...string) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ *baseCmd
+
+ val string
+}
+
+func NewStatusCmd(args ...string) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ *baseCmd
+
+ val int64
+}
+
+func NewIntCmd(args ...string) *IntCmd {
+ return &IntCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(int64)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ *baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: newBaseCmd(args...),
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = time.Duration(v.(int64)) * cmd.precision
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ *baseCmd
+
+ val bool
+}
+
+func NewBoolCmd(args ...string) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(int64) == 1
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ *baseCmd
+
+ val string
+}
+
+func NewStringCmd(args ...string) *StringCmd {
+ return &StringCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.val, 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.val, 10, 64)
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.val, 64)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ *baseCmd
+
+ val float64
+}
+
+func NewFloatCmd(args ...string) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, nil)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64)
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ *baseCmd
+
+ val []string
+}
+
+func NewStringSliceCmd(args ...string) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseStringSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ *baseCmd
+
+ val []bool
+}
+
+func NewBoolSliceCmd(args ...string) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseBoolSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]bool)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ *baseCmd
+
+ val map[string]string
+}
+
+func NewStringStringMapCmd(args ...string) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseStringStringMap)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.(map[string]string)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ *baseCmd
+
+ val []Z
+}
+
+func NewZSliceCmd(args ...string) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error {
+ v, err := parseReply(rd, parseZSlice)
+ if err != nil {
+ cmd.err = err
+ return err
+ }
+ cmd.val = v.([]Z)
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ *baseCmd
+
+ cursor int64
+ keys []string
+}
+
+func NewScanCmd(args ...string) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: newBaseCmd(args...),
+ }
+}
+
+func (cmd *ScanCmd) Val() (int64, []string) {
+ return cmd.cursor, cmd.keys
+}
+
+func (cmd *ScanCmd) Result() (int64, []string, error) {
+ return cmd.cursor, cmd.keys, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.keys)
+}
+
+func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error {
+ vi, err := parseReply(rd, parseSlice)
+ if err != nil {
+ cmd.err = err
+ return cmd.err
+ }
+ v := vi.([]interface{})
+
+ cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64)
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ keys := v[1].([]interface{})
+ for _, keyi := range keys {
+ cmd.keys = append(cmd.keys, keyi.(string))
+ }
+
+ return nil
+}
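
Editorial note: the typed command pattern above is what callers interact with — every call returns a *XxxCmd whose Val/Result/typed accessors carry both value and error. A short sketch, assuming an already connected *redis.Client named client and imports of fmt and gopkg.in/redis.v2:

    func recordVisit(client *redis.Client) error {
    	if err := client.Set("visits", "41").Err(); err != nil {
    		return err
    	}
    	// IntCmd carries the integer reply plus any error.
    	if _, err := client.Incr("visits").Result(); err != nil {
    		return err
    	}
    	// StringCmd offers typed conversions of the raw reply.
    	visits, err := client.Get("visits").Int64()
    	if err != nil {
    		return err
    	}
    	fmt.Printf("visits so far: %d\n", visits)
    	return nil
    }
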
diff --git a/vendor/gopkg.in/redis.v2/commands.go b/vendor/gopkg.in/redis.v2/commands.go
new file mode 100644
index 0000000000..6068bab17e
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/commands.go
@@ -0,0 +1,1246 @@
+package redis
+
+import (
+ "io"
+ "strconv"
+ "time"
+)
+
+func formatFloat(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+}
+
+func readTimeout(sec int64) time.Duration {
+ if sec == 0 {
+ return 0
+ }
+ return time.Duration(sec+1) * time.Second
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Auth(password string) *StatusCmd {
+ cmd := NewStatusCmd("AUTH", password)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Echo(message string) *StringCmd {
+ cmd := NewStringCmd("ECHO", message)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Ping() *StatusCmd {
+ cmd := NewStatusCmd("PING")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Quit() *StatusCmd {
+ panic("not implemented")
+}
+
+func (c *Client) Select(index int64) *StatusCmd {
+ cmd := NewStatusCmd("SELECT", strconv.FormatInt(index, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Del(keys ...string) *IntCmd {
+ args := append([]string{"DEL"}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Dump(key string) *StringCmd {
+ cmd := NewStringCmd("DUMP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Exists(key string) *BoolCmd {
+ cmd := NewBoolCmd("EXISTS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Expire(key string, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("EXPIRE", key, strconv.FormatInt(int64(dur/time.Second), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd("EXPIREAT", key, strconv.FormatInt(tm.Unix(), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Keys(pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("KEYS", pattern)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Migrate(host, port, key string, db, timeout int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "MIGRATE",
+ host,
+ port,
+ key,
+ strconv.FormatInt(db, 10),
+ strconv.FormatInt(timeout, 10),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Move(key string, db int64) *BoolCmd {
+ cmd := NewBoolCmd("MOVE", key, strconv.FormatInt(db, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectRefCount(keys ...string) *IntCmd {
+ args := append([]string{"OBJECT", "REFCOUNT"}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectEncoding(keys ...string) *StringCmd {
+ args := append([]string{"OBJECT", "ENCODING"}, keys...)
+ cmd := NewStringCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ObjectIdleTime(keys ...string) *DurationCmd {
+ args := append([]string{"OBJECT", "IDLETIME"}, keys...)
+ cmd := NewDurationCmd(time.Second, args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Persist(key string) *BoolCmd {
+ cmd := NewBoolCmd("PERSIST", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PExpire(key string, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("PEXPIRE", key, strconv.FormatInt(int64(dur/time.Millisecond), 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ "PEXPIREAT",
+ key,
+ strconv.FormatInt(tm.UnixNano()/int64(time.Millisecond), 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PTTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Millisecond, "PTTL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RandomKey() *StringCmd {
+ cmd := NewStringCmd("RANDOMKEY")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Rename(key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd("RENAME", key, newkey)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RenameNX(key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd("RENAMENX", key, newkey)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Restore(key string, ttl int64, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "RESTORE",
+ key,
+ strconv.FormatInt(ttl, 10),
+ value,
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count float64
+ Get []string
+ Order string
+ IsAlpha bool
+ Store string
+}
+
+func (c *Client) Sort(key string, sort Sort) *StringSliceCmd {
+ args := []string{"SORT", key}
+ if sort.By != "" {
+ args = append(args, "BY", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count))
+ }
+ for _, get := range sort.Get {
+ args = append(args, "GET", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.IsAlpha {
+ args = append(args, "ALPHA")
+ }
+ if sort.Store != "" {
+ args = append(args, "STORE", sort.Store)
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) TTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "TTL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Type(key string) *StatusCmd {
+ cmd := NewStatusCmd("TYPE", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Scan(cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"SCAN", strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"SSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"HSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZScan(key string, cursor int64, match string, count int64) *ScanCmd {
+ args := []string{"ZSCAN", key, strconv.FormatInt(cursor, 10)}
+ if match != "" {
+ args = append(args, "MATCH", match)
+ }
+ if count > 0 {
+ args = append(args, "COUNT", strconv.FormatInt(count, 10))
+ }
+ cmd := NewScanCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Append(key, value string) *IntCmd {
+ cmd := NewIntCmd("APPEND", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c *Client) BitCount(key string, bitCount *BitCount) *IntCmd {
+ args := []string{"BITCOUNT", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ strconv.FormatInt(bitCount.Start, 10),
+ strconv.FormatInt(bitCount.End, 10),
+ )
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) bitOp(op, destKey string, keys ...string) *IntCmd {
+ args := []string{"BITOP", op, destKey}
+ args = append(args, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BitOpAnd(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("AND", destKey, keys...)
+}
+
+func (c *Client) BitOpOr(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("OR", destKey, keys...)
+}
+
+func (c *Client) BitOpXor(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("XOR", destKey, keys...)
+}
+
+func (c *Client) BitOpNot(destKey string, key string) *IntCmd {
+ return c.bitOp("NOT", destKey, key)
+}
+
+func (c *Client) Decr(key string) *IntCmd {
+ cmd := NewIntCmd("DECR", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) DecrBy(key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd("DECRBY", key, strconv.FormatInt(decrement, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Get(key string) *StringCmd {
+ cmd := NewStringCmd("GET", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetBit(key string, offset int64) *IntCmd {
+ cmd := NewIntCmd("GETBIT", key, strconv.FormatInt(offset, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetRange(key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(
+ "GETRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(end, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) GetSet(key, value string) *StringCmd {
+ cmd := NewStringCmd("GETSET", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Incr(key string) *IntCmd {
+ cmd := NewIntCmd("INCR", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) IncrBy(key string, value int64) *IntCmd {
+ cmd := NewIntCmd("INCRBY", key, strconv.FormatInt(value, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) IncrByFloat(key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MGet(keys ...string) *SliceCmd {
+ args := append([]string{"MGET"}, keys...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MSet(pairs ...string) *StatusCmd {
+ args := append([]string{"MSET"}, pairs...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) MSetNX(pairs ...string) *BoolCmd {
+ args := append([]string{"MSETNX"}, pairs...)
+ cmd := NewBoolCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PSetEx(key string, dur time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "PSETEX",
+ key,
+ strconv.FormatInt(int64(dur/time.Millisecond), 10),
+ value,
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Set(key, value string) *StatusCmd {
+ cmd := NewStatusCmd("SET", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetBit(key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ "SETBIT",
+ key,
+ strconv.FormatInt(offset, 10),
+ strconv.FormatInt(int64(value), 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetEx(key string, dur time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd("SETEX", key, strconv.FormatInt(int64(dur/time.Second), 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetNX(key, value string) *BoolCmd {
+ cmd := NewBoolCmd("SETNX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SetRange(key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd("SETRANGE", key, strconv.FormatInt(offset, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) StrLen(key string) *IntCmd {
+ cmd := NewIntCmd("STRLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) HDel(key string, fields ...string) *IntCmd {
+ args := append([]string{"HDEL", key}, fields...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HExists(key, field string) *BoolCmd {
+ cmd := NewBoolCmd("HEXISTS", key, field)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGet(key, field string) *StringCmd {
+ cmd := NewStringCmd("HGET", key, field)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGetAll(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HGETALL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HGetAllMap(key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("HGETALL", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HIncrBy(key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd("HINCRBY", key, field, strconv.FormatInt(incr, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HKeys(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HKEYS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HLen(key string) *IntCmd {
+ cmd := NewIntCmd("HLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HMGet(key string, fields ...string) *SliceCmd {
+ args := append([]string{"HMGET", key}, fields...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HMSet(key, field, value string, pairs ...string) *StatusCmd {
+ args := append([]string{"HMSET", key, field, value}, pairs...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HSet(key, field, value string) *BoolCmd {
+ cmd := NewBoolCmd("HSET", key, field, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HSetNX(key, field, value string) *BoolCmd {
+ cmd := NewBoolCmd("HSETNX", key, field, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) HVals(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("HVALS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) BLPop(timeout int64, keys ...string) *StringSliceCmd {
+ args := append([]string{"BLPOP"}, keys...)
+ args = append(args, strconv.FormatInt(timeout, 10))
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BRPop(timeout int64, keys ...string) *StringSliceCmd {
+ args := append([]string{"BRPOP"}, keys...)
+ args = append(args, strconv.FormatInt(timeout, 10))
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BRPopLPush(source, destination string, timeout int64) *StringCmd {
+ cmd := NewStringCmd(
+ "BRPOPLPUSH",
+ source,
+ destination,
+ strconv.FormatInt(timeout, 10),
+ )
+ cmd.setReadTimeout(readTimeout(timeout))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LIndex(key string, index int64) *StringCmd {
+ cmd := NewStringCmd("LINDEX", key, strconv.FormatInt(index, 10))
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LInsert(key, op, pivot, value string) *IntCmd {
+ cmd := NewIntCmd("LINSERT", key, op, pivot, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LLen(key string) *IntCmd {
+ cmd := NewIntCmd("LLEN", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPop(key string) *StringCmd {
+ cmd := NewStringCmd("LPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPush(key string, values ...string) *IntCmd {
+ args := append([]string{"LPUSH", key}, values...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LPushX(key, value string) *IntCmd {
+ cmd := NewIntCmd("LPUSHX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ "LRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LRem(key string, count int64, value string) *IntCmd {
+ cmd := NewIntCmd("LREM", key, strconv.FormatInt(count, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LSet(key string, index int64, value string) *StatusCmd {
+ cmd := NewStatusCmd("LSET", key, strconv.FormatInt(index, 10), value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LTrim(key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "LTRIM",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPop(key string) *StringCmd {
+ cmd := NewStringCmd("RPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPopLPush(source, destination string) *StringCmd {
+ cmd := NewStringCmd("RPOPLPUSH", source, destination)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPush(key string, values ...string) *IntCmd {
+ args := append([]string{"RPUSH", key}, values...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) RPushX(key string, value string) *IntCmd {
+ cmd := NewIntCmd("RPUSHX", key, value)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) SAdd(key string, members ...string) *IntCmd {
+ args := append([]string{"SADD", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SCard(key string) *IntCmd {
+ cmd := NewIntCmd("SCARD", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SDiff(keys ...string) *StringSliceCmd {
+ args := append([]string{"SDIFF"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SDiffStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SDIFFSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SInter(keys ...string) *StringSliceCmd {
+ args := append([]string{"SINTER"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SInterStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SINTERSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SIsMember(key, member string) *BoolCmd {
+ cmd := NewBoolCmd("SISMEMBER", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SMembers(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("SMEMBERS", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SMove(source, destination, member string) *BoolCmd {
+ cmd := NewBoolCmd("SMOVE", source, destination, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SPop(key string) *StringCmd {
+ cmd := NewStringCmd("SPOP", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SRandMember(key string) *StringCmd {
+ cmd := NewStringCmd("SRANDMEMBER", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SRem(key string, members ...string) *IntCmd {
+ args := append([]string{"SREM", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SUnion(keys ...string) *StringSliceCmd {
+ args := append([]string{"SUNION"}, keys...)
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SUnionStore(destination string, keys ...string) *IntCmd {
+ args := append([]string{"SUNIONSTORE", destination}, keys...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type Z struct {
+ Score float64
+ Member string
+}
+
+type ZStore struct {
+ Weights []int64
+ Aggregate string
+}
+
+func (c *Client) ZAdd(key string, members ...Z) *IntCmd {
+ args := []string{"ZADD", key}
+ for _, m := range members {
+ args = append(args, formatFloat(m.Score), m.Member)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZCard(key string) *IntCmd {
+ cmd := NewIntCmd("ZCARD", key)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("ZCOUNT", key, min, max)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZInterStore(
+ destination string,
+ store ZStore,
+ keys ...string,
+) *IntCmd {
+ args := []string{"ZINTERSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
+ args = append(args, keys...)
+ if len(store.Weights) > 0 {
+ args = append(args, "WEIGHTS")
+ for _, weight := range store.Weights {
+ args = append(args, strconv.FormatInt(weight, 10))
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "AGGREGATE", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []string{
+ "ZRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ }
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRange(key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(key, start, stop, false)
+}
+
+func (c *Client) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ args := []string{
+ "ZRANGE",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ "WITHSCORES",
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+type ZRangeByScore struct {
+ Min, Max string
+
+ Offset, Count int64
+}
+
+func (c *Client) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
+ args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
+ return c.zRangeByScore(key, opt, false)
+}
+
+func (c *Client) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
+ args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("ZRANK", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRem(key string, members ...string) *IntCmd {
+ args := append([]string{"ZREM", key}, members...)
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ "ZREMRANGEBYRANK",
+ key,
+ strconv.FormatInt(start, 10),
+ strconv.FormatInt(stop, 10),
+ )
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRemRangeByScore(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRevRange(key, start, stop string, withScores bool) *StringSliceCmd {
+ args := []string{"ZREVRANGE", key, start, stop}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRange(key, start, stop string) *StringSliceCmd {
+ return c.zRevRange(key, start, stop, false)
+}
+
+func (c *Client) ZRevRangeWithScores(key, start, stop string) *ZSliceCmd {
+ args := []string{"ZREVRANGE", key, start, stop, "WITHSCORES"}
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) zRevRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd {
+ args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min}
+ if withScores {
+ args = append(args, "WITHSCORES")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd {
+ return c.zRevRangeByScore(key, opt, false)
+}
+
+func (c *Client) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd {
+ args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "LIMIT",
+ strconv.FormatInt(opt.Offset, 10),
+ strconv.FormatInt(opt.Count, 10),
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZRevRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("ZREVRANK", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZScore(key, member string) *FloatCmd {
+ cmd := NewFloatCmd("ZSCORE", key, member)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ZUnionStore(
+ destination string,
+ store ZStore,
+ keys ...string,
+) *IntCmd {
+ args := []string{"ZUNIONSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)}
+ args = append(args, keys...)
+ if len(store.Weights) > 0 {
+ args = append(args, "WEIGHTS")
+ for _, weight := range store.Weights {
+ args = append(args, strconv.FormatInt(weight, 10))
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "AGGREGATE", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) BgRewriteAOF() *StatusCmd {
+ cmd := NewStatusCmd("BGREWRITEAOF")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) BgSave() *StatusCmd {
+ cmd := NewStatusCmd("BGSAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ClientKill(ipPort string) *StatusCmd {
+ cmd := NewStatusCmd("CLIENT", "KILL", ipPort)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ClientList() *StringCmd {
+ cmd := NewStringCmd("CLIENT", "LIST")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigGet(parameter string) *SliceCmd {
+ cmd := NewSliceCmd("CONFIG", "GET", parameter)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigResetStat() *StatusCmd {
+ cmd := NewStatusCmd("CONFIG", "RESETSTAT")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ConfigSet(parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd("CONFIG", "SET", parameter, value)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) DbSize() *IntCmd {
+ cmd := NewIntCmd("DBSIZE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) FlushAll() *StatusCmd {
+ cmd := NewStatusCmd("FLUSHALL")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) FlushDb() *StatusCmd {
+ cmd := NewStatusCmd("FLUSHDB")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Info() *StringCmd {
+ cmd := NewStringCmd("INFO")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) LastSave() *IntCmd {
+ cmd := NewIntCmd("LASTSAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) Save() *StatusCmd {
+ cmd := NewStatusCmd("SAVE")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) shutdown(modifier string) *StatusCmd {
+ var args []string
+ if modifier == "" {
+ args = []string{"SHUTDOWN"}
+ } else {
+ args = []string{"SHUTDOWN", modifier}
+ }
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errorf(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c *Client) Shutdown() *StatusCmd {
+ return c.shutdown("")
+}
+
+func (c *Client) ShutdownSave() *StatusCmd {
+ return c.shutdown("SAVE")
+}
+
+func (c *Client) ShutdownNoSave() *StatusCmd {
+ return c.shutdown("NOSAVE")
+}
+
+func (c *Client) SlaveOf(host, port string) *StatusCmd {
+ cmd := NewStatusCmd("SLAVEOF", host, port)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) SlowLog() {
+ panic("not implemented")
+}
+
+func (c *Client) Sync() {
+ panic("not implemented")
+}
+
+func (c *Client) Time() *StringSliceCmd {
+ cmd := NewStringSliceCmd("TIME")
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) Eval(script string, keys []string, args []string) *Cmd {
+ cmdArgs := []string{"EVAL", script, strconv.FormatInt(int64(len(keys)), 10)}
+ cmdArgs = append(cmdArgs, keys...)
+ cmdArgs = append(cmdArgs, args...)
+ cmd := NewCmd(cmdArgs...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) EvalSha(sha1 string, keys []string, args []string) *Cmd {
+ cmdArgs := []string{"EVALSHA", sha1, strconv.FormatInt(int64(len(keys)), 10)}
+ cmdArgs = append(cmdArgs, keys...)
+ cmdArgs = append(cmdArgs, args...)
+ cmd := NewCmd(cmdArgs...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptExists(scripts ...string) *BoolSliceCmd {
+ args := append([]string{"SCRIPT", "EXISTS"}, scripts...)
+ cmd := NewBoolSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptFlush() *StatusCmd {
+ cmd := NewStatusCmd("SCRIPT", "FLUSH")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptKill() *StatusCmd {
+ cmd := NewStatusCmd("SCRIPT", "KILL")
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) ScriptLoad(script string) *StringCmd {
+ cmd := NewStringCmd("SCRIPT", "LOAD", script)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) DebugObject(key string) *StringCmd {
+ cmd := NewStringCmd("DEBUG", "OBJECT", key)
+ c.Process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *Client) PubSubChannels(pattern string) *StringSliceCmd {
+ args := []string{"PUBSUB", "CHANNELS"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PubSubNumSub(channels ...string) *SliceCmd {
+ args := []string{"PUBSUB", "NUMSUB"}
+ args = append(args, channels...)
+ cmd := NewSliceCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Client) PubSubNumPat() *IntCmd {
+ cmd := NewIntCmd("PUBSUB", "NUMPAT")
+ c.Process(cmd)
+ return cmd
+}
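
Editorial note: Scan and its SSCAN/HSCAN/ZSCAN variants above return a cursor plus one page of results; callers loop until the cursor comes back as 0. A sketch of that loop, assuming an existing *redis.Client named client:

    func scanKeys(client *redis.Client, pattern string) ([]string, error) {
    	var cursor int64
    	var all []string
    	for {
    		next, keys, err := client.Scan(cursor, pattern, 100).Result()
    		if err != nil {
    			return nil, err
    		}
    		all = append(all, keys...)
    		if next == 0 { // SCAN reports cursor 0 once the iteration is complete
    			return all, nil
    		}
    		cursor = next
    	}
    }
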
diff --git a/vendor/gopkg.in/redis.v2/doc.go b/vendor/gopkg.in/redis.v2/doc.go
new file mode 100644
index 0000000000..55262533a6
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/gopkg.in/redis.v2/error.go b/vendor/gopkg.in/redis.v2/error.go
new file mode 100644
index 0000000000..667fffdc68
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/error.go
@@ -0,0 +1,23 @@
+package redis
+
+import (
+ "fmt"
+)
+
+// Redis nil reply.
+var Nil = errorf("redis: nil")
+
+// Redis transaction failed.
+var TxFailedErr = errorf("redis: transaction failed")
+
+type redisError struct {
+ s string
+}
+
+func errorf(s string, args ...interface{}) redisError {
+ return redisError{s: fmt.Sprintf(s, args...)}
+}
+
+func (err redisError) Error() string {
+ return err.s
+}
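
Editorial note: Nil above is a sentinel, not a transport failure — a GET on a missing key succeeds at the protocol level and is reported as redis.Nil. A sketch of the usual check, assuming an existing client and an fmt import:

    val, err := client.Get("does-not-exist").Result()
    switch {
    case err == redis.Nil:
    	// key does not exist
    case err != nil:
    	// a real error (network, protocol, ...)
    default:
    	fmt.Println("value:", val)
    }
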
diff --git a/vendor/gopkg.in/redis.v2/multi.go b/vendor/gopkg.in/redis.v2/multi.go
new file mode 100644
index 0000000000..bff38dfaaa
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/multi.go
@@ -0,0 +1,138 @@
+package redis
+
+import (
+ "errors"
+ "fmt"
+)
+
+var errDiscard = errors.New("redis: Discard can be used only inside Exec")
+
+// Not thread-safe.
+type Multi struct {
+ *Client
+}
+
+func (c *Client) Multi() *Multi {
+ return &Multi{
+ Client: &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, true),
+ },
+ },
+ }
+}
+
+func (c *Multi) Close() error {
+ if err := c.Unwatch().Err(); err != nil {
+ return err
+ }
+ return c.Client.Close()
+}
+
+func (c *Multi) Watch(keys ...string) *StatusCmd {
+ args := append([]string{"WATCH"}, keys...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Multi) Unwatch(keys ...string) *StatusCmd {
+ args := append([]string{"UNWATCH"}, keys...)
+ cmd := NewStatusCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *Multi) Discard() error {
+ if c.cmds == nil {
+ return errDiscard
+ }
+ c.cmds = c.cmds[:1]
+ return nil
+}
+
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Multi) Exec(f func() error) ([]Cmder, error) {
+ c.cmds = []Cmder{NewStatusCmd("MULTI")}
+ if err := f(); err != nil {
+ return nil, err
+ }
+ c.cmds = append(c.cmds, NewSliceCmd("EXEC"))
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ if len(cmds) == 2 {
+ return []Cmder{}, nil
+ }
+
+ cn, err := c.conn()
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return cmds[1 : len(cmds)-1], err
+ }
+
+ err = c.execCmds(cn, cmds)
+ if err != nil {
+ c.freeConn(cn, err)
+ return cmds[1 : len(cmds)-1], err
+ }
+
+ c.putConn(cn)
+ return cmds[1 : len(cmds)-1], nil
+}
+
+func (c *Multi) execCmds(cn *conn, cmds []Cmder) error {
+ err := c.writeCmd(cn, cmds...)
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+
+ statusCmd := NewStatusCmd()
+
+ // Omit last command (EXEC).
+ cmdsLen := len(cmds) - 1
+
+ // Parse queued replies.
+ for i := 0; i < cmdsLen; i++ {
+ if err := statusCmd.parseReply(cn.rd); err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := readLine(cn.rd)
+ if err != nil {
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ if line[0] != '*' {
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ setCmdsErr(cmds[1:len(cmds)-1], err)
+ return err
+ }
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr)
+ return TxFailedErr
+ }
+
+ var firstCmdErr error
+
+ // Parse replies.
+ // Loop starts from 1 to omit MULTI cmd.
+ for i := 1; i < cmdsLen; i++ {
+ cmd := cmds[i]
+ if err := cmd.parseReply(cn.rd); err != nil {
+ if firstCmdErr == nil {
+ firstCmdErr = err
+ }
+ }
+ }
+
+ return firstCmdErr
+}
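Exec queues the commands issued inside the callback between an implicit MULTI and EXEC, so optimistic locking with WATCH looks roughly like the sketch below; the key name and address are placeholders.

package main

import (
	"fmt"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewTCPClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	defer client.Close()

	tx := client.Multi()
	defer tx.Close()

	// Abort with TxFailedErr if the watched key changes before EXEC.
	if err := tx.Watch("example:counter").Err(); err != nil {
		panic(err)
	}

	cmds, err := tx.Exec(func() error {
		tx.Incr("example:counter") // queued, executed by EXEC
		return nil
	})
	if err == redis.TxFailedErr {
		fmt.Println("transaction aborted; retry")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("queued commands executed:", len(cmds))
}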
diff --git a/vendor/gopkg.in/redis.v2/parser.go b/vendor/gopkg.in/redis.v2/parser.go
new file mode 100644
index 0000000000..b4c380c764
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/parser.go
@@ -0,0 +1,262 @@
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "gopkg.in/bufio.v1"
+)
+
+type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error)
+
+var (
+ errReaderTooSmall = errors.New("redis: reader is too small")
+)
+
+//------------------------------------------------------------------------------
+
+func appendArgs(buf []byte, args []string) []byte {
+ buf = append(buf, '*')
+ buf = strconv.AppendUint(buf, uint64(len(args)), 10)
+ buf = append(buf, '\r', '\n')
+ for _, arg := range args {
+ buf = append(buf, '$')
+ buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
+ buf = append(buf, '\r', '\n')
+ buf = append(buf, arg...)
+ buf = append(buf, '\r', '\n')
+ }
+ return buf
+}
+
+//------------------------------------------------------------------------------
+
+func readLine(rd *bufio.Reader) ([]byte, error) {
+ line, isPrefix, err := rd.ReadLine()
+ if err != nil {
+ return line, err
+ }
+ if isPrefix {
+ return line, errReaderTooSmall
+ }
+ return line, nil
+}
+
+func readN(rd *bufio.Reader, n int) ([]byte, error) {
+ b, err := rd.ReadN(n)
+ if err == bufio.ErrBufferFull {
+ tmp := make([]byte, n)
+ r := copy(tmp, b)
+ b = tmp
+
+ for {
+ nn, err := rd.Read(b[r:])
+ r += nn
+ if r >= n {
+ // Ignore error if we read enough.
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else if err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+//------------------------------------------------------------------------------
+
+func parseReq(rd *bufio.Reader) ([]string, error) {
+ line, err := readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ if line[0] != '*' {
+ return []string{string(line)}, nil
+ }
+ numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ args := make([]string, 0, numReplies)
+ for i := int64(0); i < numReplies; i++ {
+ line, err = readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+ if line[0] != '$' {
+ return nil, fmt.Errorf("redis: expected '$', but got %q", line)
+ }
+
+ argLen, err := strconv.ParseInt(string(line[1:]), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ arg, err := readN(rd, int(argLen)+2)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, string(arg[:argLen]))
+ }
+ return args, nil
+}
+
+//------------------------------------------------------------------------------
+
+func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) {
+ line, err := readLine(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case '-':
+ return nil, errorf(string(line[1:]))
+ case '+':
+ return string(line[1:]), nil
+ case ':':
+ v, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ case '$':
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ return nil, Nil
+ }
+
+ replyLen, err := strconv.Atoi(string(line[1:]))
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := readN(rd, replyLen+2)
+ if err != nil {
+ return nil, err
+ }
+ return string(b[:replyLen]), nil
+ case '*':
+ if len(line) == 3 && line[1] == '-' && line[2] == '1' {
+ return nil, Nil
+ }
+
+ repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return p(rd, repliesNum)
+ }
+ return nil, fmt.Errorf("redis: can't parse %q", line)
+}
+
+func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := parseReply(rd, parseSlice)
+ if err == Nil {
+ vals = append(vals, nil)
+ } else if err != nil {
+ return nil, err
+ } else {
+ vals = append(vals, v)
+ }
+ }
+ return vals, nil
+}
+
+func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]string, 0, n)
+ for i := int64(0); i < n; i++ {
+ viface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ v, ok := viface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", viface)
+ }
+ vals = append(vals, v)
+ }
+ return vals, nil
+}
+
+func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ vals := make([]bool, 0, n)
+ for i := int64(0); i < n; i++ {
+ viface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ v, ok := viface.(int64)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected int64", viface)
+ }
+ vals = append(vals, v == 1)
+ }
+ return vals, nil
+}
+
+func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) {
+ m := make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ keyiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ key, ok := keyiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", keyiface)
+ }
+
+ valueiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ value, ok := valueiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", valueiface)
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) {
+ zz := make([]Z, n/2)
+ for i := int64(0); i < n; i += 2 {
+ z := &zz[i/2]
+
+ memberiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ member, ok := memberiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", memberiface)
+ }
+ z.Member = member
+
+ scoreiface, err := parseReply(rd, nil)
+ if err != nil {
+ return nil, err
+ }
+ scorestr, ok := scoreiface.(string)
+ if !ok {
+ return nil, fmt.Errorf("got %T, expected string", scoreiface)
+ }
+ score, err := strconv.ParseFloat(scorestr, 64)
+ if err != nil {
+ return nil, err
+ }
+ z.Score = score
+ }
+ return zz, nil
+}
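appendArgs encodes every request as a RESP multi-bulk array, which the parse* functions above then read back. A standalone sketch of the same encoding (not the vendored function itself):

package main

import (
	"fmt"
	"strconv"
)

// respEncode mirrors what appendArgs produces: "*<n>\r\n" followed by a
// "$<len>\r\n<arg>\r\n" bulk string per argument.
func respEncode(args []string) []byte {
	buf := append([]byte{}, '*')
	buf = strconv.AppendUint(buf, uint64(len(args)), 10)
	buf = append(buf, '\r', '\n')
	for _, arg := range args {
		buf = append(buf, '$')
		buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
		buf = append(buf, '\r', '\n')
		buf = append(buf, arg...)
		buf = append(buf, '\r', '\n')
	}
	return buf
}

func main() {
	// Prints "*3\r\n$3\r\nSET\r\n$1\r\nk\r\n$1\r\nv\r\n" (quoted).
	fmt.Printf("%q\n", respEncode([]string{"SET", "k", "v"}))
}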
diff --git a/vendor/gopkg.in/redis.v2/pipeline.go b/vendor/gopkg.in/redis.v2/pipeline.go
new file mode 100644
index 0000000000..540d6c51d9
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/pipeline.go
@@ -0,0 +1,91 @@
+package redis
+
+// Not thread-safe.
+type Pipeline struct {
+ *Client
+
+ closed bool
+}
+
+func (c *Client) Pipeline() *Pipeline {
+ return &Pipeline{
+ Client: &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: c.connPool,
+
+ cmds: make([]Cmder, 0),
+ },
+ },
+ }
+}
+
+func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) {
+ pc := c.Pipeline()
+ if err := f(pc); err != nil {
+ return nil, err
+ }
+ cmds, err := pc.Exec()
+ pc.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Close() error {
+ c.closed = true
+ return nil
+}
+
+func (c *Pipeline) Discard() error {
+ if c.closed {
+ return errClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec always returns a list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec() ([]Cmder, error) {
+ if c.closed {
+ return nil, errClosed
+ }
+
+ cmds := c.cmds
+ c.cmds = make([]Cmder, 0)
+
+ if len(cmds) == 0 {
+ return []Cmder{}, nil
+ }
+
+ cn, err := c.conn()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return cmds, err
+ }
+
+ if err := c.execCmds(cn, cmds); err != nil {
+ c.freeConn(cn, err)
+ return cmds, err
+ }
+
+ c.putConn(cn)
+ return cmds, nil
+}
+
+func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error {
+ if err := c.writeCmd(cn, cmds...); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ var firstCmdErr error
+ for _, cmd := range cmds {
+ if err := cmd.parseReply(cn.rd); err != nil {
+ if firstCmdErr == nil {
+ firstCmdErr = err
+ }
+ }
+ }
+
+ return firstCmdErr
+}
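Pipelined wraps the queue/Exec/Close cycle, sending all queued commands in one round trip; a sketch with a placeholder key and address:

package main

import (
	"fmt"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewTCPClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	defer client.Close()

	var get *redis.StringCmd
	cmds, err := client.Pipelined(func(pipe *redis.Pipeline) error {
		pipe.Set("example:key", "value") // queued
		get = pipe.Get("example:key")    // queued; value readable after Exec
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("commands sent in one round trip:", len(cmds))
	fmt.Println("value:", get.Val())
}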
diff --git a/vendor/gopkg.in/redis.v2/pool.go b/vendor/gopkg.in/redis.v2/pool.go
new file mode 100644
index 0000000000..bca4d19633
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/pool.go
@@ -0,0 +1,405 @@
+package redis
+
+import (
+ "container/list"
+ "errors"
+ "log"
+ "net"
+ "sync"
+ "time"
+
+ "gopkg.in/bufio.v1"
+)
+
+var (
+ errClosed = errors.New("redis: client is closed")
+ errRateLimited = errors.New("redis: you open connections too fast")
+)
+
+var (
+ zeroTime = time.Time{}
+)
+
+type pool interface {
+ Get() (*conn, bool, error)
+ Put(*conn) error
+ Remove(*conn) error
+ Len() int
+ Size() int
+ Close() error
+ Filter(func(*conn) bool)
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ netcn net.Conn
+ rd *bufio.Reader
+ buf []byte
+
+ inUse bool
+ usedAt time.Time
+
+ readTimeout time.Duration
+ writeTimeout time.Duration
+
+ elem *list.Element
+}
+
+func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) {
+ return func() (*conn, error) {
+ netcn, err := dial()
+ if err != nil {
+ return nil, err
+ }
+ cn := &conn{
+ netcn: netcn,
+ buf: make([]byte, 0, 64),
+ }
+ cn.rd = bufio.NewReader(cn)
+ return cn, nil
+ }
+}
+
+func (cn *conn) Read(b []byte) (int, error) {
+ if cn.readTimeout != 0 {
+ cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout))
+ } else {
+ cn.netcn.SetReadDeadline(zeroTime)
+ }
+ return cn.netcn.Read(b)
+}
+
+func (cn *conn) Write(b []byte) (int, error) {
+ if cn.writeTimeout != 0 {
+ cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout))
+ } else {
+ cn.netcn.SetWriteDeadline(zeroTime)
+ }
+ return cn.netcn.Write(b)
+}
+
+func (cn *conn) RemoteAddr() net.Addr {
+ return cn.netcn.RemoteAddr()
+}
+
+func (cn *conn) Close() error {
+ return cn.netcn.Close()
+}
+
+//------------------------------------------------------------------------------
+
+type connPool struct {
+ dial func() (*conn, error)
+ rl *rateLimiter
+
+ opt *options
+
+ cond *sync.Cond
+ conns *list.List
+
+ idleNum int
+ closed bool
+}
+
+func newConnPool(dial func() (*conn, error), opt *options) *connPool {
+ return &connPool{
+ dial: dial,
+ rl: newRateLimiter(time.Second, 2*opt.PoolSize),
+
+ opt: opt,
+
+ cond: sync.NewCond(&sync.Mutex{}),
+ conns: list.New(),
+ }
+}
+
+func (p *connPool) new() (*conn, error) {
+ if !p.rl.Check() {
+ return nil, errRateLimited
+ }
+ return p.dial()
+}
+
+func (p *connPool) Get() (*conn, bool, error) {
+ p.cond.L.Lock()
+
+ if p.closed {
+ p.cond.L.Unlock()
+ return nil, false, errClosed
+ }
+
+ if p.opt.IdleTimeout > 0 {
+ for el := p.conns.Front(); el != nil; el = el.Next() {
+ cn := el.Value.(*conn)
+ if cn.inUse {
+ break
+ }
+ if time.Since(cn.usedAt) > p.opt.IdleTimeout {
+ if err := p.remove(cn); err != nil {
+ log.Printf("remove failed: %s", err)
+ }
+ }
+ }
+ }
+
+ for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 {
+ p.cond.Wait()
+ }
+
+ if p.idleNum > 0 {
+ elem := p.conns.Front()
+ cn := elem.Value.(*conn)
+ if cn.inUse {
+ panic("pool: precondition failed")
+ }
+ cn.inUse = true
+ p.conns.MoveToBack(elem)
+ p.idleNum--
+
+ p.cond.L.Unlock()
+ return cn, false, nil
+ }
+
+ if p.conns.Len() < p.opt.PoolSize {
+ cn, err := p.new()
+ if err != nil {
+ p.cond.L.Unlock()
+ return nil, false, err
+ }
+
+ cn.inUse = true
+ cn.elem = p.conns.PushBack(cn)
+
+ p.cond.L.Unlock()
+ return cn, true, nil
+ }
+
+ panic("not reached")
+}
+
+func (p *connPool) Put(cn *conn) error {
+ if cn.rd.Buffered() != 0 {
+ b, _ := cn.rd.ReadN(cn.rd.Buffered())
+ log.Printf("redis: connection has unread data: %q", b)
+ return p.Remove(cn)
+ }
+
+ if p.opt.IdleTimeout > 0 {
+ cn.usedAt = time.Now()
+ }
+
+ p.cond.L.Lock()
+ if p.closed {
+ p.cond.L.Unlock()
+ return errClosed
+ }
+ cn.inUse = false
+ p.conns.MoveToFront(cn.elem)
+ p.idleNum++
+ p.cond.Signal()
+ p.cond.L.Unlock()
+
+ return nil
+}
+
+func (p *connPool) Remove(cn *conn) error {
+ p.cond.L.Lock()
+ if p.closed {
+ // Noop, connection is already closed.
+ p.cond.L.Unlock()
+ return nil
+ }
+ err := p.remove(cn)
+ p.cond.Signal()
+ p.cond.L.Unlock()
+ return err
+}
+
+func (p *connPool) remove(cn *conn) error {
+ p.conns.Remove(cn.elem)
+ cn.elem = nil
+ if !cn.inUse {
+ p.idleNum--
+ }
+ return cn.Close()
+}
+
+// Len returns the number of idle connections.
+func (p *connPool) Len() int {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ return p.idleNum
+}
+
+// Size returns the number of connections in the pool.
+func (p *connPool) Size() int {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ return p.conns.Len()
+}
+
+func (p *connPool) Filter(f func(*conn) bool) {
+ p.cond.L.Lock()
+ for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next {
+ next = el.Next()
+ cn := el.Value.(*conn)
+ if !f(cn) {
+ p.remove(cn)
+ }
+ }
+ p.cond.L.Unlock()
+}
+
+func (p *connPool) Close() error {
+ defer p.cond.L.Unlock()
+ p.cond.L.Lock()
+ if p.closed {
+ return nil
+ }
+ p.closed = true
+ p.rl.Close()
+ var retErr error
+ for {
+ e := p.conns.Front()
+ if e == nil {
+ break
+ }
+ if err := p.remove(e.Value.(*conn)); err != nil {
+ log.Printf("cn.Close failed: %s", err)
+ retErr = err
+ }
+ }
+ return retErr
+}
+
+//------------------------------------------------------------------------------
+
+type singleConnPool struct {
+ pool pool
+
+ cnMtx sync.Mutex
+ cn *conn
+
+ reusable bool
+
+ closed bool
+}
+
+func newSingleConnPool(pool pool, reusable bool) *singleConnPool {
+ return &singleConnPool{
+ pool: pool,
+ reusable: reusable,
+ }
+}
+
+func (p *singleConnPool) SetConn(cn *conn) {
+ p.cnMtx.Lock()
+ p.cn = cn
+ p.cnMtx.Unlock()
+}
+
+func (p *singleConnPool) Get() (*conn, bool, error) {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+
+ if p.closed {
+ return nil, false, errClosed
+ }
+ if p.cn != nil {
+ return p.cn, false, nil
+ }
+
+ cn, isNew, err := p.pool.Get()
+ if err != nil {
+ return nil, false, err
+ }
+ p.cn = cn
+
+ return p.cn, isNew, nil
+}
+
+func (p *singleConnPool) Put(cn *conn) error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ if p.closed {
+ return errClosed
+ }
+ return nil
+}
+
+func (p *singleConnPool) put() error {
+ err := p.pool.Put(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *singleConnPool) Remove(cn *conn) error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ panic("p.cn == nil")
+ }
+ if p.cn != cn {
+ panic("p.cn != cn")
+ }
+ if p.closed {
+ return errClosed
+ }
+ return p.remove()
+}
+
+func (p *singleConnPool) remove() error {
+ err := p.pool.Remove(p.cn)
+ p.cn = nil
+ return err
+}
+
+func (p *singleConnPool) Len() int {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *singleConnPool) Size() int {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.cn == nil {
+ return 0
+ }
+ return 1
+}
+
+func (p *singleConnPool) Filter(f func(*conn) bool) {
+ p.cnMtx.Lock()
+ if p.cn != nil {
+ if !f(p.cn) {
+ p.remove()
+ }
+ }
+ p.cnMtx.Unlock()
+}
+
+func (p *singleConnPool) Close() error {
+ defer p.cnMtx.Unlock()
+ p.cnMtx.Lock()
+ if p.closed {
+ return nil
+ }
+ p.closed = true
+ var err error
+ if p.cn != nil {
+ if p.reusable {
+ err = p.put()
+ } else {
+ err = p.remove()
+ }
+ }
+ return err
+}
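The pool above is driven entirely by the exported Options fields: PoolSize caps open connections, IdleTimeout lets Get evict stale ones, and the rate limiter throttles how quickly new connections are dialed. A configuration sketch (the values are assumptions, not recommendations):

package main

import (
	"time"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewTCPClient(&redis.Options{
		Addr:        "localhost:6379", // placeholder address
		PoolSize:    20,               // at most 20 concurrent connections
		IdleTimeout: 5 * time.Minute,  // drop connections unused for 5 minutes
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		panic(err)
	}
}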
diff --git a/vendor/gopkg.in/redis.v2/pubsub.go b/vendor/gopkg.in/redis.v2/pubsub.go
new file mode 100644
index 0000000000..6ac130bac4
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/pubsub.go
@@ -0,0 +1,134 @@
+package redis
+
+import (
+ "fmt"
+ "time"
+)
+
+// Not thread-safe.
+type PubSub struct {
+ *baseClient
+}
+
+func (c *Client) PubSub() *PubSub {
+ return &PubSub{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, false),
+ },
+ }
+}
+
+func (c *Client) Publish(channel, message string) *IntCmd {
+ req := NewIntCmd("PUBLISH", channel, message)
+ c.Process(req)
+ return req
+}
+
+type Message struct {
+ Channel string
+ Payload string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+type PMessage struct {
+ Channel string
+ Pattern string
+ Payload string
+}
+
+func (m *PMessage) String() string {
+ return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload)
+}
+
+type Subscription struct {
+ Kind string
+ Channel string
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+func (c *PubSub) Receive() (interface{}, error) {
+ return c.ReceiveTimeout(0)
+}
+
+func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
+ cn, err := c.conn()
+ if err != nil {
+ return nil, err
+ }
+ cn.readTimeout = timeout
+
+ cmd := NewSliceCmd()
+ if err := cmd.parseReply(cn.rd); err != nil {
+ return nil, err
+ }
+
+ reply := cmd.Val()
+
+ msgName := reply[0].(string)
+ switch msgName {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ return &Subscription{
+ Kind: msgName,
+ Channel: reply[1].(string),
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: reply[2].(string),
+ }, nil
+ case "pmessage":
+ return &PMessage{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ }
+ return nil, fmt.Errorf("redis: unsupported message name: %q", msgName)
+}
+
+func (c *PubSub) subscribe(cmd string, channels ...string) error {
+ cn, err := c.conn()
+ if err != nil {
+ return err
+ }
+
+ args := append([]string{cmd}, channels...)
+ req := NewSliceCmd(args...)
+ return c.writeCmd(cn, req)
+}
+
+func (c *PubSub) Subscribe(channels ...string) error {
+ return c.subscribe("SUBSCRIBE", channels...)
+}
+
+func (c *PubSub) PSubscribe(patterns ...string) error {
+ return c.subscribe("PSUBSCRIBE", patterns...)
+}
+
+func (c *PubSub) unsubscribe(cmd string, channels ...string) error {
+ cn, err := c.conn()
+ if err != nil {
+ return err
+ }
+
+ args := append([]string{cmd}, channels...)
+ req := NewSliceCmd(args...)
+ return c.writeCmd(cn, req)
+}
+
+func (c *PubSub) Unsubscribe(channels ...string) error {
+ return c.unsubscribe("UNSUBSCRIBE", channels...)
+}
+
+func (c *PubSub) PUnsubscribe(patterns ...string) error {
+ return c.unsubscribe("PUNSUBSCRIBE", patterns...)
+}
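Receive returns *Subscription, *Message or *PMessage depending on what the server pushed, so consumers typically loop with a type switch; a sketch with a placeholder channel and address:

package main

import (
	"fmt"
	"time"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewTCPClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	defer client.Close()

	pubsub := client.PubSub()
	defer pubsub.Close()

	if err := pubsub.Subscribe("example:events"); err != nil {
		panic(err)
	}

	for {
		msg, err := pubsub.ReceiveTimeout(5 * time.Second)
		if err != nil {
			break // e.g. read timeout
		}
		switch m := msg.(type) {
		case *redis.Subscription:
			fmt.Println(m.Kind, m.Channel)
		case *redis.Message:
			fmt.Println("received", m.Payload, "on", m.Channel)
		}
	}
}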
diff --git a/vendor/gopkg.in/redis.v2/rate_limit.go b/vendor/gopkg.in/redis.v2/rate_limit.go
new file mode 100644
index 0000000000..20d8512707
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/rate_limit.go
@@ -0,0 +1,53 @@
+package redis
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+type rateLimiter struct {
+ v int64
+
+ _closed int64
+}
+
+func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter {
+ rl := &rateLimiter{
+ v: int64(bucketSize),
+ }
+ go rl.loop(limit, int64(bucketSize))
+ return rl
+}
+
+func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) {
+ for {
+ if rl.closed() {
+ break
+ }
+ if v := atomic.LoadInt64(&rl.v); v < bucketSize {
+ atomic.AddInt64(&rl.v, 1)
+ }
+ time.Sleep(limit)
+ }
+}
+
+func (rl *rateLimiter) Check() bool {
+ for {
+ if v := atomic.LoadInt64(&rl.v); v > 0 {
+ if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
+ return true
+ }
+ } else {
+ return false
+ }
+ }
+}
+
+func (rl *rateLimiter) Close() error {
+ atomic.StoreInt64(&rl._closed, 1)
+ return nil
+}
+
+func (rl *rateLimiter) closed() bool {
+ return atomic.LoadInt64(&rl._closed) == 1
+}
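rateLimiter is a small token bucket: Check consumes a token, and the background loop refills one token per interval up to the bucket size. The type is unexported, so here is a standalone sketch of the same idea (all names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// tokenBucket mirrors the rateLimiter above.
type tokenBucket struct{ tokens int64 }

func newTokenBucket(interval time.Duration, size int64) *tokenBucket {
	tb := &tokenBucket{tokens: size}
	go func() {
		for range time.Tick(interval) {
			if atomic.LoadInt64(&tb.tokens) < size {
				atomic.AddInt64(&tb.tokens, 1) // refill one token per interval
			}
		}
	}()
	return tb
}

// check consumes a token, returning false when the bucket is empty.
func (tb *tokenBucket) check() bool {
	for {
		v := atomic.LoadInt64(&tb.tokens)
		if v <= 0 {
			return false
		}
		if atomic.CompareAndSwapInt64(&tb.tokens, v, v-1) {
			return true
		}
	}
}

func main() {
	tb := newTokenBucket(time.Second, 2)
	for i := 0; i < 4; i++ {
		fmt.Println("attempt", i, "allowed:", tb.check())
	}
}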
diff --git a/vendor/gopkg.in/redis.v2/redis.go b/vendor/gopkg.in/redis.v2/redis.go
new file mode 100644
index 0000000000..0d15dc8f85
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/redis.go
@@ -0,0 +1,231 @@
+package redis
+
+import (
+ "log"
+ "net"
+ "time"
+)
+
+type baseClient struct {
+ connPool pool
+ opt *options
+ cmds []Cmder
+}
+
+func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error {
+ buf := cn.buf[:0]
+ for _, cmd := range cmds {
+ buf = appendArgs(buf, cmd.args())
+ }
+
+ _, err := cn.Write(buf)
+ return err
+}
+
+func (c *baseClient) conn() (*conn, error) {
+ cn, isNew, err := c.connPool.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ if isNew {
+ if err := c.initConn(cn); err != nil {
+ c.removeConn(cn)
+ return nil, err
+ }
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(cn *conn) error {
+ if c.opt.Password == "" && c.opt.DB == 0 {
+ return nil
+ }
+
+ pool := newSingleConnPool(c.connPool, false)
+ pool.SetConn(cn)
+
+ // Client is not closed because we want to reuse the underlying connection.
+ client := &Client{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: pool,
+ },
+ }
+
+ if c.opt.Password != "" {
+ if err := client.Auth(c.opt.Password).Err(); err != nil {
+ return err
+ }
+ }
+
+ if c.opt.DB > 0 {
+ if err := client.Select(c.opt.DB).Err(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *baseClient) freeConn(cn *conn, ei error) error {
+ if cn.rd.Buffered() > 0 {
+ return c.connPool.Remove(cn)
+ }
+ if _, ok := ei.(redisError); ok {
+ return c.connPool.Put(cn)
+ }
+ return c.connPool.Remove(cn)
+}
+
+func (c *baseClient) removeConn(cn *conn) {
+ if err := c.connPool.Remove(cn); err != nil {
+ log.Printf("pool.Remove failed: %s", err)
+ }
+}
+
+func (c *baseClient) putConn(cn *conn) {
+ if err := c.connPool.Put(cn); err != nil {
+ log.Printf("pool.Put failed: %s", err)
+ }
+}
+
+func (c *baseClient) Process(cmd Cmder) {
+ if c.cmds == nil {
+ c.run(cmd)
+ } else {
+ c.cmds = append(c.cmds, cmd)
+ }
+}
+
+func (c *baseClient) run(cmd Cmder) {
+ cn, err := c.conn()
+ if err != nil {
+ cmd.setErr(err)
+ return
+ }
+
+ if timeout := cmd.writeTimeout(); timeout != nil {
+ cn.writeTimeout = *timeout
+ } else {
+ cn.writeTimeout = c.opt.WriteTimeout
+ }
+
+ if timeout := cmd.readTimeout(); timeout != nil {
+ cn.readTimeout = *timeout
+ } else {
+ cn.readTimeout = c.opt.ReadTimeout
+ }
+
+ if err := c.writeCmd(cn, cmd); err != nil {
+ c.freeConn(cn, err)
+ cmd.setErr(err)
+ return
+ }
+
+ if err := cmd.parseReply(cn.rd); err != nil {
+ c.freeConn(cn, err)
+ return
+ }
+
+ c.putConn(cn)
+}
+
+// Close closes the client, releasing any open resources.
+func (c *baseClient) Close() error {
+ return c.connPool.Close()
+}
+
+//------------------------------------------------------------------------------
+
+type options struct {
+ Password string
+ DB int64
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ IdleTimeout time.Duration
+}
+
+type Options struct {
+ Network string
+ Addr string
+
+ // Dialer creates a new network connection and has priority over the
+ // Network and Addr options.
+ Dialer func() (net.Conn, error)
+
+ Password string
+ DB int64
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ IdleTimeout time.Duration
+}
+
+func (opt *Options) getPoolSize() int {
+ if opt.PoolSize == 0 {
+ return 10
+ }
+ return opt.PoolSize
+}
+
+func (opt *Options) getDialTimeout() time.Duration {
+ if opt.DialTimeout == 0 {
+ return 5 * time.Second
+ }
+ return opt.DialTimeout
+}
+
+func (opt *Options) options() *options {
+ return &options{
+ DB: opt.DB,
+ Password: opt.Password,
+
+ DialTimeout: opt.getDialTimeout(),
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.getPoolSize(),
+ IdleTimeout: opt.IdleTimeout,
+ }
+}
+
+type Client struct {
+ *baseClient
+}
+
+func NewClient(clOpt *Options) *Client {
+ opt := clOpt.options()
+ dialer := clOpt.Dialer
+ if dialer == nil {
+ dialer = func() (net.Conn, error) {
+ return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout)
+ }
+ }
+ return &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(newConnFunc(dialer), opt),
+ },
+ }
+}
+
+// Deprecated: Use NewClient instead.
+func NewTCPClient(opt *Options) *Client {
+ opt.Network = "tcp"
+ return NewClient(opt)
+}
+
+// Deprecated: Use NewClient instead.
+func NewUnixClient(opt *Options) *Client {
+ opt.Network = "unix"
+ return NewClient(opt)
+}
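NewClient skips AUTH when Password is empty and SELECT when DB is 0, and requires Network to be set ("tcp" or "unix") unless a custom Dialer is supplied; NewTCPClient and NewUnixClient fill in Network. A construction sketch (values are placeholders):

package main

import (
	"time"

	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Network:     "tcp",
		Addr:        "localhost:6379", // placeholder address
		Password:    "",               // AUTH is skipped when empty
		DB:          0,                // SELECT is skipped for DB 0
		DialTimeout: 5 * time.Second,
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		panic(err)
	}
}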
diff --git a/vendor/gopkg.in/redis.v2/script.go b/vendor/gopkg.in/redis.v2/script.go
new file mode 100644
index 0000000000..96c35f5149
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/script.go
@@ -0,0 +1,52 @@
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type scripter interface {
+ Eval(script string, keys []string, args []string) *Cmd
+ EvalSha(sha1 string, keys []string, args []string) *Cmd
+ ScriptExists(scripts ...string) *BoolSliceCmd
+ ScriptLoad(script string) *StringCmd
+}
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Load(c scripter) *StringCmd {
+ return c.ScriptLoad(s.src)
+}
+
+func (s *Script) Exists(c scripter) *BoolSliceCmd {
+ return c.ScriptExists(s.hash)
+}
+
+func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd {
+ return c.Eval(s.src, keys, args)
+}
+
+func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd {
+ return c.EvalSha(s.hash, keys, args)
+}
+
+func (s *Script) Run(c *Client, keys []string, args []string) *Cmd {
+ r := s.EvalSha(c, keys, args)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(c, keys, args)
+ }
+ return r
+}
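Run tries EVALSHA with the precomputed hash first and falls back to EVAL on NOSCRIPT, so a script only needs to be shipped to the server once; a sketch with an illustrative Lua script and placeholder key:

package main

import (
	"fmt"

	"gopkg.in/redis.v2"
)

// incrBy is a trivial script used only for illustration.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	client := redis.NewTCPClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	defer client.Close()

	res := incrBy.Run(client, []string{"example:counter"}, []string{"2"})
	if err := res.Err(); err != nil {
		panic(err)
	}
	fmt.Println("counter is now", res.Val())
}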
diff --git a/vendor/gopkg.in/redis.v2/sentinel.go b/vendor/gopkg.in/redis.v2/sentinel.go
new file mode 100644
index 0000000000..d3ffeca9a5
--- /dev/null
+++ b/vendor/gopkg.in/redis.v2/sentinel.go
@@ -0,0 +1,291 @@
+package redis
+
+import (
+ "errors"
+ "log"
+ "net"
+ "strings"
+ "sync"
+ "time"
+)
+
+//------------------------------------------------------------------------------
+
+type FailoverOptions struct {
+ MasterName string
+ SentinelAddrs []string
+
+ Password string
+ DB int64
+
+ PoolSize int
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ IdleTimeout time.Duration
+}
+
+func (opt *FailoverOptions) getPoolSize() int {
+ if opt.PoolSize == 0 {
+ return 10
+ }
+ return opt.PoolSize
+}
+
+func (opt *FailoverOptions) getDialTimeout() time.Duration {
+ if opt.DialTimeout == 0 {
+ return 5 * time.Second
+ }
+ return opt.DialTimeout
+}
+
+func (opt *FailoverOptions) options() *options {
+ return &options{
+ DB: opt.DB,
+ Password: opt.Password,
+
+ DialTimeout: opt.getDialTimeout(),
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.getPoolSize(),
+ IdleTimeout: opt.IdleTimeout,
+ }
+}
+
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ opt := failoverOpt.options()
+ failover := &sentinelFailover{
+ masterName: failoverOpt.MasterName,
+ sentinelAddrs: failoverOpt.SentinelAddrs,
+
+ opt: opt,
+ }
+ return &Client{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: failover.Pool(),
+ },
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelClient struct {
+ *baseClient
+}
+
+func newSentinel(clOpt *Options) *sentinelClient {
+ opt := clOpt.options()
+ opt.Password = ""
+ opt.DB = 0
+ dialer := func() (net.Conn, error) {
+ return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout)
+ }
+ return &sentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(newConnFunc(dialer), opt),
+ },
+ }
+}
+
+func (c *sentinelClient) PubSub() *PubSub {
+ return &PubSub{
+ baseClient: &baseClient{
+ opt: c.opt,
+ connPool: newSingleConnPool(c.connPool, false),
+ },
+ }
+}
+
+func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *sentinelClient) Sentinels(name string) *SliceCmd {
+ cmd := NewSliceCmd("SENTINEL", "sentinels", name)
+ c.Process(cmd)
+ return cmd
+}
+
+type sentinelFailover struct {
+ masterName string
+ sentinelAddrs []string
+
+ opt *options
+
+ pool pool
+ poolOnce sync.Once
+
+ lock sync.RWMutex
+ _sentinel *sentinelClient
+}
+
+func (d *sentinelFailover) dial() (net.Conn, error) {
+ addr, err := d.MasterAddr()
+ if err != nil {
+ return nil, err
+ }
+ return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
+}
+
+func (d *sentinelFailover) Pool() pool {
+ d.poolOnce.Do(func() {
+ d.pool = newConnPool(newConnFunc(d.dial), d.opt)
+ })
+ return d.pool
+}
+
+func (d *sentinelFailover) MasterAddr() (string, error) {
+ defer d.lock.Unlock()
+ d.lock.Lock()
+
+ // Try last working sentinel.
+ if d._sentinel != nil {
+ addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+ d.resetSentinel()
+ } else {
+ addr := net.JoinHostPort(addr[0], addr[1])
+ log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
+ return addr, nil
+ }
+ }
+
+ for i, sentinelAddr := range d.sentinelAddrs {
+ sentinel := newSentinel(&Options{
+ Addr: sentinelAddr,
+
+ DB: d.opt.DB,
+ Password: d.opt.Password,
+
+ DialTimeout: d.opt.DialTimeout,
+ ReadTimeout: d.opt.ReadTimeout,
+ WriteTimeout: d.opt.WriteTimeout,
+
+ PoolSize: d.opt.PoolSize,
+ IdleTimeout: d.opt.IdleTimeout,
+ })
+ masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
+ sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
+
+ d.setSentinel(sentinel)
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
+ d.discoverSentinels(sentinel)
+ d._sentinel = sentinel
+ go d.listen()
+}
+
+func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
+ sentinels, err := sentinel.Sentinels(d.masterName).Result()
+ if err != nil {
+ log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(d.sentinelAddrs, sentinelAddr) {
+ log.Printf(
+ "redis-sentinel: discovered new %q sentinel: %s",
+ d.masterName, sentinelAddr,
+ )
+ d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (d *sentinelFailover) listen() {
+ var pubsub *PubSub
+ for {
+ if pubsub == nil {
+ pubsub = d._sentinel.PubSub()
+ if err := pubsub.Subscribe("+switch-master"); err != nil {
+ log.Printf("redis-sentinel: Subscribe failed: %s", err)
+ d.lock.Lock()
+ d.resetSentinel()
+ d.lock.Unlock()
+ return
+ }
+ }
+
+ msgIface, err := pubsub.Receive()
+ if err != nil {
+ log.Printf("redis-sentinel: Receive failed: %s", err)
+ pubsub.Close()
+ return
+ }
+
+ switch msg := msgIface.(type) {
+ case *Message:
+ switch msg.Channel {
+ case "+switch-master":
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != d.masterName {
+ log.Printf("redis-sentinel: ignore new %s addr", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ log.Printf(
+ "redis-sentinel: new %q addr is %s",
+ d.masterName, addr,
+ )
+ d.pool.Filter(func(cn *conn) bool {
+ if cn.RemoteAddr().String() != addr {
+ log.Printf(
+ "redis-sentinel: closing connection to old master %s",
+ cn.RemoteAddr(),
+ )
+ return false
+ }
+ return true
+ })
+ default:
+ log.Printf("redis-sentinel: unsupported message: %s", msg)
+ }
+ case *Subscription:
+ // Ignore.
+ default:
+ log.Printf("redis-sentinel: unsupported message: %s", msgIface)
+ }
+ }
+}
+
+func (d *sentinelFailover) resetSentinel() {
+ d._sentinel.Close()
+ d._sentinel = nil
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
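NewFailoverClient asks the configured sentinels for the current master, dials it, and closes connections to the old master when a +switch-master notification arrives; a sketch with a placeholder master name and sentinel address:

package main

import (
	"gopkg.in/redis.v2"
)

func main() {
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",                  // placeholder master name
		SentinelAddrs: []string{"localhost:26379"}, // placeholder sentinel address
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		panic(err)
	}
}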