Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/emirpasic/gods/LICENSE | 41
-rw-r--r--  vendor/github.com/emirpasic/gods/containers/containers.go | 35
-rw-r--r--  vendor/github.com/emirpasic/gods/containers/enumerable.go | 61
-rw-r--r--  vendor/github.com/emirpasic/gods/containers/iterator.go | 109
-rw-r--r--  vendor/github.com/emirpasic/gods/containers/serialization.go | 17
-rw-r--r--  vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go | 228
-rw-r--r--  vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go | 79
-rw-r--r--  vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go | 83
-rw-r--r--  vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go | 29
-rw-r--r--  vendor/github.com/emirpasic/gods/lists/lists.go | 33
-rw-r--r--  vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go | 163
-rw-r--r--  vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go | 84
-rw-r--r--  vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go | 22
-rw-r--r--  vendor/github.com/emirpasic/gods/trees/trees.go | 21
-rw-r--r--  vendor/github.com/emirpasic/gods/utils/comparator.go | 251
-rw-r--r--  vendor/github.com/emirpasic/gods/utils/sort.go | 29
-rw-r--r--  vendor/github.com/emirpasic/gods/utils/utils.go | 47
-rw-r--r--  vendor/github.com/jbenet/go-context/LICENSE | 21
-rw-r--r--  vendor/github.com/jbenet/go-context/io/ctxio.go | 120
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/AUTHORS.txt | 4
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/LICENSE | 49
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/config.go | 639
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/lexer.go | 241
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/parser.go | 185
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/position.go | 25
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/token.go | 49
-rw-r--r--  vendor/github.com/kevinburke/ssh_config/validators.go | 162
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/LICENSE | 21
-rw-r--r--  vendor/github.com/mitchellh/go-homedir/homedir.go | 157
-rw-r--r--  vendor/github.com/pelletier/go-buffruneio/buffruneio.go | 117
-rw-r--r--  vendor/github.com/src-d/gcfg/LICENSE | 28
-rw-r--r--  vendor/github.com/src-d/gcfg/doc.go | 145
-rw-r--r--  vendor/github.com/src-d/gcfg/errors.go | 41
-rw-r--r--  vendor/github.com/src-d/gcfg/go1_0.go | 7
-rw-r--r--  vendor/github.com/src-d/gcfg/go1_2.go | 9
-rw-r--r--  vendor/github.com/src-d/gcfg/read.go | 273
-rw-r--r--  vendor/github.com/src-d/gcfg/scanner/errors.go | 121
-rw-r--r--  vendor/github.com/src-d/gcfg/scanner/scanner.go | 342
-rw-r--r--  vendor/github.com/src-d/gcfg/set.go | 332
-rw-r--r--  vendor/github.com/src-d/gcfg/token/position.go | 435
-rw-r--r--  vendor/github.com/src-d/gcfg/token/serialize.go | 56
-rw-r--r--  vendor/github.com/src-d/gcfg/token/token.go | 83
-rw-r--r--  vendor/github.com/src-d/gcfg/types/bool.go | 23
-rw-r--r--  vendor/github.com/src-d/gcfg/types/doc.go | 4
-rw-r--r--  vendor/github.com/src-d/gcfg/types/enum.go | 44
-rw-r--r--  vendor/github.com/src-d/gcfg/types/int.go | 86
-rw-r--r--  vendor/github.com/src-d/gcfg/types/scan.go | 23
-rw-r--r--  vendor/github.com/xanzy/ssh-agent/LICENSE | 202
-rw-r--r--  vendor/github.com/xanzy/ssh-agent/pageant_windows.go | 146
-rw-r--r--  vendor/github.com/xanzy/ssh-agent/sshagent.go | 49
-rw-r--r--  vendor/github.com/xanzy/ssh-agent/sshagent_windows.go | 80
51 files changed, 5651 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/emirpasic/gods/LICENSE b/vendor/github.com/emirpasic/gods/LICENSE
new file mode 100644
index 0000000000..e5e449b6ec
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/LICENSE
@@ -0,0 +1,41 @@
+Copyright (c) 2015, Emir Pasic
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+
+AVL Tree:
+
+Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/emirpasic/gods/containers/containers.go b/vendor/github.com/emirpasic/gods/containers/containers.go
new file mode 100644
index 0000000000..c35ab36d2c
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/containers.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package containers provides core interfaces and functions for data structures.
+//
+// Container is the base interface for all data structures to implement.
+//
+// Iterators provide stateful iterators.
+//
+// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
+//
+// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
+package containers
+
+import "github.com/emirpasic/gods/utils"
+
+// Container is base interface that all data structures implement.
+type Container interface {
+ Empty() bool
+ Size() int
+ Clear()
+ Values() []interface{}
+}
+
+// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
+// Does not affect the ordering of elements within the container.
+func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
+ values := container.Values()
+ if len(values) < 2 {
+ return values
+ }
+ utils.Sort(values, comparator)
+ return values
+}
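
For orientation, here is a minimal sketch of how the Container interface and GetSortedValues added above are typically used. It relies on the arraylist and utils packages vendored later in this same diff; the values are illustrative, not part of the vendored code.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/containers"
	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New(3, 1, 2) // *arraylist.List satisfies containers.Container

	// GetSortedValues sorts a copy of Values(); the container's own order is untouched.
	fmt.Println(containers.GetSortedValues(list, utils.IntComparator)) // [1 2 3]
	fmt.Println(list.Values())                                         // [3 1 2]
}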
diff --git a/vendor/github.com/emirpasic/gods/containers/enumerable.go b/vendor/github.com/emirpasic/gods/containers/enumerable.go
new file mode 100644
index 0000000000..ac48b54531
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/enumerable.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
+type EnumerableWithIndex interface {
+ // Each calls the given function once for each element, passing that element's index and value.
+ Each(func(index int, value interface{}))
+
+ // Map invokes the given function once for each element and returns a
+ // container containing the values returned by the given function.
+ // TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
+ // Map(func(index int, value interface{}) interface{}) Container
+
+ // Select returns a new container containing all elements for which the given function returns a true value.
+ // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+ // Select(func(index int, value interface{}) bool) Container
+
+ // Any passes each element of the container to the given function and
+ // returns true if the function ever returns true for any element.
+ Any(func(index int, value interface{}) bool) bool
+
+ // All passes each element of the container to the given function and
+ // returns true if the function returns true for all elements.
+ All(func(index int, value interface{}) bool) bool
+
+ // Find passes each element of the container to the given function and returns
+ // the first (index,value) for which the function is true, or (-1,nil) if no
+ // element matches the criteria.
+ Find(func(index int, value interface{}) bool) (int, interface{})
+}
+
+// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
+type EnumerableWithKey interface {
+ // Each calls the given function once for each element, passing that element's key and value.
+ Each(func(key interface{}, value interface{}))
+
+ // Map invokes the given function once for each element and returns a container
+ // containing the values returned by the given function as key/value pairs.
+ // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+ // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
+
+ // Select returns a new container containing all elements for which the given function returns a true value.
+ // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+ // Select(func(key interface{}, value interface{}) bool) Container
+
+ // Any passes each element of the container to the given function and
+ // returns true if the function ever returns true for any element.
+ Any(func(key interface{}, value interface{}) bool) bool
+
+ // All passes each element of the container to the given function and
+ // returns true if the function returns true for all elements.
+ All(func(key interface{}, value interface{}) bool) bool
+
+ // Find passes each element of the container to the given function and returns
+ // the first (key,value) for which the function is true, or (nil,nil) if no
+ // element matches the criteria.
+ Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
+}
diff --git a/vendor/github.com/emirpasic/gods/containers/iterator.go b/vendor/github.com/emirpasic/gods/containers/iterator.go
new file mode 100644
index 0000000000..f1a52a365a
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/iterator.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
+type IteratorWithIndex interface {
+ // Next moves the iterator to the next element and returns true if there was a next element in the container.
+ // If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
+ // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+ // Modifies the state of the iterator.
+ Next() bool
+
+ // Value returns the current element's value.
+ // Does not modify the state of the iterator.
+ Value() interface{}
+
+ // Index returns the current element's index.
+ // Does not modify the state of the iterator.
+ Index() int
+
+ // Begin resets the iterator to its initial state (one-before-first)
+ // Call Next() to fetch the first element if any.
+ Begin()
+
+ // First moves the iterator to the first element and returns true if there was a first element in the container.
+ // If First() returns true, then first element's index and value can be retrieved by Index() and Value().
+ // Modifies the state of the iterator.
+ First() bool
+}
+
+// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
+type IteratorWithKey interface {
+ // Next moves the iterator to the next element and returns true if there was a next element in the container.
+ // If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
+ // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+ // Modifies the state of the iterator.
+ Next() bool
+
+ // Value returns the current element's value.
+ // Does not modify the state of the iterator.
+ Value() interface{}
+
+ // Key returns the current element's key.
+ // Does not modify the state of the iterator.
+ Key() interface{}
+
+ // Begin resets the iterator to its initial state (one-before-first)
+ // Call Next() to fetch the first element if any.
+ Begin()
+
+ // First moves the iterator to the first element and returns true if there was a first element in the container.
+ // If First() returns true, then first element's key and value can be retrieved by Key() and Value().
+ // Modifies the state of the iterator.
+ First() bool
+}
+
+// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
+//
+// Essentially it is the same as IteratorWithIndex, but provides additional:
+//
+// Prev() function to enable traversal in reverse
+//
+// Last() function to move the iterator to the last element.
+//
+// End() function to move the iterator past the last element (one-past-the-end).
+type ReverseIteratorWithIndex interface {
+ // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+ // If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
+ // Modifies the state of the iterator.
+ Prev() bool
+
+ // End moves the iterator past the last element (one-past-the-end).
+ // Call Prev() to fetch the last element if any.
+ End()
+
+ // Last moves the iterator to the last element and returns true if there was a last element in the container.
+ // If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
+ // Modifies the state of the iterator.
+ Last() bool
+
+ IteratorWithIndex
+}
+
+// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
+//
+// Essentially it is the same as IteratorWithKey, but provides additional:
+//
+// Prev() function to enable traversal in reverse
+//
+// Last() function to move the iterator to the last element.
+type ReverseIteratorWithKey interface {
+ // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+ // If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
+ // Modifies the state of the iterator.
+ Prev() bool
+
+ // End moves the iterator past the last element (one-past-the-end).
+ // Call Prev() to fetch the last element if any.
+ End()
+
+ // Last moves the iterator to the last element and returns true if there was a last element in the container.
+ // If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
+ // Modifies the state of the iterator.
+ Last() bool
+
+ IteratorWithKey
+}
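
To make the iterator contracts above concrete, here is a small sketch that drives a ReverseIteratorWithIndex both forwards and backwards. It uses the arraylist iterator vendored later in this diff; the values are illustrative.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New("a", "b", "c")
	it := list.Iterator() // satisfies containers.ReverseIteratorWithIndex

	for it.Next() { // starts one-before-first; the first Next() lands on "a"
		fmt.Println(it.Index(), it.Value())
	}

	it.End()        // one-past-the-end
	for it.Prev() { // reverse traversal: "c", "b", "a"
		fmt.Println(it.Index(), it.Value())
	}
}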
diff --git a/vendor/github.com/emirpasic/gods/containers/serialization.go b/vendor/github.com/emirpasic/gods/containers/serialization.go
new file mode 100644
index 0000000000..d7c90c83a0
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/serialization.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// JSONSerializer provides JSON serialization
+type JSONSerializer interface {
+ // ToJSON outputs the JSON representation of the container's elements.
+ ToJSON() ([]byte, error)
+}
+
+// JSONDeserializer provides JSON deserialization
+type JSONDeserializer interface {
+ // FromJSON populates the container's elements from the input JSON representation.
+ FromJSON([]byte) error
+}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
new file mode 100644
index 0000000000..bfedac9eef
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
@@ -0,0 +1,228 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package arraylist implements the array list.
+//
+// Structure is not thread safe.
+//
+// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
+package arraylist
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/emirpasic/gods/lists"
+ "github.com/emirpasic/gods/utils"
+)
+
+func assertListImplementation() {
+ var _ lists.List = (*List)(nil)
+}
+
+// List holds the elements in a slice
+type List struct {
+ elements []interface{}
+ size int
+}
+
+const (
+ growthFactor = float32(2.0) // growth by 100%
+ shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
+)
+
+// New instantiates a new list and adds the passed values, if any, to the list
+func New(values ...interface{}) *List {
+ list := &List{}
+ if len(values) > 0 {
+ list.Add(values...)
+ }
+ return list
+}
+
+// Add appends a value at the end of the list
+func (list *List) Add(values ...interface{}) {
+ list.growBy(len(values))
+ for _, value := range values {
+ list.elements[list.size] = value
+ list.size++
+ }
+}
+
+// Get returns the element at index.
+// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
+func (list *List) Get(index int) (interface{}, bool) {
+
+ if !list.withinRange(index) {
+ return nil, false
+ }
+
+ return list.elements[index], true
+}
+
+// Remove removes the element at the given index from the list.
+func (list *List) Remove(index int) {
+
+ if !list.withinRange(index) {
+ return
+ }
+
+ list.elements[index] = nil // cleanup reference
+ copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
+ list.size--
+
+ list.shrink()
+}
+
+// Contains checks if the given elements (one or more) are present in the list.
+// All elements have to be present in the list for the method to return true.
+// Time complexity is O(n*m), where n is the list size and m is the number of values searched for.
+// Returns true if no arguments are passed at all, i.e. a list is always a superset of the empty set.
+func (list *List) Contains(values ...interface{}) bool {
+
+ for _, searchValue := range values {
+ found := false
+ for _, element := range list.elements {
+ if element == searchValue {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+}
+
+// Values returns all elements in the list.
+func (list *List) Values() []interface{} {
+ newElements := make([]interface{}, list.size, list.size)
+ copy(newElements, list.elements[:list.size])
+ return newElements
+}
+
+// IndexOf returns the index of the provided element, or -1 if the element is not present.
+func (list *List) IndexOf(value interface{}) int {
+ if list.size == 0 {
+ return -1
+ }
+ for index, element := range list.elements {
+ if element == value {
+ return index
+ }
+ }
+ return -1
+}
+
+// Empty returns true if list does not contain any elements.
+func (list *List) Empty() bool {
+ return list.size == 0
+}
+
+// Size returns number of elements within the list.
+func (list *List) Size() int {
+ return list.size
+}
+
+// Clear removes all elements from the list.
+func (list *List) Clear() {
+ list.size = 0
+ list.elements = []interface{}{}
+}
+
+// Sort sorts the list's values (in-place) using the given comparator.
+func (list *List) Sort(comparator utils.Comparator) {
+ if len(list.elements) < 2 {
+ return
+ }
+ utils.Sort(list.elements[:list.size], comparator)
+}
+
+// Swap swaps the two values at the specified positions.
+func (list *List) Swap(i, j int) {
+ if list.withinRange(i) && list.withinRange(j) {
+ list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
+ }
+}
+
+// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
+// Does not do anything if position is negative or bigger than list's size
+// Note: position equal to list's size is valid, i.e. append.
+func (list *List) Insert(index int, values ...interface{}) {
+
+ if !list.withinRange(index) {
+ // Append
+ if index == list.size {
+ list.Add(values...)
+ }
+ return
+ }
+
+ l := len(values)
+ list.growBy(l)
+ list.size += l
+ copy(list.elements[index+l:], list.elements[index:list.size-l])
+ copy(list.elements[index:], values)
+}
+
+// Set the value at specified index
+// Does not do anything if position is negative or bigger than list's size
+// Note: position equal to list's size is valid, i.e. append.
+func (list *List) Set(index int, value interface{}) {
+
+ if !list.withinRange(index) {
+ // Append
+ if index == list.size {
+ list.Add(value)
+ }
+ return
+ }
+
+ list.elements[index] = value
+}
+
+// String returns a string representation of container
+func (list *List) String() string {
+ str := "ArrayList\n"
+ values := []string{}
+ for _, value := range list.elements[:list.size] {
+ values = append(values, fmt.Sprintf("%v", value))
+ }
+ str += strings.Join(values, ", ")
+ return str
+}
+
+// Check that the index is within bounds of the list
+func (list *List) withinRange(index int) bool {
+ return index >= 0 && index < list.size
+}
+
+func (list *List) resize(cap int) {
+ newElements := make([]interface{}, cap, cap)
+ copy(newElements, list.elements)
+ list.elements = newElements
+}
+
+// Expand the array if necessary, i.e. capacity will be reached if we add n elements
+func (list *List) growBy(n int) {
+ // When capacity is reached, grow by a factor of growthFactor and add number of elements
+ currentCapacity := cap(list.elements)
+ if list.size+n >= currentCapacity {
+ newCapacity := int(growthFactor * float32(currentCapacity+n))
+ list.resize(newCapacity)
+ }
+}
+
+// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
+func (list *List) shrink() {
+ if shrinkFactor == 0.0 {
+ return
+ }
+ // Shrink when size is at shrinkFactor * capacity
+ currentCapacity := cap(list.elements)
+ if list.size <= int(float32(currentCapacity)*shrinkFactor) {
+ list.resize(list.size)
+ }
+}
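
A short usage sketch for the list just added; the values and variable names are illustrative, and the inline comments show the expected list contents after each call.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New()
	list.Add("c", "a")                // ["c", "a"]
	list.Insert(1, "b")               // ["c", "b", "a"]
	list.Sort(utils.StringComparator) // ["a", "b", "c"]
	list.Remove(2)                    // ["a", "b"]

	v, ok := list.Get(0)
	fmt.Println(v, ok)                     // a true
	fmt.Println(list.Contains("a", "b"))   // true
	fmt.Println(list.Size(), list.Empty()) // 2 false
}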
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
new file mode 100644
index 0000000000..b3a8738825
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arraylist
+
+import "github.com/emirpasic/gods/containers"
+
+func assertEnumerableImplementation() {
+ var _ containers.EnumerableWithIndex = (*List)(nil)
+}
+
+// Each calls the given function once for each element, passing that element's index and value.
+func (list *List) Each(f func(index int, value interface{})) {
+ iterator := list.Iterator()
+ for iterator.Next() {
+ f(iterator.Index(), iterator.Value())
+ }
+}
+
+// Map invokes the given function once for each element and returns a
+// container containing the values returned by the given function.
+func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
+ newList := &List{}
+ iterator := list.Iterator()
+ for iterator.Next() {
+ newList.Add(f(iterator.Index(), iterator.Value()))
+ }
+ return newList
+}
+
+// Select returns a new container containing all elements for which the given function returns a true value.
+func (list *List) Select(f func(index int, value interface{}) bool) *List {
+ newList := &List{}
+ iterator := list.Iterator()
+ for iterator.Next() {
+ if f(iterator.Index(), iterator.Value()) {
+ newList.Add(iterator.Value())
+ }
+ }
+ return newList
+}
+
+// Any passes each element of the collection to the given function and
+// returns true if the function ever returns true for any element.
+func (list *List) Any(f func(index int, value interface{}) bool) bool {
+ iterator := list.Iterator()
+ for iterator.Next() {
+ if f(iterator.Index(), iterator.Value()) {
+ return true
+ }
+ }
+ return false
+}
+
+// All passes each element of the collection to the given function and
+// returns true if the function returns true for all elements.
+func (list *List) All(f func(index int, value interface{}) bool) bool {
+ iterator := list.Iterator()
+ for iterator.Next() {
+ if !f(iterator.Index(), iterator.Value()) {
+ return false
+ }
+ }
+ return true
+}
+
+// Find passes each element of the container to the given function and returns
+// the first (index,value) for which the function is true, or (-1,nil) if no
+// element matches the criteria.
+func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
+ iterator := list.Iterator()
+ for iterator.Next() {
+ if f(iterator.Index(), iterator.Value()) {
+ return iterator.Index(), iterator.Value()
+ }
+ }
+ return -1, nil
+}
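
The enumerable methods above are easiest to read with a concrete example; a quick sketch with made-up values follows.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New(1, 2, 3, 4)

	even := list.Select(func(_ int, v interface{}) bool { return v.(int)%2 == 0 })
	doubled := list.Map(func(_ int, v interface{}) interface{} { return v.(int) * 2 })
	idx, val := list.Find(func(_ int, v interface{}) bool { return v.(int) > 2 })

	fmt.Println(even.Values())    // [2 4]
	fmt.Println(doubled.Values()) // [2 4 6 8]
	fmt.Println(idx, val)         // 2 3
	fmt.Println(list.Any(func(_ int, v interface{}) bool { return v.(int) > 3 })) // true
}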
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
new file mode 100644
index 0000000000..38a93f3a8f
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arraylist
+
+import "github.com/emirpasic/gods/containers"
+
+func assertIteratorImplementation() {
+ var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
+}
+
+// Iterator holding the iterator's state
+type Iterator struct {
+ list *List
+ index int
+}
+
+// Iterator returns a stateful iterator whose values can be fetched by an index.
+func (list *List) Iterator() Iterator {
+ return Iterator{list: list, index: -1}
+}
+
+// Next moves the iterator to the next element and returns true if there was a next element in the container.
+// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
+// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+// Modifies the state of the iterator.
+func (iterator *Iterator) Next() bool {
+ if iterator.index < iterator.list.size {
+ iterator.index++
+ }
+ return iterator.list.withinRange(iterator.index)
+}
+
+// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) Prev() bool {
+ if iterator.index >= 0 {
+ iterator.index--
+ }
+ return iterator.list.withinRange(iterator.index)
+}
+
+// Value returns the current element's value.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Value() interface{} {
+ return iterator.list.elements[iterator.index]
+}
+
+// Index returns the current element's index.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Index() int {
+ return iterator.index
+}
+
+// Begin resets the iterator to its initial state (one-before-first)
+// Call Next() to fetch the first element if any.
+func (iterator *Iterator) Begin() {
+ iterator.index = -1
+}
+
+// End moves the iterator past the last element (one-past-the-end).
+// Call Prev() to fetch the last element if any.
+func (iterator *Iterator) End() {
+ iterator.index = iterator.list.size
+}
+
+// First moves the iterator to the first element and returns true if there was a first element in the container.
+// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) First() bool {
+ iterator.Begin()
+ return iterator.Next()
+}
+
+// Last moves the iterator to the last element and returns true if there was a last element in the container.
+// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) Last() bool {
+ iterator.End()
+ return iterator.Prev()
+}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
new file mode 100644
index 0000000000..2f283fb97d
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arraylist
+
+import (
+ "encoding/json"
+ "github.com/emirpasic/gods/containers"
+)
+
+func assertSerializationImplementation() {
+ var _ containers.JSONSerializer = (*List)(nil)
+ var _ containers.JSONDeserializer = (*List)(nil)
+}
+
+// ToJSON outputs the JSON representation of list's elements.
+func (list *List) ToJSON() ([]byte, error) {
+ return json.Marshal(list.elements[:list.size])
+}
+
+// FromJSON populates list's elements from the input JSON representation.
+func (list *List) FromJSON(data []byte) error {
+ err := json.Unmarshal(data, &list.elements)
+ if err == nil {
+ list.size = len(list.elements)
+ }
+ return err
+}
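
A minimal round-trip sketch for the ToJSON/FromJSON methods above; the values are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New("a", "b")

	data, err := list.ToJSON() // ["a","b"]
	if err != nil {
		log.Fatal(err)
	}

	restored := arraylist.New()
	if err := restored.FromJSON(data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored.Values()) // [a b]
}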
diff --git a/vendor/github.com/emirpasic/gods/lists/lists.go b/vendor/github.com/emirpasic/gods/lists/lists.go
new file mode 100644
index 0000000000..1f6bb08e94
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/lists.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lists provides an abstract List interface.
+//
+// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
+//
+// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
+package lists
+
+import (
+ "github.com/emirpasic/gods/containers"
+ "github.com/emirpasic/gods/utils"
+)
+
+// List interface that all lists implement
+type List interface {
+ Get(index int) (interface{}, bool)
+ Remove(index int)
+ Add(values ...interface{})
+ Contains(values ...interface{}) bool
+ Sort(comparator utils.Comparator)
+ Swap(index1, index2 int)
+ Insert(index int, values ...interface{})
+ Set(index int, value interface{})
+
+ containers.Container
+ // Empty() bool
+ // Size() int
+ // Clear()
+ // Values() []interface{}
+}
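
Because List embeds containers.Container, code can be written against the interface and accept any concrete list. A small sketch using the arraylist implementation from this diff; the fill helper is hypothetical.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists"
	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

// fill is a hypothetical helper that depends only on the abstract lists.List interface.
func fill(l lists.List, values ...interface{}) {
	l.Clear()
	l.Add(values...)
	l.Sort(utils.IntComparator)
}

func main() {
	var l lists.List = arraylist.New()
	fill(l, 3, 1, 2)
	fmt.Println(l.Values()) // [1 2 3]
}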
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go
new file mode 100644
index 0000000000..70b28cf52d
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package binaryheap implements a binary heap backed by array list.
+//
+// Comparator defines this heap as either min or max heap.
+//
+// Structure is not thread safe.
+//
+// References: http://en.wikipedia.org/wiki/Binary_heap
+package binaryheap
+
+import (
+ "fmt"
+ "github.com/emirpasic/gods/lists/arraylist"
+ "github.com/emirpasic/gods/trees"
+ "github.com/emirpasic/gods/utils"
+ "strings"
+)
+
+func assertTreeImplementation() {
+ var _ trees.Tree = (*Heap)(nil)
+}
+
+// Heap holds elements in an array-list
+type Heap struct {
+ list *arraylist.List
+ Comparator utils.Comparator
+}
+
+// NewWith instantiates a new empty heap tree with the custom comparator.
+func NewWith(comparator utils.Comparator) *Heap {
+ return &Heap{list: arraylist.New(), Comparator: comparator}
+}
+
+// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
+func NewWithIntComparator() *Heap {
+ return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
+}
+
+// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
+func NewWithStringComparator() *Heap {
+ return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
+}
+
+// Push adds a value onto the heap and bubbles it up accordingly.
+func (heap *Heap) Push(values ...interface{}) {
+ if len(values) == 1 {
+ heap.list.Add(values[0])
+ heap.bubbleUp()
+ } else {
+ // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
+ for _, value := range values {
+ heap.list.Add(value)
+ }
+ size := heap.list.Size()/2 + 1
+ for i := size; i >= 0; i-- {
+ heap.bubbleDownIndex(i)
+ }
+ }
+}
+
+// Pop removes top element on heap and returns it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to pop.
+func (heap *Heap) Pop() (value interface{}, ok bool) {
+ value, ok = heap.list.Get(0)
+ if !ok {
+ return
+ }
+ lastIndex := heap.list.Size() - 1
+ heap.list.Swap(0, lastIndex)
+ heap.list.Remove(lastIndex)
+ heap.bubbleDown()
+ return
+}
+
+// Peek returns top element on the heap without removing it, or nil if heap is empty.
+// Second return parameter is true, unless the heap was empty and there was nothing to peek.
+func (heap *Heap) Peek() (value interface{}, ok bool) {
+ return heap.list.Get(0)
+}
+
+// Empty returns true if heap does not contain any elements.
+func (heap *Heap) Empty() bool {
+ return heap.list.Empty()
+}
+
+// Size returns number of elements within the heap.
+func (heap *Heap) Size() int {
+ return heap.list.Size()
+}
+
+// Clear removes all elements from the heap.
+func (heap *Heap) Clear() {
+ heap.list.Clear()
+}
+
+// Values returns all elements in the heap.
+func (heap *Heap) Values() []interface{} {
+ return heap.list.Values()
+}
+
+// String returns a string representation of container
+func (heap *Heap) String() string {
+ str := "BinaryHeap\n"
+ values := []string{}
+ for _, value := range heap.list.Values() {
+ values = append(values, fmt.Sprintf("%v", value))
+ }
+ str += strings.Join(values, ", ")
+ return str
+}
+
+// Performs the "bubble down" operation. This is to place the element that is at the root
+// of the heap in its correct place so that the heap maintains the min/max-heap order property.
+func (heap *Heap) bubbleDown() {
+ heap.bubbleDownIndex(0)
+}
+
+// Performs the "bubble down" operation. This is to place the element that is at the index
+// of the heap in its correct place so that the heap maintains the min/max-heap order property.
+func (heap *Heap) bubbleDownIndex(index int) {
+ size := heap.list.Size()
+ for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
+ rightIndex := index<<1 + 2
+ smallerIndex := leftIndex
+ leftValue, _ := heap.list.Get(leftIndex)
+ rightValue, _ := heap.list.Get(rightIndex)
+ if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
+ smallerIndex = rightIndex
+ }
+ indexValue, _ := heap.list.Get(index)
+ smallerValue, _ := heap.list.Get(smallerIndex)
+ if heap.Comparator(indexValue, smallerValue) > 0 {
+ heap.list.Swap(index, smallerIndex)
+ } else {
+ break
+ }
+ index = smallerIndex
+ }
+}
+
+// Performs the "bubble up" operation. This is to place a newly inserted
+// element (i.e. last element in the list) in its correct place so that
+// the heap maintains the min/max-heap order property.
+func (heap *Heap) bubbleUp() {
+ index := heap.list.Size() - 1
+ for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
+ indexValue, _ := heap.list.Get(index)
+ parentValue, _ := heap.list.Get(parentIndex)
+ if heap.Comparator(parentValue, indexValue) <= 0 {
+ break
+ }
+ heap.list.Swap(index, parentIndex)
+ index = parentIndex
+ }
+}
+
+// Check that the index is within bounds of the list
+func (heap *Heap) withinRange(index int) bool {
+ return index >= 0 && index < heap.list.Size()
+}
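
The comparator decides whether the heap behaves as a min- or max-heap. A short sketch showing both, with made-up values.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
	"github.com/emirpasic/gods/utils"
)

func main() {
	minHeap := binaryheap.NewWithIntComparator() // min-heap for ints
	minHeap.Push(3, 1, 2)
	for !minHeap.Empty() {
		v, _ := minHeap.Pop()
		fmt.Print(v, " ") // 1 2 3
	}
	fmt.Println()

	// Inverting the comparator turns the same structure into a max-heap.
	maxHeap := binaryheap.NewWith(func(a, b interface{}) int {
		return -utils.IntComparator(a, b)
	})
	maxHeap.Push(3, 1, 2)
	top, ok := maxHeap.Peek()
	fmt.Println(top, ok) // 3 true
}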
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
new file mode 100644
index 0000000000..beeb8d7013
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binaryheap
+
+import "github.com/emirpasic/gods/containers"
+
+func assertIteratorImplementation() {
+ var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
+}
+
+// Iterator returns a stateful iterator whose values can be fetched by an index.
+type Iterator struct {
+ heap *Heap
+ index int
+}
+
+// Iterator returns a stateful iterator whose values can be fetched by an index.
+func (heap *Heap) Iterator() Iterator {
+ return Iterator{heap: heap, index: -1}
+}
+
+// Next moves the iterator to the next element and returns true if there was a next element in the container.
+// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
+// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+// Modifies the state of the iterator.
+func (iterator *Iterator) Next() bool {
+ if iterator.index < iterator.heap.Size() {
+ iterator.index++
+ }
+ return iterator.heap.withinRange(iterator.index)
+}
+
+// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) Prev() bool {
+ if iterator.index >= 0 {
+ iterator.index--
+ }
+ return iterator.heap.withinRange(iterator.index)
+}
+
+// Value returns the current element's value.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Value() interface{} {
+ value, _ := iterator.heap.list.Get(iterator.index)
+ return value
+}
+
+// Index returns the current element's index.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Index() int {
+ return iterator.index
+}
+
+// Begin resets the iterator to its initial state (one-before-first)
+// Call Next() to fetch the first element if any.
+func (iterator *Iterator) Begin() {
+ iterator.index = -1
+}
+
+// End moves the iterator past the last element (one-past-the-end).
+// Call Prev() to fetch the last element if any.
+func (iterator *Iterator) End() {
+ iterator.index = iterator.heap.Size()
+}
+
+// First moves the iterator to the first element and returns true if there was a first element in the container.
+// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) First() bool {
+ iterator.Begin()
+ return iterator.Next()
+}
+
+// Last moves the iterator to the last element and returns true if there was a last element in the container.
+// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) Last() bool {
+ iterator.End()
+ return iterator.Prev()
+}
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go
new file mode 100644
index 0000000000..00d0c7719c
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binaryheap
+
+import "github.com/emirpasic/gods/containers"
+
+func assertSerializationImplementation() {
+ var _ containers.JSONSerializer = (*Heap)(nil)
+ var _ containers.JSONDeserializer = (*Heap)(nil)
+}
+
+// ToJSON outputs the JSON representation of the heap.
+func (heap *Heap) ToJSON() ([]byte, error) {
+ return heap.list.ToJSON()
+}
+
+// FromJSON populates the heap from the input JSON representation.
+func (heap *Heap) FromJSON(data []byte) error {
+ return heap.list.FromJSON(data)
+}
diff --git a/vendor/github.com/emirpasic/gods/trees/trees.go b/vendor/github.com/emirpasic/gods/trees/trees.go
new file mode 100644
index 0000000000..a5a7427d34
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/trees/trees.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package trees provides an abstract Tree interface.
+//
+// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
+//
+// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
+package trees
+
+import "github.com/emirpasic/gods/containers"
+
+// Tree interface that all trees implement
+type Tree interface {
+ containers.Container
+ // Empty() bool
+ // Size() int
+ // Clear()
+ // Values() []interface{}
+}
diff --git a/vendor/github.com/emirpasic/gods/utils/comparator.go b/vendor/github.com/emirpasic/gods/utils/comparator.go
new file mode 100644
index 0000000000..6a9afbf346
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/utils/comparator.go
@@ -0,0 +1,251 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utils
+
+import "time"
+
+// Comparator will make type assertion (see IntComparator for example),
+// which will panic if a or b are not of the asserted type.
+//
+// Should return a number:
+// negative , if a < b
+// zero , if a == b
+// positive , if a > b
+type Comparator func(a, b interface{}) int
+
+// StringComparator provides a fast comparison on strings
+func StringComparator(a, b interface{}) int {
+ s1 := a.(string)
+ s2 := b.(string)
+ min := len(s2)
+ if len(s1) < len(s2) {
+ min = len(s1)
+ }
+ diff := 0
+ for i := 0; i < min && diff == 0; i++ {
+ diff = int(s1[i]) - int(s2[i])
+ }
+ if diff == 0 {
+ diff = len(s1) - len(s2)
+ }
+ if diff < 0 {
+ return -1
+ }
+ if diff > 0 {
+ return 1
+ }
+ return 0
+}
+
+// IntComparator provides a basic comparison on int
+func IntComparator(a, b interface{}) int {
+ aAsserted := a.(int)
+ bAsserted := b.(int)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Int8Comparator provides a basic comparison on int8
+func Int8Comparator(a, b interface{}) int {
+ aAsserted := a.(int8)
+ bAsserted := b.(int8)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Int16Comparator provides a basic comparison on int16
+func Int16Comparator(a, b interface{}) int {
+ aAsserted := a.(int16)
+ bAsserted := b.(int16)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Int32Comparator provides a basic comparison on int32
+func Int32Comparator(a, b interface{}) int {
+ aAsserted := a.(int32)
+ bAsserted := b.(int32)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Int64Comparator provides a basic comparison on int64
+func Int64Comparator(a, b interface{}) int {
+ aAsserted := a.(int64)
+ bAsserted := b.(int64)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// UIntComparator provides a basic comparison on uint
+func UIntComparator(a, b interface{}) int {
+ aAsserted := a.(uint)
+ bAsserted := b.(uint)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// UInt8Comparator provides a basic comparison on uint8
+func UInt8Comparator(a, b interface{}) int {
+ aAsserted := a.(uint8)
+ bAsserted := b.(uint8)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// UInt16Comparator provides a basic comparison on uint16
+func UInt16Comparator(a, b interface{}) int {
+ aAsserted := a.(uint16)
+ bAsserted := b.(uint16)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// UInt32Comparator provides a basic comparison on uint32
+func UInt32Comparator(a, b interface{}) int {
+ aAsserted := a.(uint32)
+ bAsserted := b.(uint32)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// UInt64Comparator provides a basic comparison on uint64
+func UInt64Comparator(a, b interface{}) int {
+ aAsserted := a.(uint64)
+ bAsserted := b.(uint64)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Float32Comparator provides a basic comparison on float32
+func Float32Comparator(a, b interface{}) int {
+ aAsserted := a.(float32)
+ bAsserted := b.(float32)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// Float64Comparator provides a basic comparison on float64
+func Float64Comparator(a, b interface{}) int {
+ aAsserted := a.(float64)
+ bAsserted := b.(float64)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// ByteComparator provides a basic comparison on byte
+func ByteComparator(a, b interface{}) int {
+ aAsserted := a.(byte)
+ bAsserted := b.(byte)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// RuneComparator provides a basic comparison on rune
+func RuneComparator(a, b interface{}) int {
+ aAsserted := a.(rune)
+ bAsserted := b.(rune)
+ switch {
+ case aAsserted > bAsserted:
+ return 1
+ case aAsserted < bAsserted:
+ return -1
+ default:
+ return 0
+ }
+}
+
+// TimeComparator provides a basic comparison on time.Time
+func TimeComparator(a, b interface{}) int {
+ aAsserted := a.(time.Time)
+ bAsserted := b.(time.Time)
+
+ switch {
+ case aAsserted.After(bAsserted):
+ return 1
+ case aAsserted.Before(bAsserted):
+ return -1
+ default:
+ return 0
+ }
+}
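
Custom comparators follow the same contract as the built-in ones above. A sketch of sorting a user-defined type by one of its fields; the user struct and byAge function are hypothetical, built on IntComparator.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

type user struct {
	name string
	age  int
}

// byAge satisfies utils.Comparator by delegating to IntComparator on the age field.
func byAge(a, b interface{}) int {
	return utils.IntComparator(a.(user).age, b.(user).age)
}

func main() {
	list := arraylist.New(user{"bob", 31}, user{"alice", 24})
	list.Sort(byAge)
	fmt.Println(list.Values()) // [{alice 24} {bob 31}]
}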
diff --git a/vendor/github.com/emirpasic/gods/utils/sort.go b/vendor/github.com/emirpasic/gods/utils/sort.go
new file mode 100644
index 0000000000..79ced1f5d2
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/utils/sort.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utils
+
+import "sort"
+
+// Sort sorts values (in-place) with respect to the given comparator.
+//
+// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
+func Sort(values []interface{}, comparator Comparator) {
+ sort.Sort(sortable{values, comparator})
+}
+
+type sortable struct {
+ values []interface{}
+ comparator Comparator
+}
+
+func (s sortable) Len() int {
+ return len(s.values)
+}
+func (s sortable) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+}
+func (s sortable) Less(i, j int) bool {
+ return s.comparator(s.values[i], s.values[j]) < 0
+}
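
Sort works on any []interface{} with a matching comparator; a brief sketch with illustrative values.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	values := []interface{}{"banana", "apple", "cherry"}
	utils.Sort(values, utils.StringComparator) // sorts in place
	fmt.Println(values)                        // [apple banana cherry]
}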
diff --git a/vendor/github.com/emirpasic/gods/utils/utils.go b/vendor/github.com/emirpasic/gods/utils/utils.go
new file mode 100644
index 0000000000..1ad49cbc07
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/utils/utils.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utils provides common utility functions.
+//
+// Provided functionalities:
+// - sorting
+// - comparators
+package utils
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// ToString converts a value to string.
+func ToString(value interface{}) string {
+ switch value.(type) {
+ case string:
+ return value.(string)
+ case int8:
+ return strconv.FormatInt(int64(value.(int8)), 10)
+ case int16:
+ return strconv.FormatInt(int64(value.(int16)), 10)
+ case int32:
+ return strconv.FormatInt(int64(value.(int32)), 10)
+ case int64:
+ return strconv.FormatInt(int64(value.(int64)), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(value.(uint8)), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(value.(uint16)), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(value.(uint32)), 10)
+ case uint64:
+ return strconv.FormatUint(uint64(value.(uint64)), 10)
+ case float32:
+ return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 32)
+ case float64:
+ return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
+ case bool:
+ return strconv.FormatBool(value.(bool))
+ default:
+ return fmt.Sprintf("%+v", value)
+ }
+}
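
A few illustrative calls to ToString; note that plain int and uint are not in the type switch, so they go through the fmt fallback.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	fmt.Println(utils.ToString(int64(42)))   // "42"   (int64 branch)
	fmt.Println(utils.ToString(3.14))        // "3.14" (float64 branch)
	fmt.Println(utils.ToString(true))        // "true"
	fmt.Println(utils.ToString([]int{1, 2})) // "[1 2]" via the %+v fallback
	fmt.Println(utils.ToString(42))          // "42"   also via the fallback (plain int)
}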
diff --git a/vendor/github.com/jbenet/go-context/LICENSE b/vendor/github.com/jbenet/go-context/LICENSE
new file mode 100644
index 0000000000..c7386b3c94
--- /dev/null
+++ b/vendor/github.com/jbenet/go-context/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Juan Batiz-Benet
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/jbenet/go-context/io/ctxio.go b/vendor/github.com/jbenet/go-context/io/ctxio.go
new file mode 100644
index 0000000000..b4f2454235
--- /dev/null
+++ b/vendor/github.com/jbenet/go-context/io/ctxio.go
@@ -0,0 +1,120 @@
+// Package ctxio provides io.Reader and io.Writer wrappers that
+// respect context.Contexts. Use these at the interface between
+// your context code and your io.
+//
+// WARNING: read the code. see how writes and reads will continue
+// until you cancel the io. Maybe this package should provide
+// versions of io.ReadCloser and io.WriteCloser that automatically
+// call .Close when the context expires. But for now -- since in my
+// use cases I have long-lived connections with ephemeral io wrappers
+// -- this has yet to be a need.
+package ctxio
+
+import (
+ "io"
+
+ context "golang.org/x/net/context"
+)
+
+type ioret struct {
+ n int
+ err error
+}
+
+type Writer interface {
+ io.Writer
+}
+
+type ctxWriter struct {
+ w io.Writer
+ ctx context.Context
+}
+
+// NewWriter wraps a writer to make it respect given Context.
+// If there is a blocking write, the returned Writer will return
+// whenever the context is cancelled (the return values are n=0
+// and err=ctx.Err().)
+//
+// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
+// write-- there is no way to do that with the standard go io
+// interface. So the read and write _will_ happen or hang. So, use
+// this sparingly, make sure to cancel the read or write as necessary
+// (e.g. closing a connection whose context is up, etc.)
+//
+// Furthermore, in order to protect your memory from being read
+// _after_ you've cancelled the context, this io.Writer will
+// first make a **copy** of the buffer.
+func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return &ctxWriter{ctx: ctx, w: w}
+}
+
+func (w *ctxWriter) Write(buf []byte) (int, error) {
+ buf2 := make([]byte, len(buf))
+ copy(buf2, buf)
+
+ c := make(chan ioret, 1)
+
+ go func() {
+ n, err := w.w.Write(buf2)
+ c <- ioret{n, err}
+ close(c)
+ }()
+
+ select {
+ case r := <-c:
+ return r.n, r.err
+ case <-w.ctx.Done():
+ return 0, w.ctx.Err()
+ }
+}
+
+type Reader interface {
+ io.Reader
+}
+
+type ctxReader struct {
+ r io.Reader
+ ctx context.Context
+}
+
+// NewReader wraps a reader to make it respect given Context.
+// If there is a blocking read, the returned Reader will return
+// whenever the context is cancelled (the return values are n=0
+// and err=ctx.Err().)
+//
+// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
+// write-- there is no way to do that with the standard go io
+// interface. So the read and write _will_ happen or hang. So, use
+// this sparingly, make sure to cancel the read or write as necessary
+// (e.g. closing a connection whose context is up, etc.)
+//
+// Furthermore, in order to protect your memory from being read
+// _before_ you've cancelled the context, this io.Reader will
+// allocate a buffer of the same size, and **copy** into the client's
+// if the read succeeds in time.
+func NewReader(ctx context.Context, r io.Reader) *ctxReader {
+ return &ctxReader{ctx: ctx, r: r}
+}
+
+func (r *ctxReader) Read(buf []byte) (int, error) {
+ buf2 := make([]byte, len(buf))
+
+ c := make(chan ioret, 1)
+
+ go func() {
+ n, err := r.r.Read(buf2)
+ c <- ioret{n, err}
+ close(c)
+ }()
+
+ select {
+ case ret := <-c:
+ copy(buf, buf2)
+ return ret.n, ret.err
+ case <-r.ctx.Done():
+ return 0, r.ctx.Err()
+ }
+}
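
A small self-contained sketch of the reader wrapper: a pipe with no writer blocks forever, and the context deadline is what unblocks the caller, while the goroutine's underlying Read keeps hanging, as the package warning above notes. The values and variable names are illustrative.

package main

import (
	"fmt"
	"io"
	"time"

	ctxio "github.com/jbenet/go-context/io"
	context "golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	pr, _ := io.Pipe() // no writer: a plain Read would block indefinitely
	r := ctxio.NewReader(ctx, pr)

	buf := make([]byte, 16)
	n, err := r.Read(buf)
	fmt.Println(n, err) // 0 context deadline exceeded
}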
diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt
new file mode 100644
index 0000000000..51b98f897a
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt
@@ -0,0 +1,4 @@
+Eugene Terentev <eugene@terentev.net>
+Kevin Burke <kev@inburke.com>
+Sergey Lukjanov <me@slukjanov.name>
+Wayne Ashley Berry <wayneashleyberry@gmail.com>
diff --git a/vendor/github.com/kevinburke/ssh_config/LICENSE b/vendor/github.com/kevinburke/ssh_config/LICENSE
new file mode 100644
index 0000000000..b9a770ac2a
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/LICENSE
@@ -0,0 +1,49 @@
+Copyright (c) 2017 Kevin Burke.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+===================
+
+The lexer and parser borrow heavily from github.com/pelletier/go-toml. The
+license for that project is copied below.
+
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go
new file mode 100644
index 0000000000..f400cef9c5
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/config.go
@@ -0,0 +1,639 @@
+// Package ssh_config provides tools for manipulating SSH config files.
+//
+// Importantly, this parser attempts to preserve comments in a given file, so
+// you can manipulate a `ssh_config` file from a program, if your heart desires.
+//
+// The Get() and GetStrict() functions will attempt to read values from
+// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is
+// the host name to match on ("example.com"), and the second argument is the key
+// you want to retrieve ("Port"). The keywords are case insensitive.
+//
+// port := ssh_config.Get("myhost", "Port")
+//
+// You can also manipulate an SSH config file and then print it or write it back
+// to disk.
+//
+// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
+// cfg, _ := ssh_config.Decode(f)
+// for _, host := range cfg.Hosts {
+// fmt.Println("patterns:", host.Patterns)
+// for _, node := range host.Nodes {
+// fmt.Println(node.String())
+// }
+// }
+//
+// // Write the cfg back to disk:
+// fmt.Println(cfg.String())
+//
+// BUG: the Match directive is currently unsupported; parsing a config with
+// a Match directive will trigger an error.
+package ssh_config
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ osuser "os/user"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+const version = "0.5"
+
+type configFinder func() string
+
+// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config
+// files are parsed and cached the first time Get() or GetStrict() is called.
+type UserSettings struct {
+ IgnoreErrors bool
+ systemConfig *Config
+ systemConfigFinder configFinder
+ userConfig *Config
+ userConfigFinder configFinder
+ loadConfigs sync.Once
+ onceErr error
+}
+
+func homedir() string {
+ user, err := osuser.Current()
+ if err == nil {
+ return user.HomeDir
+ } else {
+ return os.Getenv("HOME")
+ }
+}
+
+func userConfigFinder() string {
+ return filepath.Join(homedir(), ".ssh", "config")
+}
+
+// DefaultUserSettings is the default UserSettings and is used by Get and
+// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys,
+// and it will return parse errors (if any) instead of swallowing them.
+var DefaultUserSettings = &UserSettings{
+ IgnoreErrors: false,
+ systemConfigFinder: systemConfigFinder,
+ userConfigFinder: userConfigFinder,
+}
+
+func systemConfigFinder() string {
+ return filepath.Join("/", "etc", "ssh", "ssh_config")
+}
+
+func findVal(c *Config, alias, key string) (string, error) {
+ if c == nil {
+ return "", nil
+ }
+ val, err := c.Get(alias, key)
+ if err != nil || val == "" {
+ return "", err
+ }
+ if err := validate(key, val); err != nil {
+ return "", err
+ }
+ return val, nil
+}
+
+// Get finds the first value for key within a declaration that matches the
+// alias. Get returns the empty string if no value was found, or if IgnoreErrors
+// is false and we could not parse the configuration file. Use GetStrict to
+// disambiguate the latter cases.
+//
+// The match for key is case insensitive.
+//
+// Get is a wrapper around DefaultUserSettings.Get.
+func Get(alias, key string) string {
+ return DefaultUserSettings.Get(alias, key)
+}
+
+// GetStrict finds the first value for key within a declaration that matches the
+// alias. If key has a default value and no matching configuration is found, the
+// default will be returned. For more information on default values and the way
+// patterns are matched, see the manpage for ssh_config.
+//
+// error will be non-nil if and only if a user's configuration file or the
+// system configuration file could not be parsed, and u.IgnoreErrors is false.
+//
+// GetStrict is a wrapper around DefaultUserSettings.GetStrict.
+func GetStrict(alias, key string) (string, error) {
+ return DefaultUserSettings.GetStrict(alias, key)
+}
+
+// Get finds the first value for key within a declaration that matches the
+// alias. Get returns the empty string if no value was found, or if IgnoreErrors
+// is false and we could not parse the configuration file. Use GetStrict to
+// disambiguate the latter cases.
+//
+// The match for key is case insensitive.
+func (u *UserSettings) Get(alias, key string) string {
+ val, err := u.GetStrict(alias, key)
+ if err != nil {
+ return ""
+ }
+ return val
+}
+
+// GetStrict finds the first value for key within a declaration that matches the
+// alias. If key has a default value and no matching configuration is found, the
+// default will be returned. For more information on default values and the way
+// patterns are matched, see the manpage for ssh_config.
+//
+// error will be non-nil if and only if a user's configuration file or the
+// system configuration file could not be parsed, and u.IgnoreErrors is false.
+func (u *UserSettings) GetStrict(alias, key string) (string, error) {
+ u.loadConfigs.Do(func() {
+ // can't parse user file, that's ok.
+ var filename string
+ if u.userConfigFinder == nil {
+ filename = userConfigFinder()
+ } else {
+ filename = u.userConfigFinder()
+ }
+ var err error
+ u.userConfig, err = parseFile(filename)
+ if err != nil && os.IsNotExist(err) == false {
+ u.onceErr = err
+ return
+ }
+ if u.systemConfigFinder == nil {
+ filename = systemConfigFinder()
+ } else {
+ filename = u.systemConfigFinder()
+ }
+ u.systemConfig, err = parseFile(filename)
+ if err != nil && os.IsNotExist(err) == false {
+ u.onceErr = err
+ return
+ }
+ })
+ if u.onceErr != nil && u.IgnoreErrors == false {
+ return "", u.onceErr
+ }
+ val, err := findVal(u.userConfig, alias, key)
+ if err != nil || val != "" {
+ return val, err
+ }
+ val2, err2 := findVal(u.systemConfig, alias, key)
+ if err2 != nil || val2 != "" {
+ return val2, err2
+ }
+ return Default(key), nil
+}
+
+func parseFile(filename string) (*Config, error) {
+ return parseWithDepth(filename, 0)
+}
+
+func parseWithDepth(filename string, depth uint8) (*Config, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return decode(f, isSystem(filename), depth)
+}
+
+func isSystem(filename string) bool {
+ // TODO i'm not sure this is the best way to detect a system repo
+ return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh")
+}
+
+// Decode reads r into a Config, or returns an error if r could not be parsed as
+// an SSH config file.
+func Decode(r io.Reader) (*Config, error) {
+ return decode(r, false, 0)
+}
+
+func decode(r io.Reader, system bool, depth uint8) (c *Config, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if e, ok := r.(error); ok && e == ErrDepthExceeded {
+ err = e
+ return
+ }
+ err = errors.New(r.(string))
+ }
+ }()
+
+ c = parseSSH(lexSSH(r), system, depth)
+ return c, err
+}
+
+// Config represents an SSH config file.
+type Config struct {
+ // A list of hosts to match against. The file begins with an implicit
+ // "Host *" declaration matching all hosts.
+ Hosts []*Host
+ depth uint8
+ position Position
+}
+
+// Get finds the first value in the configuration that matches the alias and
+// contains key. Get returns the empty string if no value was found, or if the
+// Config contains an invalid conditional Include value.
+//
+// The match for key is case insensitive.
+func (c *Config) Get(alias, key string) (string, error) {
+ lowerKey := strings.ToLower(key)
+ for _, host := range c.Hosts {
+ if !host.Matches(alias) {
+ continue
+ }
+ for _, node := range host.Nodes {
+ switch t := node.(type) {
+ case *Empty:
+ continue
+ case *KV:
+ // "keys are case insensitive" per the spec
+ lkey := strings.ToLower(t.Key)
+ if lkey == "match" {
+ panic("can't handle Match directives")
+ }
+ if lkey == lowerKey {
+ return t.Value, nil
+ }
+ case *Include:
+ val := t.Get(alias, key)
+ if val != "" {
+ return val, nil
+ }
+ default:
+ return "", fmt.Errorf("unknown Node type %v", t)
+ }
+ }
+ }
+ return "", nil
+}
+
+// String returns a string representation of the Config file.
+func (c Config) String() string {
+ return marshal(c).String()
+}
+
+func (c Config) MarshalText() ([]byte, error) {
+ return marshal(c).Bytes(), nil
+}
+
+func marshal(c Config) *bytes.Buffer {
+ var buf bytes.Buffer
+ for i := range c.Hosts {
+ buf.WriteString(c.Hosts[i].String())
+ }
+ return &buf
+}
+
+// Pattern is a pattern in a Host declaration. Patterns are read-only values;
+// create a new one with NewPattern().
+type Pattern struct {
+ str string // Its appearance in the file, not the value that gets compiled.
+ regex *regexp.Regexp
+ not bool // True if this is a negated match
+}
+
+// String prints the string representation of the pattern.
+func (p Pattern) String() string {
+ return p.str
+}
+
+// Copied from regexp.go with * and ? removed.
+var specialBytes = []byte(`\.+()|[]{}^$`)
+
+func special(b byte) bool {
+ return bytes.IndexByte(specialBytes, b) >= 0
+}
+
+// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates
+// a Pattern that matches all hosts.
+//
+// From the manpage, a pattern consists of zero or more non-whitespace
+// characters, `*' (a wildcard that matches zero or more characters), or `?' (a
+// wildcard that matches exactly one character). For example, to specify a set
+// of declarations for any host in the ".co.uk" set of domains, the following
+// pattern could be used:
+//
+// Host *.co.uk
+//
+// The following pattern would match any host in the 192.168.0.[0-9] network range:
+//
+// Host 192.168.0.?
+func NewPattern(s string) (*Pattern, error) {
+ if s == "" {
+ return nil, errors.New("ssh_config: empty pattern")
+ }
+ negated := false
+ if s[0] == '!' {
+ negated = true
+ s = s[1:]
+ }
+ var buf bytes.Buffer
+ buf.WriteByte('^')
+ for i := 0; i < len(s); i++ {
+ // A byte loop is correct because all metacharacters are ASCII.
+ switch b := s[i]; b {
+ case '*':
+ buf.WriteString(".*")
+ case '?':
+ buf.WriteString(".?")
+ default:
+ // borrowing from QuoteMeta here.
+ if special(b) {
+ buf.WriteByte('\\')
+ }
+ buf.WriteByte(b)
+ }
+ }
+ buf.WriteByte('$')
+ r, err := regexp.Compile(buf.String())
+ if err != nil {
+ return nil, err
+ }
+ return &Pattern{str: s, regex: r, not: negated}, nil
+}
+
+// Host describes a Host directive and the keywords that follow it.
+type Host struct {
+ // A list of host patterns that should match this host.
+ Patterns []*Pattern
+ // A Node is either a key/value pair or a comment line.
+ Nodes []Node
+ // EOLComment is the comment (if any) terminating the Host line.
+ EOLComment string
+ hasEquals bool
+ leadingSpace uint16 // TODO: handle spaces vs tabs here.
+ // The file starts with an implicit "Host *" declaration.
+ implicit bool
+}
+
+// Matches returns true if the Host matches the given alias. For
+// a description of the rules that provide a match, see the manpage for
+// ssh_config.
+func (h *Host) Matches(alias string) bool {
+ found := false
+ for i := range h.Patterns {
+ if h.Patterns[i].regex.MatchString(alias) {
+ if h.Patterns[i].not == true {
+ // Negated match. "A pattern entry may be negated by prefixing
+ // it with an exclamation mark (`!'). If a negated entry is
+ // matched, then the Host entry is ignored, regardless of
+ // whether any other patterns on the line match. Negated matches
+ // are therefore useful to provide exceptions for wildcard
+ // matches."
+ return false
+ }
+ found = true
+ }
+ }
+ return found
+}
+
+// String prints h as it would appear in a config file. Minor tweaks may be
+// present in the whitespace in the printed file.
+func (h *Host) String() string {
+ var buf bytes.Buffer
+ if h.implicit == false {
+ buf.WriteString(strings.Repeat(" ", int(h.leadingSpace)))
+ buf.WriteString("Host")
+ if h.hasEquals {
+ buf.WriteString(" = ")
+ } else {
+ buf.WriteString(" ")
+ }
+ for i, pat := range h.Patterns {
+ buf.WriteString(pat.String())
+ if i < len(h.Patterns)-1 {
+ buf.WriteString(" ")
+ }
+ }
+ if h.EOLComment != "" {
+ buf.WriteString(" #")
+ buf.WriteString(h.EOLComment)
+ }
+ buf.WriteByte('\n')
+ }
+ for i := range h.Nodes {
+ buf.WriteString(h.Nodes[i].String())
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+// Node represents a line in a Config.
+type Node interface {
+ Pos() Position
+ String() string
+}
+
+// KV is a line in the config file that contains a key, a value, and possibly
+// a comment.
+type KV struct {
+ Key string
+ Value string
+ Comment string
+ hasEquals bool
+ leadingSpace uint16 // Space before the key. TODO handle spaces vs tabs.
+ position Position
+}
+
+// Pos returns k's Position.
+func (k *KV) Pos() Position {
+ return k.position
+}
+
+// String prints k as it was parsed in the config file. There may be slight
+// changes to the whitespace between values.
+func (k *KV) String() string {
+ if k == nil {
+ return ""
+ }
+ equals := " "
+ if k.hasEquals {
+ equals = " = "
+ }
+ line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value)
+ if k.Comment != "" {
+ line += " #" + k.Comment
+ }
+ return line
+}
+
+// Empty is a line in the config file that contains only whitespace or comments.
+type Empty struct {
+ Comment string
+ leadingSpace uint16 // TODO handle spaces vs tabs.
+ position Position
+}
+
+// Pos returns e's Position.
+func (e *Empty) Pos() Position {
+ return e.position
+}
+
+// String prints e as it was parsed in the config file.
+func (e *Empty) String() string {
+ if e == nil {
+ return ""
+ }
+ if e.Comment == "" {
+ return ""
+ }
+ return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment)
+}
+
+// Include holds the result of an Include directive, including the config files
+// that have been parsed as part of that directive. At most 5 levels of Include
+// statements will be parsed.
+type Include struct {
+ // Comment is the contents of any comment at the end of the Include
+ // statement.
+ Comment string
+ parsed bool
+ // an include directive can include several different files, and wildcards
+ directives []string
+
+ mu sync.Mutex
+ // 1:1 mapping between matches and keys in files array; matches preserves
+ // ordering
+ matches []string
+ // actual filenames are listed here
+ files map[string]*Config
+ leadingSpace uint16
+ position Position
+ depth uint8
+ hasEquals bool
+}
+
+const maxRecurseDepth = 5
+
+// ErrDepthExceeded is returned if too many Include directives are parsed.
+// Usually this indicates a recursive loop (an Include directive pointing to the
+// file that contains it).
+var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded")
+
+func removeDups(arr []string) []string {
+ // Use map to record duplicates as we find them.
+ encountered := make(map[string]bool, len(arr))
+ result := make([]string, 0)
+
+ for v := range arr {
+ if encountered[arr[v]] == false {
+ encountered[arr[v]] = true
+ result = append(result, arr[v])
+ }
+ }
+ return result
+}
+
+// NewInclude creates a new Include with a list of file globs to include.
+// Configuration files are parsed greedily (i.e. as soon as this function runs).
+// Any error encountered while parsing nested configuration files will be
+// returned.
+func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) {
+ if depth > maxRecurseDepth {
+ return nil, ErrDepthExceeded
+ }
+ inc := &Include{
+ Comment: comment,
+ directives: directives,
+ files: make(map[string]*Config),
+ position: pos,
+ leadingSpace: uint16(pos.Col) - 1,
+ depth: depth,
+ hasEquals: hasEquals,
+ }
+ // no need for inc.mu.Lock() since nothing else can access this inc
+ matches := make([]string, 0)
+ for i := range directives {
+ var path string
+ if filepath.IsAbs(directives[i]) {
+ path = directives[i]
+ } else if system {
+ path = filepath.Join("/etc/ssh", directives[i])
+ } else {
+ path = filepath.Join(homedir(), ".ssh", directives[i])
+ }
+ theseMatches, err := filepath.Glob(path)
+ if err != nil {
+ return nil, err
+ }
+ matches = append(matches, theseMatches...)
+ }
+ matches = removeDups(matches)
+ inc.matches = matches
+ for i := range matches {
+ config, err := parseWithDepth(matches[i], depth)
+ if err != nil {
+ return nil, err
+ }
+ inc.files[matches[i]] = config
+ }
+ return inc, nil
+}
+
+// Pos returns the position of the Include directive in the larger file.
+func (i *Include) Pos() Position {
+ return i.position
+}
+
+// Get finds the first value in the Include statement matching the alias and the
+// given key.
+func (inc *Include) Get(alias, key string) string {
+ inc.mu.Lock()
+ defer inc.mu.Unlock()
+ // TODO: we search files in any order which is not correct
+ for i := range inc.matches {
+ cfg := inc.files[inc.matches[i]]
+ if cfg == nil {
+ panic("nil cfg")
+ }
+ val, err := cfg.Get(alias, key)
+ if err == nil && val != "" {
+ return val
+ }
+ }
+ return ""
+}
+
+// String prints out a string representation of this Include directive. Note
+// included Config files are not printed as part of this representation.
+func (inc *Include) String() string {
+ equals := " "
+ if inc.hasEquals {
+ equals = " = "
+ }
+ line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " "))
+ if inc.Comment != "" {
+ line += " #" + inc.Comment
+ }
+ return line
+}
+
+var matchAll *Pattern
+
+func init() {
+ var err error
+ matchAll, err = NewPattern("*")
+ if err != nil {
+ panic(err)
+ }
+}
+
+func newConfig() *Config {
+ return &Config{
+ Hosts: []*Host{
+ &Host{
+ implicit: true,
+ Patterns: []*Pattern{matchAll},
+ Nodes: make([]Node, 0),
+ },
+ },
+ depth: 0,
+ }
+}
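As a rough sketch of the pattern semantics documented above (the host names are made up), a Host built by hand behaves like one parsed from a file, and a negated pattern overrides any other match on the same line:

    package main

    import (
        "fmt"

        "github.com/kevinburke/ssh_config"
    )

    func main() {
        wild, _ := ssh_config.NewPattern("*.example.com")       // errors ignored for brevity
        neg, _ := ssh_config.NewPattern("!staging.example.com")

        h := &ssh_config.Host{Patterns: []*ssh_config.Pattern{wild, neg}}
        fmt.Println(h.Matches("db.example.com"))      // true
        fmt.Println(h.Matches("staging.example.com")) // false: the negated pattern wins
    }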
diff --git a/vendor/github.com/kevinburke/ssh_config/lexer.go b/vendor/github.com/kevinburke/ssh_config/lexer.go
new file mode 100644
index 0000000000..b0c6a8650c
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/lexer.go
@@ -0,0 +1,241 @@
+package ssh_config
+
+import (
+ "io"
+
+ buffruneio "github.com/pelletier/go-buffruneio"
+)
+
+// Define state functions
+type sshLexStateFn func() sshLexStateFn
+
+type sshLexer struct {
+ input *buffruneio.Reader // Textual source
+ buffer []rune // Runes composing the current token
+ tokens chan token
+ line uint32
+ col uint16
+ endbufferLine uint32
+ endbufferCol uint16
+}
+
+func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn {
+ return func() sshLexStateFn {
+ growingString := ""
+ for next := s.peek(); next != '\n' && next != eof; next = s.peek() {
+ if next == '\r' && s.follow("\r\n") {
+ break
+ }
+ growingString += string(next)
+ s.next()
+ }
+ s.emitWithValue(tokenComment, growingString)
+ s.skip()
+ return previousState
+ }
+}
+
+// lex the space after an equals sign in a function
+func (s *sshLexer) lexRspace() sshLexStateFn {
+ for {
+ next := s.peek()
+ if !isSpace(next) {
+ break
+ }
+ s.skip()
+ }
+ return s.lexRvalue
+}
+
+func (s *sshLexer) lexEquals() sshLexStateFn {
+ for {
+ next := s.peek()
+ if next == '=' {
+ s.emit(tokenEquals)
+ s.skip()
+ return s.lexRspace
+ }
+ // TODO error handling here; newline eof etc.
+ if !isSpace(next) {
+ break
+ }
+ s.skip()
+ }
+ return s.lexRvalue
+}
+
+func (s *sshLexer) lexKey() sshLexStateFn {
+ growingString := ""
+
+ for r := s.peek(); isKeyChar(r); r = s.peek() {
+ // simplified a lot here
+ if isSpace(r) || r == '=' {
+ s.emitWithValue(tokenKey, growingString)
+ s.skip()
+ return s.lexEquals
+ }
+ growingString += string(r)
+ s.next()
+ }
+ s.emitWithValue(tokenKey, growingString)
+ return s.lexEquals
+}
+
+func (s *sshLexer) lexRvalue() sshLexStateFn {
+ growingString := ""
+ for {
+ next := s.peek()
+ switch next {
+ case '\r':
+ if s.follow("\r\n") {
+ s.emitWithValue(tokenString, growingString)
+ s.skip()
+ return s.lexVoid
+ }
+ case '\n':
+ s.emitWithValue(tokenString, growingString)
+ s.skip()
+ return s.lexVoid
+ case '#':
+ s.emitWithValue(tokenString, growingString)
+ s.skip()
+ return s.lexComment(s.lexVoid)
+ case eof:
+ s.next()
+ }
+ if next == eof {
+ break
+ }
+ growingString += string(next)
+ s.next()
+ }
+ s.emit(tokenEOF)
+ return nil
+}
+
+func (s *sshLexer) read() rune {
+ r, _, err := s.input.ReadRune()
+ if err != nil {
+ panic(err)
+ }
+ if r == '\n' {
+ s.endbufferLine++
+ s.endbufferCol = 1
+ } else {
+ s.endbufferCol++
+ }
+ return r
+}
+
+func (s *sshLexer) next() rune {
+ r := s.read()
+
+ if r != eof {
+ s.buffer = append(s.buffer, r)
+ }
+ return r
+}
+
+func (s *sshLexer) lexVoid() sshLexStateFn {
+ for {
+ next := s.peek()
+ switch next {
+ case '#':
+ s.skip()
+ return s.lexComment(s.lexVoid)
+ case '\r':
+ fallthrough
+ case '\n':
+ s.emit(tokenEmptyLine)
+ s.skip()
+ continue
+ }
+
+ if isSpace(next) {
+ s.skip()
+ }
+
+ if isKeyStartChar(next) {
+ return s.lexKey
+ }
+
+ // removed IsKeyStartChar and lexKey. probably will need to readd
+
+ if next == eof {
+ s.next()
+ break
+ }
+ }
+
+ s.emit(tokenEOF)
+ return nil
+}
+
+func (s *sshLexer) ignore() {
+ s.buffer = make([]rune, 0)
+ s.line = s.endbufferLine
+ s.col = s.endbufferCol
+}
+
+func (s *sshLexer) skip() {
+ s.next()
+ s.ignore()
+}
+
+func (s *sshLexer) emit(t tokenType) {
+ s.emitWithValue(t, string(s.buffer))
+}
+
+func (s *sshLexer) emitWithValue(t tokenType, value string) {
+ tok := token{
+ Position: Position{s.line, s.col},
+ typ: t,
+ val: value,
+ }
+ s.tokens <- tok
+ s.ignore()
+}
+
+func (s *sshLexer) peek() rune {
+ r, _, err := s.input.ReadRune()
+ if err != nil {
+ panic(err)
+ }
+ s.input.UnreadRune()
+ return r
+}
+
+func (s *sshLexer) follow(next string) bool {
+ for _, expectedRune := range next {
+ r, _, err := s.input.ReadRune()
+ defer s.input.UnreadRune()
+ if err != nil {
+ panic(err)
+ }
+ if expectedRune != r {
+ return false
+ }
+ }
+ return true
+}
+
+func (s *sshLexer) run() {
+ for state := s.lexVoid; state != nil; {
+ state = state()
+ }
+ close(s.tokens)
+}
+
+func lexSSH(input io.Reader) chan token {
+ bufferedInput := buffruneio.NewReader(input)
+ l := &sshLexer{
+ input: bufferedInput,
+ tokens: make(chan token),
+ line: 1,
+ col: 1,
+ endbufferLine: 1,
+ endbufferCol: 1,
+ }
+ go l.run()
+ return l.tokens
+}
diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go
new file mode 100644
index 0000000000..02745b4b29
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/parser.go
@@ -0,0 +1,185 @@
+package ssh_config
+
+import (
+ "fmt"
+ "strings"
+)
+
+type sshParser struct {
+ flow chan token
+ config *Config
+ tokensBuffer []token
+ currentTable []string
+ seenTableKeys []string
+ // /etc/ssh parser or local parser - used to find the default for relative
+ // filepaths in the Include directive
+ system bool
+ depth uint8
+}
+
+type sshParserStateFn func() sshParserStateFn
+
+// Formats and panics an error message based on a token
+func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) {
+ // TODO this format is ugly
+ panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *sshParser) raiseError(tok *token, err error) {
+ if err == ErrDepthExceeded {
+ panic(err)
+ }
+ // TODO this format is ugly
+ panic(tok.Position.String() + ": " + err.Error())
+}
+
+func (p *sshParser) run() {
+ for state := p.parseStart; state != nil; {
+ state = state()
+ }
+}
+
+func (p *sshParser) peek() *token {
+ if len(p.tokensBuffer) != 0 {
+ return &(p.tokensBuffer[0])
+ }
+
+ tok, ok := <-p.flow
+ if !ok {
+ return nil
+ }
+ p.tokensBuffer = append(p.tokensBuffer, tok)
+ return &tok
+}
+
+func (p *sshParser) getToken() *token {
+ if len(p.tokensBuffer) != 0 {
+ tok := p.tokensBuffer[0]
+ p.tokensBuffer = p.tokensBuffer[1:]
+ return &tok
+ }
+ tok, ok := <-p.flow
+ if !ok {
+ return nil
+ }
+ return &tok
+}
+
+func (p *sshParser) parseStart() sshParserStateFn {
+ tok := p.peek()
+
+ // end of stream, parsing is finished
+ if tok == nil {
+ return nil
+ }
+
+ switch tok.typ {
+ case tokenComment, tokenEmptyLine:
+ return p.parseComment
+ case tokenKey:
+ return p.parseKV
+ case tokenEOF:
+ return nil
+ default:
+ p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok))
+ }
+ return nil
+}
+
+func (p *sshParser) parseKV() sshParserStateFn {
+ key := p.getToken()
+ hasEquals := false
+ val := p.getToken()
+ if val.typ == tokenEquals {
+ hasEquals = true
+ val = p.getToken()
+ }
+ comment := ""
+ tok := p.peek()
+ if tok == nil {
+ tok = &token{typ: tokenEOF}
+ }
+ if tok.typ == tokenComment && tok.Position.Line == val.Position.Line {
+ tok = p.getToken()
+ comment = tok.val
+ }
+ if strings.ToLower(key.val) == "match" {
+ // https://github.com/kevinburke/ssh_config/issues/6
+ p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported")
+ return nil
+ }
+ if strings.ToLower(key.val) == "host" {
+ strPatterns := strings.Split(val.val, " ")
+ patterns := make([]*Pattern, 0)
+ for i := range strPatterns {
+ if strPatterns[i] == "" {
+ continue
+ }
+ pat, err := NewPattern(strPatterns[i])
+ if err != nil {
+ p.raiseErrorf(val, "Invalid host pattern: %v", err)
+ return nil
+ }
+ patterns = append(patterns, pat)
+ }
+ p.config.Hosts = append(p.config.Hosts, &Host{
+ Patterns: patterns,
+ Nodes: make([]Node, 0),
+ EOLComment: comment,
+ hasEquals: hasEquals,
+ })
+ return p.parseStart
+ }
+ lastHost := p.config.Hosts[len(p.config.Hosts)-1]
+ if strings.ToLower(key.val) == "include" {
+ inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1)
+ if err == ErrDepthExceeded {
+ p.raiseError(val, err)
+ return nil
+ }
+ if err != nil {
+ p.raiseErrorf(val, "Error parsing Include directive: %v", err)
+ return nil
+ }
+ lastHost.Nodes = append(lastHost.Nodes, inc)
+ return p.parseStart
+ }
+ kv := &KV{
+ Key: key.val,
+ Value: val.val,
+ Comment: comment,
+ hasEquals: hasEquals,
+ leadingSpace: uint16(key.Position.Col) - 1,
+ position: key.Position,
+ }
+ lastHost.Nodes = append(lastHost.Nodes, kv)
+ return p.parseStart
+}
+
+func (p *sshParser) parseComment() sshParserStateFn {
+ comment := p.getToken()
+ lastHost := p.config.Hosts[len(p.config.Hosts)-1]
+ lastHost.Nodes = append(lastHost.Nodes, &Empty{
+ Comment: comment.val,
+ // account for the "#" as well
+ leadingSpace: comment.Position.Col - 2,
+ position: comment.Position,
+ })
+ return p.parseStart
+}
+
+func parseSSH(flow chan token, system bool, depth uint8) *Config {
+ result := newConfig()
+ result.position = Position{1, 1}
+ parser := &sshParser{
+ flow: flow,
+ config: result,
+ tokensBuffer: make([]token, 0),
+ currentTable: make([]string, 0),
+ seenTableKeys: make([]string, 0),
+ system: system,
+ depth: depth,
+ }
+ parser.run()
+ return result
+}
diff --git a/vendor/github.com/kevinburke/ssh_config/position.go b/vendor/github.com/kevinburke/ssh_config/position.go
new file mode 100644
index 0000000000..7304bc3b7f
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/position.go
@@ -0,0 +1,25 @@
+package ssh_config
+
+import "fmt"
+
+// Position of a document element within a SSH document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+ Line uint32 // line within the document
+ Col uint16 // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+ return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid returns whether the position is invalid, i.e. whether Line or Col
+// is zero or negative.
+func (p Position) Invalid() bool {
+ return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/kevinburke/ssh_config/token.go b/vendor/github.com/kevinburke/ssh_config/token.go
new file mode 100644
index 0000000000..a0ecbb2bb7
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/token.go
@@ -0,0 +1,49 @@
+package ssh_config
+
+import "fmt"
+
+type token struct {
+ Position
+ typ tokenType
+ val string
+}
+
+func (t token) String() string {
+ switch t.typ {
+ case tokenEOF:
+ return "EOF"
+ }
+ return fmt.Sprintf("%q", t.val)
+}
+
+type tokenType int
+
+const (
+ eof = -(iota + 1)
+)
+
+const (
+ tokenError tokenType = iota
+ tokenEOF
+ tokenEmptyLine
+ tokenComment
+ tokenKey
+ tokenEquals
+ tokenString
+)
+
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func isKeyStartChar(r rune) bool {
+ return !(isSpace(r) || r == '\r' || r == '\n' || r == eof)
+}
+
+// I'm not sure that this is correct
+func isKeyChar(r rune) bool {
+ // Keys start with the first character that isn't whitespace or [ and end
+ // with the last non-whitespace character before the equals sign. Keys
+ // cannot contain a # character."
+ return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go
new file mode 100644
index 0000000000..29fab6a9d2
--- /dev/null
+++ b/vendor/github.com/kevinburke/ssh_config/validators.go
@@ -0,0 +1,162 @@
+package ssh_config
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Default returns the default value for the given keyword, for example "22" if
+// the keyword is "Port". Default returns the empty string if the keyword has no
+// default, or if the keyword is unknown. Keyword matching is case-insensitive.
+//
+// Default values are provided by OpenSSH_7.4p1 on a Mac.
+func Default(keyword string) string {
+ return defaults[strings.ToLower(keyword)]
+}
+
+// Arguments where the value must be "yes" or "no" and *only* yes or no.
+var yesnos = map[string]bool{
+ strings.ToLower("BatchMode"): true,
+ strings.ToLower("CanonicalizeFallbackLocal"): true,
+ strings.ToLower("ChallengeResponseAuthentication"): true,
+ strings.ToLower("CheckHostIP"): true,
+ strings.ToLower("ClearAllForwardings"): true,
+ strings.ToLower("Compression"): true,
+ strings.ToLower("EnableSSHKeysign"): true,
+ strings.ToLower("ExitOnForwardFailure"): true,
+ strings.ToLower("ForwardAgent"): true,
+ strings.ToLower("ForwardX11"): true,
+ strings.ToLower("ForwardX11Trusted"): true,
+ strings.ToLower("GatewayPorts"): true,
+ strings.ToLower("GSSAPIAuthentication"): true,
+ strings.ToLower("GSSAPIDelegateCredentials"): true,
+ strings.ToLower("HostbasedAuthentication"): true,
+ strings.ToLower("IdentitiesOnly"): true,
+ strings.ToLower("KbdInteractiveAuthentication"): true,
+ strings.ToLower("NoHostAuthenticationForLocalhost"): true,
+ strings.ToLower("PasswordAuthentication"): true,
+ strings.ToLower("PermitLocalCommand"): true,
+ strings.ToLower("PubkeyAuthentication"): true,
+ strings.ToLower("RhostsRSAAuthentication"): true,
+ strings.ToLower("RSAAuthentication"): true,
+ strings.ToLower("StreamLocalBindUnlink"): true,
+ strings.ToLower("TCPKeepAlive"): true,
+ strings.ToLower("UseKeychain"): true,
+ strings.ToLower("UsePrivilegedPort"): true,
+ strings.ToLower("VisualHostKey"): true,
+}
+
+var uints = map[string]bool{
+ strings.ToLower("CanonicalizeMaxDots"): true,
+ strings.ToLower("CompressionLevel"): true, // 1 to 9
+ strings.ToLower("ConnectionAttempts"): true,
+ strings.ToLower("ConnectTimeout"): true,
+ strings.ToLower("NumberOfPasswordPrompts"): true,
+ strings.ToLower("Port"): true,
+ strings.ToLower("ServerAliveCountMax"): true,
+ strings.ToLower("ServerAliveInterval"): true,
+}
+
+func mustBeYesOrNo(lkey string) bool {
+ return yesnos[lkey]
+}
+
+func mustBeUint(lkey string) bool {
+ return uints[lkey]
+}
+
+func validate(key, val string) error {
+ lkey := strings.ToLower(key)
+ if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") {
+ return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val)
+ }
+ if mustBeUint(lkey) {
+ _, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return fmt.Errorf("ssh_config: %v", err)
+ }
+ }
+ return nil
+}
+
+var defaults = map[string]string{
+ strings.ToLower("AddKeysToAgent"): "no",
+ strings.ToLower("AddressFamily"): "any",
+ strings.ToLower("BatchMode"): "no",
+ strings.ToLower("CanonicalizeFallbackLocal"): "yes",
+ strings.ToLower("CanonicalizeHostname"): "no",
+ strings.ToLower("CanonicalizeMaxDots"): "1",
+ strings.ToLower("ChallengeResponseAuthentication"): "yes",
+ strings.ToLower("CheckHostIP"): "yes",
+ // TODO is this still the correct cipher
+ strings.ToLower("Cipher"): "3des",
+ strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc",
+ strings.ToLower("ClearAllForwardings"): "no",
+ strings.ToLower("Compression"): "no",
+ strings.ToLower("CompressionLevel"): "6",
+ strings.ToLower("ConnectionAttempts"): "1",
+ strings.ToLower("ControlMaster"): "no",
+ strings.ToLower("EnableSSHKeysign"): "no",
+ strings.ToLower("EscapeChar"): "~",
+ strings.ToLower("ExitOnForwardFailure"): "no",
+ strings.ToLower("FingerprintHash"): "sha256",
+ strings.ToLower("ForwardAgent"): "no",
+ strings.ToLower("ForwardX11"): "no",
+ strings.ToLower("ForwardX11Timeout"): "20m",
+ strings.ToLower("ForwardX11Trusted"): "no",
+ strings.ToLower("GatewayPorts"): "no",
+ strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2",
+ strings.ToLower("GSSAPIAuthentication"): "no",
+ strings.ToLower("GSSAPIDelegateCredentials"): "no",
+ strings.ToLower("HashKnownHosts"): "no",
+ strings.ToLower("HostbasedAuthentication"): "no",
+
+ strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
+ strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
+ // HostName has a dynamic default (the value passed at the command line).
+
+ strings.ToLower("IdentitiesOnly"): "no",
+ strings.ToLower("IdentityFile"): "~/.ssh/identity",
+
+ // IPQoS has a dynamic default based on interactive or non-interactive
+ // sessions.
+
+ strings.ToLower("KbdInteractiveAuthentication"): "yes",
+
+ strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1",
+ strings.ToLower("LogLevel"): "INFO",
+ strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1",
+
+ strings.ToLower("NoHostAuthenticationForLocalhost"): "no",
+ strings.ToLower("NumberOfPasswordPrompts"): "3",
+ strings.ToLower("PasswordAuthentication"): "yes",
+ strings.ToLower("PermitLocalCommand"): "no",
+ strings.ToLower("Port"): "22",
+
+ strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password",
+ strings.ToLower("Protocol"): "2",
+ strings.ToLower("ProxyUseFdpass"): "no",
+ strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
+ strings.ToLower("PubkeyAuthentication"): "yes",
+ strings.ToLower("RekeyLimit"): "default none",
+ strings.ToLower("RhostsRSAAuthentication"): "no",
+ strings.ToLower("RSAAuthentication"): "yes",
+
+ strings.ToLower("ServerAliveCountMax"): "3",
+ strings.ToLower("ServerAliveInterval"): "0",
+ strings.ToLower("StreamLocalBindMask"): "0177",
+ strings.ToLower("StreamLocalBindUnlink"): "no",
+ strings.ToLower("StrictHostKeyChecking"): "ask",
+ strings.ToLower("TCPKeepAlive"): "yes",
+ strings.ToLower("Tunnel"): "no",
+ strings.ToLower("TunnelDevice"): "any:any",
+ strings.ToLower("UpdateHostKeys"): "no",
+ strings.ToLower("UseKeychain"): "no",
+ strings.ToLower("UsePrivilegedPort"): "no",
+
+ strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2",
+ strings.ToLower("VerifyHostKeyDNS"): "no",
+ strings.ToLower("VisualHostKey"): "no",
+ strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth",
+}
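A small sketch of the lookup behavior described for Default above; "NotARealKeyword" is, as the name says, a hypothetical keyword used only to show the empty-string fallback:

    package main

    import (
        "fmt"

        "github.com/kevinburke/ssh_config"
    )

    func main() {
        fmt.Println(ssh_config.Default("port"))            // "22": keyword matching is case-insensitive
        fmt.Println(ssh_config.Default("ForwardAgent"))    // "no"
        fmt.Println(ssh_config.Default("NotARealKeyword")) // "": unknown keywords have no default
    }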
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 0000000000..f9c841a51e
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 0000000000..fb87bef94f
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,157 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+ homeEnv := "HOME"
+ if runtime.GOOS == "plan9" {
+ // On plan9, env vars are lowercase.
+ homeEnv = "home"
+ }
+
+ // First prefer the HOME environmental variable
+ if home := os.Getenv(homeEnv); home != "" {
+ return home, nil
+ }
+
+ var stdout bytes.Buffer
+
+ // If that fails, try OS specific commands
+ if runtime.GOOS == "darwin" {
+ cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err == nil {
+ result := strings.TrimSpace(stdout.String())
+ if result != "" {
+ return result, nil
+ }
+ }
+ } else {
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd := exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // Prefer standard environment variable USERPROFILE
+ if home := os.Getenv("USERPROFILE"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
+ }
+
+ return home, nil
+}
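A brief sketch of the two exported entry points, Dir and Expand (the "~/.ssh/config" path is only an example):

    package main

    import (
        "fmt"
        "log"

        homedir "github.com/mitchellh/go-homedir"
    )

    func main() {
        dir, err := homedir.Dir()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("home:", dir)

        // Expand only rewrites a leading "~"; any other path passes through as-is.
        p, err := homedir.Expand("~/.ssh/config")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("expanded:", p)
    }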
diff --git a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
new file mode 100644
index 0000000000..4e6d6ea610
--- /dev/null
+++ b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
@@ -0,0 +1,117 @@
+// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
+package buffruneio
+
+import (
+ "bufio"
+ "container/list"
+ "errors"
+ "io"
+)
+
+// Rune to indicate end of file.
+const (
+ EOF = -(iota + 1)
+)
+
+// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
+var ErrNoRuneToUnread = errors.New("no rune to unwind")
+
+// Reader implements runes buffering for an io.Reader object.
+type Reader struct {
+ buffer *list.List
+ current *list.Element
+ input *bufio.Reader
+}
+
+// NewReader returns a new Reader.
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ buffer: list.New(),
+ input: bufio.NewReader(rd),
+ }
+}
+
+type runeWithSize struct {
+ r rune
+ size int
+}
+
+func (rd *Reader) feedBuffer() error {
+ r, size, err := rd.input.ReadRune()
+
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ r = EOF
+ }
+
+ newRuneWithSize := runeWithSize{r, size}
+
+ rd.buffer.PushBack(newRuneWithSize)
+ if rd.current == nil {
+ rd.current = rd.buffer.Back()
+ }
+ return nil
+}
+
+// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
+func (rd *Reader) ReadRune() (rune, int, error) {
+ if rd.current == rd.buffer.Back() || rd.current == nil {
+ err := rd.feedBuffer()
+ if err != nil {
+ return EOF, 0, err
+ }
+ }
+
+ runeWithSize := rd.current.Value.(runeWithSize)
+ rd.current = rd.current.Next()
+ return runeWithSize.r, runeWithSize.size, nil
+}
+
+// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
+func (rd *Reader) UnreadRune() error {
+ if rd.current == rd.buffer.Front() {
+ return ErrNoRuneToUnread
+ }
+ if rd.current == nil {
+ rd.current = rd.buffer.Back()
+ } else {
+ rd.current = rd.current.Prev()
+ }
+ return nil
+}
+
+// Forget removes runes stored before the current stream position index.
+func (rd *Reader) Forget() {
+ if rd.current == nil {
+ rd.current = rd.buffer.Back()
+ }
+ for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
+ }
+}
+
+// PeekRunes returns at most the next n runes, reading from the underlying source if
+// needed. It does not move the current index, and includes EOF if reached.
+func (rd *Reader) PeekRunes(n int) []rune {
+ res := make([]rune, 0, n)
+ cursor := rd.current
+ for i := 0; i < n; i++ {
+ if cursor == nil {
+ err := rd.feedBuffer()
+ if err != nil {
+ return res
+ }
+ cursor = rd.buffer.Back()
+ }
+ if cursor != nil {
+ r := cursor.Value.(runeWithSize).r
+ res = append(res, r)
+ if r == EOF {
+ return res
+ }
+ cursor = cursor.Next()
+ }
+ }
+ return res
+}
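A short sketch of the unread/peek behavior, assuming the package is imported with its conventional buffruneio alias:

    package main

    import (
        "fmt"
        "strings"

        buffruneio "github.com/pelletier/go-buffruneio"
    )

    func main() {
        rd := buffruneio.NewReader(strings.NewReader("héllo"))

        r, _, _ := rd.ReadRune()
        fmt.Printf("%c\n", r) // h

        // Unlimited unreads: step back, then peek ahead without moving the index.
        _ = rd.UnreadRune()
        fmt.Println(string(rd.PeekRunes(3))) // hél
    }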
diff --git a/vendor/github.com/src-d/gcfg/LICENSE b/vendor/github.com/src-d/gcfg/LICENSE
new file mode 100644
index 0000000000..87a5cede33
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/src-d/gcfg/doc.go b/vendor/github.com/src-d/gcfg/doc.go
new file mode 100644
index 0000000000..2edcb41a08
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/doc.go
@@ -0,0 +1,145 @@
+// Package gcfg reads "INI-style" text-based configuration files with
+// "name=value" pairs grouped into sections (gcfg files).
+//
+// This package is still a work in progress; see the sections below for planned
+// changes.
+//
+// Syntax
+//
+// The syntax is based on that used by git config:
+// http://git-scm.com/docs/git-config#_syntax .
+// There are some (planned) differences compared to the git config format:
+// - improve data portability:
+// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
+// - include and "path" type is not supported
+// (path type may be implementable as a user-defined type)
+// - internationalization
+// - section and variable names can contain unicode letters, unicode digits
+// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
+// (U+002D), starting with a unicode letter
+// - disallow potentially ambiguous or misleading definitions:
+// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
+// - `[sec ""]` is not allowed
+// - use `[sec]` for section name "sec" and empty subsection name
+// - (planned) within a single file, definitions must be contiguous for each:
+// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
+// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
+// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
+//
+// Data structure
+//
+// The functions in this package read values into a user-defined struct.
+// Each section corresponds to a struct field in the config struct, and each
+// variable in a section corresponds to a data field in the section struct.
+// The mapping of each section or variable name to fields is done either based
+// on the "gcfg" struct tag or by matching the name of the section or variable,
+// ignoring case. In the latter case, hyphens '-' in section and variable names
+// correspond to underscores '_' in field names.
+// Fields must be exported; to use a section or variable name starting with a
+// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
+// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
+// It is possible to provide default values for subsections in the section
+// "default-<sectionname>" (or by setting values in the corresponding struct
+// field "Default_<sectionname>").
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively.)
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// Error handling
+//
+// There are 3 types of errors:
+//
+// - programmer errors / panics:
+// - invalid configuration structure
+// - data errors:
+// - fatal errors:
+// - invalid configuration syntax
+// - warnings:
+// - data that doesn't belong to any part of the config structure
+//
+// Programmer errors trigger panics. These should be fixed by the programmer
+// before releasing code that uses gcfg.
+//
+// Data errors cause gcfg to return a non-nil error value. This includes the
+// case when there are extra unknown key-value definitions in the configuration
+// data (extra data).
+// However, on some occasions it is desirable to be able to proceed in
+// situations when the only data error is that of extra data.
+// These errors are handled at a different (warning) priority and can be
+// filtered out programmatically. To ignore extra data warnings, wrap the
+// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+// - documentation
+// - self-contained syntax documentation
+// - more practical examples
+// - move TODOs to issue tracker (eventually)
+// - syntax
+// - reconsider valid escape sequences
+// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+// - reading / parsing gcfg files
+// - define internal representation structure
+// - support multiple inputs (readers, strings, files)
+// - support declaring encoding (?)
+// - support varying fields sets for subsections (?)
+// - writing gcfg files
+// - error handling
+// - make error context accessible programmatically?
+// - limit input size?
+//
+package gcfg // import "github.com/src-d/gcfg"
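As a rough sketch of the struct mapping described above (the repoConfig type and sample input are invented; it assumes the fork keeps upstream gcfg's ReadStringInto entry point):

    package main

    import (
        "fmt"
        "log"

        "github.com/src-d/gcfg"
    )

    // Sections map onto struct fields, a section with subsections onto a
    // map[string]*struct; variable names are matched case-insensitively.
    type repoConfig struct {
        Core struct {
            Bare bool
        }
        Remote map[string]*struct {
            URL   string
            Fetch []string
        }
    }

    func main() {
        src := "[core]\nbare = false\n" +
            "[remote \"origin\"]\n" +
            "url = https://example.com/repo.git\n" +
            "fetch = +refs/heads/*:refs/remotes/origin/*\n"

        var cfg repoConfig
        if err := gcfg.ReadStringInto(&cfg, src); err != nil {
            log.Fatal(err)
        }
        fmt.Println(cfg.Remote["origin"].URL, cfg.Remote["origin"].Fetch)
    }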
diff --git a/vendor/github.com/src-d/gcfg/errors.go b/vendor/github.com/src-d/gcfg/errors.go
new file mode 100644
index 0000000000..853c76021d
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/errors.go
@@ -0,0 +1,41 @@
+package gcfg
+
+import (
+ "gopkg.in/warnings.v0"
+)
+
+// FatalOnly filters the results of a Read*Into invocation and returns only
+// fatal errors. That is, errors (warnings) indicating data for unknown
+// sections / variables is ignored. Example invocation:
+//
+// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
+// if err != nil {
+// ...
+//
+func FatalOnly(err error) error {
+ return warnings.FatalOnly(err)
+}
+
+func isFatal(err error) bool {
+ _, ok := err.(extraData)
+ return !ok
+}
+
+type extraData struct {
+ section string
+ subsection *string
+ variable *string
+}
+
+func (e extraData) Error() string {
+ s := "can't store data at section \"" + e.section + "\""
+ if e.subsection != nil {
+ s += ", subsection \"" + *e.subsection + "\""
+ }
+ if e.variable != nil {
+ s += ", variable \"" + *e.variable + "\""
+ }
+ return s
+}
+
+var _ error = extraData{}
diff --git a/vendor/github.com/src-d/gcfg/go1_0.go b/vendor/github.com/src-d/gcfg/go1_0.go
new file mode 100644
index 0000000000..6670210791
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/go1_0.go
@@ -0,0 +1,7 @@
+// +build !go1.2
+
+package gcfg
+
+type textUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/src-d/gcfg/go1_2.go b/vendor/github.com/src-d/gcfg/go1_2.go
new file mode 100644
index 0000000000..6f5843bc7c
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/go1_2.go
@@ -0,0 +1,9 @@
+// +build go1.2
+
+package gcfg
+
+import (
+ "encoding"
+)
+
+type textUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/src-d/gcfg/read.go b/vendor/github.com/src-d/gcfg/read.go
new file mode 100644
index 0000000000..fff0448c78
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/read.go
@@ -0,0 +1,273 @@
+package gcfg
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/src-d/gcfg/scanner"
+ "github.com/src-d/gcfg/token"
+ "gopkg.in/warnings.v0"
+)
+
+var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'}
+
+// no error: invalid literals should be caught by scanner
+func unquote(s string) string {
+ u, q, esc := make([]rune, 0, len(s)), false, false
+ for _, c := range s {
+ if esc {
+ uc, ok := unescape[c]
+ switch {
+ case ok:
+ u = append(u, uc)
+ fallthrough
+ case !q && c == '\n':
+ esc = false
+ continue
+ }
+ panic("invalid escape sequence")
+ }
+ switch c {
+ case '"':
+ q = !q
+ case '\\':
+ esc = true
+ default:
+ u = append(u, c)
+ }
+ }
+ if q {
+ panic("missing end quote")
+ }
+ if esc {
+ panic("invalid escape sequence")
+ }
+ return string(u)
+}
+
+func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
+ fset *token.FileSet, file *token.File, src []byte) error {
+ //
+ var s scanner.Scanner
+ var errs scanner.ErrorList
+ s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
+ sect, sectsub := "", ""
+ pos, tok, lit := s.Scan()
+ errfn := func(msg string) error {
+ return fmt.Errorf("%s: %s", fset.Position(pos), msg)
+ }
+ for {
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ switch tok {
+ case token.EOF:
+ return nil
+ case token.EOL, token.COMMENT:
+ pos, tok, lit = s.Scan()
+ case token.LBRACK:
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ if tok != token.IDENT {
+ if err := c.Collect(errfn("expected section name")); err != nil {
+ return err
+ }
+ }
+ sect, sectsub = lit, ""
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ if tok == token.STRING {
+ sectsub = unquote(lit)
+ if sectsub == "" {
+ if err := c.Collect(errfn("empty subsection name")); err != nil {
+ return err
+ }
+ }
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ }
+ if tok != token.RBRACK {
+ if sectsub == "" {
+ if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
+ return err
+ }
+ }
+ if err := c.Collect(errfn("expected right bracket")); err != nil {
+ return err
+ }
+ }
+ pos, tok, lit = s.Scan()
+ if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+ if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
+ return err
+ }
+ }
+ // If a section/subsection header was found, ensure a
+ // container object is created, even if there are no
+ // variables further down.
+ err := c.Collect(callback(sect, sectsub, "", "", true))
+ if err != nil {
+ return err
+ }
+ case token.IDENT:
+ if sect == "" {
+ if err := c.Collect(errfn("expected section header")); err != nil {
+ return err
+ }
+ }
+ n := lit
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
+ if !blank {
+ if tok != token.ASSIGN {
+ if err := c.Collect(errfn("expected '='")); err != nil {
+ return err
+ }
+ }
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ if tok != token.STRING {
+ if err := c.Collect(errfn("expected value")); err != nil {
+ return err
+ }
+ }
+ v = unquote(lit)
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ if err := c.Collect(errs.Err()); err != nil {
+ return err
+ }
+ }
+ if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+ if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
+ return err
+ }
+ }
+ }
+ err := c.Collect(callback(sect, sectsub, n, v, blank))
+ if err != nil {
+ return err
+ }
+ default:
+ if sect == "" {
+ if err := c.Collect(errfn("expected section header")); err != nil {
+ return err
+ }
+ }
+ if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
+ return err
+ }
+ }
+ }
+ panic("never reached")
+}
+
+func readInto(config interface{}, fset *token.FileSet, file *token.File,
+ src []byte) error {
+ //
+ c := warnings.NewCollector(isFatal)
+ firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
+ return set(c, config, s, ss, k, v, bv, false)
+ }
+ err := read(c, firstPassCallback, fset, file, src)
+ if err != nil {
+ return err
+ }
+ secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
+ return set(c, config, s, ss, k, v, bv, true)
+ }
+ err = read(c, secondPassCallback, fset, file, src)
+ if err != nil {
+ return err
+ }
+ return c.Done()
+}
+
+// ReadWithCallback reads gcfg formatted data from reader and calls
+// callback with each section and option found.
+//
+// Callback is called with section, subsection, option key, option value
+// and blank value flag as arguments.
+//
+// When a section is found, callback is called with an empty subsection,
+// option key and option value.
+//
+// When a subsection is found, callback is called with an empty option key
+// and option value.
+//
+// If the blank value flag is true, the value was not set for the option
+// (as opposed to being set to an empty string).
+//
+// If callback returns an error, ReadWithCallback terminates with an error too.
+func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
+ src, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ c := warnings.NewCollector(isFatal)
+
+ return read(c, callback, fset, file, src)
+}
+
+// ReadInto reads gcfg formatted data from reader and sets the values into the
+// corresponding fields in config.
+func ReadInto(config interface{}, reader io.Reader) error {
+ src, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ return readInto(config, fset, file, src)
+}
+
+// ReadStringInto reads gcfg formatted data from str and sets the values into
+// the corresponding fields in config.
+func ReadStringInto(config interface{}, str string) error {
+ r := strings.NewReader(str)
+ return ReadInto(config, r)
+}
+
+// ReadFileInto reads gcfg formatted data from the file filename and sets the
+// values into the corresponding fields in config.
+func ReadFileInto(config interface{}, filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile(filename, fset.Base(), len(src))
+ return readInto(config, fset, file, src)
+}
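
A small sketch of the callback-based API defined in this file; the callback fires once for the section header (with an empty key and blank=true) and once per variable. The input string is illustrative:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/src-d/gcfg"
)

func main() {
	src := "[core]\neditor = vim\n"
	err := gcfg.ReadWithCallback(strings.NewReader(src), func(sect, sub, key, val string, blank bool) error {
		fmt.Printf("section=%q sub=%q key=%q value=%q blank=%v\n", sect, sub, key, val, blank)
		return nil // returning an error would terminate the read
	})
	if err != nil {
		log.Fatal(err)
	}
}
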
diff --git a/vendor/github.com/src-d/gcfg/scanner/errors.go b/vendor/github.com/src-d/gcfg/scanner/errors.go
new file mode 100644
index 0000000000..f3fcecacbb
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/scanner/errors.go
@@ -0,0 +1,121 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "fmt"
+ "io"
+ "sort"
+)
+
+import (
+ "github.com/src-d/gcfg/token"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+//
+type Error struct {
+ Pos token.Position
+ Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+ if e.Pos.Filename != "" || e.Pos.IsValid() {
+ // don't print "<unknown position>"
+ // TODO(gri) reconsider the semantics of Position.IsValid
+ return e.Pos.String() + ": " + e.Msg
+ }
+ return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+ *p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
+// ErrorList implements the sort.Interface.
+func (p ErrorList) Len() int { return len(p) }
+func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p ErrorList) Less(i, j int) bool {
+ e := &p[i].Pos
+ f := &p[j].Pos
+ if e.Filename < f.Filename {
+ return true
+ }
+ if e.Filename == f.Filename {
+ return e.Offset < f.Offset
+ }
+ return false
+}
+
+// Sort sorts an ErrorList. *Error entries are sorted by position,
+// other errors are sorted by error message, and before any *Error
+// entry.
+//
+func (p ErrorList) Sort() {
+ sort.Sort(p)
+}
+
+// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+func (p *ErrorList) RemoveMultiples() {
+ sort.Sort(p)
+ var last token.Position // initial last.Line is != any legal error line
+ i := 0
+ for _, e := range *p {
+ if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
+ last = e.Pos
+ (*p)[i] = e
+ i++
+ }
+ }
+ (*p) = (*p)[0:i]
+}
+
+// An ErrorList implements the error interface.
+func (p ErrorList) Error() string {
+ switch len(p) {
+ case 0:
+ return "no errors"
+ case 1:
+ return p[0].Error()
+ }
+ return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
+}
+
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p ErrorList) Err() error {
+ if len(p) == 0 {
+ return nil
+ }
+ return p
+}
+
+// PrintError is a utility function that prints a list of errors to w,
+// one error per line, if the err parameter is an ErrorList. Otherwise
+// it prints the err string.
+//
+func PrintError(w io.Writer, err error) {
+ if list, ok := err.(ErrorList); ok {
+ for _, e := range list {
+ fmt.Fprintf(w, "%s\n", e)
+ }
+ } else if err != nil {
+ fmt.Fprintf(w, "%s\n", err)
+ }
+}
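
A short sketch of collecting and printing scan errors with ErrorList (the file name, positions, and messages are made up for illustration):

package main

import (
	"os"

	"github.com/src-d/gcfg/scanner"
	"github.com/src-d/gcfg/token"
)

func main() {
	var errs scanner.ErrorList
	errs.Add(token.Position{Filename: "example.gcfg", Offset: 20, Line: 3, Column: 1}, "expected section name")
	errs.Add(token.Position{Filename: "example.gcfg", Offset: 0, Line: 1, Column: 1}, "illegal character")

	errs.Sort() // orders entries by file name and offset
	// Err returns nil for an empty list, otherwise the list itself;
	// PrintError writes one error per line.
	scanner.PrintError(os.Stderr, errs.Err())
}
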
diff --git a/vendor/github.com/src-d/gcfg/scanner/scanner.go b/vendor/github.com/src-d/gcfg/scanner/scanner.go
new file mode 100644
index 0000000000..b1eef06f69
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/scanner/scanner.go
@@ -0,0 +1,342 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner implements a scanner for gcfg configuration text.
+// It takes a []byte as source which can then be tokenized
+// through repeated calls to the Scan method.
+//
+// Note that the API for the scanner package may change to accommodate new
+// features or implementation changes in gcfg.
+//
+package scanner
+
+import (
+ "fmt"
+ "path/filepath"
+ "unicode"
+ "unicode/utf8"
+)
+
+import (
+ "github.com/src-d/gcfg/token"
+)
+
+// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
+// encountered and a handler was installed, the handler is called with a
+// position and an error message. The position points to the beginning of
+// the offending token.
+//
+type ErrorHandler func(pos token.Position, msg string)
+
+// A Scanner holds the scanner's internal state while processing
+// a given text. It can be allocated as part of another data
+// structure but must be initialized via Init before use.
+//
+type Scanner struct {
+ // immutable state
+ file *token.File // source file handle
+ dir string // directory portion of file.Name()
+ src []byte // source
+ err ErrorHandler // error reporting; or nil
+ mode Mode // scanning mode
+
+ // scanning state
+ ch rune // current character
+ offset int // character offset
+ rdOffset int // reading offset (position after current character)
+ lineOffset int // current line offset
+ nextVal bool // next token is expected to be a value
+
+ // public state - ok to modify
+ ErrorCount int // number of errors encountered
+}
+
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
+//
+func (s *Scanner) next() {
+ if s.rdOffset < len(s.src) {
+ s.offset = s.rdOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ r, w := rune(s.src[s.rdOffset]), 1
+ switch {
+ case r == 0:
+ s.error(s.offset, "illegal character NUL")
+ case r >= 0x80:
+ // not ASCII
+ r, w = utf8.DecodeRune(s.src[s.rdOffset:])
+ if r == utf8.RuneError && w == 1 {
+ s.error(s.offset, "illegal UTF-8 encoding")
+ }
+ }
+ s.rdOffset += w
+ s.ch = r
+ } else {
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ s.ch = -1 // eof
+ }
+}
+
+// A mode value is a set of flags (or 0).
+// They control scanner behavior.
+//
+type Mode uint
+
+const (
+ ScanComments Mode = 1 << iota // return comments as COMMENT tokens
+)
+
+// Init prepares the scanner s to tokenize the text src by setting the
+// scanner at the beginning of src. The scanner uses the file set file
+// for position information and it adds line information for each line.
+// It is ok to re-use the same file when re-scanning the same source, as
+// line information which is already present is ignored. Init causes a
+// panic if the file size does not match the src size.
+//
+// Calls to Scan will invoke the error handler err if they encounter a
+// syntax error and err is not nil. Also, for each error encountered,
+// the Scanner field ErrorCount is incremented by one. The mode parameter
+// determines how comments are handled.
+//
+// Note that Init may call err if there is an error in the first character
+// of the file.
+//
+func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
+ // Explicitly initialize all fields since a scanner may be reused.
+ if file.Size() != len(src) {
+ panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
+ }
+ s.file = file
+ s.dir, _ = filepath.Split(file.Name())
+ s.src = src
+ s.err = err
+ s.mode = mode
+
+ s.ch = ' '
+ s.offset = 0
+ s.rdOffset = 0
+ s.lineOffset = 0
+ s.ErrorCount = 0
+ s.nextVal = false
+
+ s.next()
+}
+
+func (s *Scanner) error(offs int, msg string) {
+ if s.err != nil {
+ s.err(s.file.Position(s.file.Pos(offs)), msg)
+ }
+ s.ErrorCount++
+}
+
+func (s *Scanner) scanComment() string {
+ // initial [;#] already consumed
+ offs := s.offset - 1 // position of initial [;#]
+
+ for s.ch != '\n' && s.ch >= 0 {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanEscape(val bool) {
+ offs := s.offset
+ ch := s.ch
+ s.next() // always make progress
+ switch ch {
+ case '\\', '"':
+ // ok
+ case 'n', 't', 'b':
+ if val {
+ break // ok
+ }
+ fallthrough
+ default:
+ s.error(offs, "unknown escape sequence")
+ }
+}
+
+func (s *Scanner) scanString() string {
+ // '"' opening already consumed
+ offs := s.offset - 1
+
+ for s.ch != '"' {
+ ch := s.ch
+ s.next()
+ if ch == '\n' || ch < 0 {
+ s.error(offs, "string not terminated")
+ break
+ }
+ if ch == '\\' {
+ s.scanEscape(false)
+ }
+ }
+
+ s.next()
+
+ return string(s.src[offs:s.offset])
+}
+
+func stripCR(b []byte) []byte {
+ c := make([]byte, len(b))
+ i := 0
+ for _, ch := range b {
+ if ch != '\r' {
+ c[i] = ch
+ i++
+ }
+ }
+ return c[:i]
+}
+
+func (s *Scanner) scanValString() string {
+ offs := s.offset
+
+ hasCR := false
+ end := offs
+ inQuote := false
+loop:
+ for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
+ ch := s.ch
+ s.next()
+ switch {
+ case inQuote && ch == '\\':
+ s.scanEscape(true)
+ case !inQuote && ch == '\\':
+ if s.ch == '\r' {
+ hasCR = true
+ s.next()
+ }
+ if s.ch != '\n' {
+ s.scanEscape(true)
+ } else {
+ s.next()
+ }
+ case ch == '"':
+ inQuote = !inQuote
+ case ch == '\r':
+ hasCR = true
+ case ch < 0 || inQuote && ch == '\n':
+ s.error(offs, "string not terminated")
+ break loop
+ }
+ if inQuote || !isWhiteSpace(ch) {
+ end = s.offset
+ }
+ }
+
+ lit := s.src[offs:end]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
+}
+
+func isWhiteSpace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\r'
+}
+
+func (s *Scanner) skipWhitespace() {
+ for isWhiteSpace(s.ch) {
+ s.next()
+ }
+}
+
+// Scan scans the next token and returns the token position, the token,
+// and its literal string if applicable. The source end is indicated by
+// token.EOF.
+//
+// If the returned token is a literal (token.IDENT, token.STRING) or
+// token.COMMENT, the literal string has the corresponding value.
+//
+// If the returned token is token.ILLEGAL, the literal string is the
+// offending character.
+//
+// In all other cases, Scan returns an empty literal string.
+//
+// For more tolerant parsing, Scan will return a valid token if
+// possible even if a syntax error was encountered. Thus, even
+// if the resulting token sequence contains no illegal tokens,
+// a client may not assume that no error occurred. Instead it
+// must check the scanner's ErrorCount or the number of calls
+// of the error handler, if there was one installed.
+//
+// Scan adds line information to the file added to the file
+// set with Init. Token positions are relative to that file
+// and thus relative to the file set.
+//
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+scanAgain:
+ s.skipWhitespace()
+
+ // current token start
+ pos = s.file.Pos(s.offset)
+
+ // determine token value
+ switch ch := s.ch; {
+ case s.nextVal:
+ lit = s.scanValString()
+ tok = token.STRING
+ s.nextVal = false
+ case isLetter(ch):
+ lit = s.scanIdentifier()
+ tok = token.IDENT
+ default:
+ s.next() // always make progress
+ switch ch {
+ case -1:
+ tok = token.EOF
+ case '\n':
+ tok = token.EOL
+ case '"':
+ tok = token.STRING
+ lit = s.scanString()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case ';', '#':
+ // comment
+ lit = s.scanComment()
+ if s.mode&ScanComments == 0 {
+ // skip comment
+ goto scanAgain
+ }
+ tok = token.COMMENT
+ case '=':
+ tok = token.ASSIGN
+ s.nextVal = true
+ default:
+ s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
+ tok = token.ILLEGAL
+ lit = string(ch)
+ }
+ }
+
+ return
+}
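
A minimal tokenizing loop using the Scanner above (the file name and input are illustrative; pass 0 instead of ScanComments to have comments skipped):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/scanner"
	"github.com/src-d/gcfg/token"
)

func main() {
	src := []byte("[section \"sub\"]\nname = value ; comment\n")
	fset := token.NewFileSet()
	file := fset.AddFile("example.gcfg", fset.Base(), len(src))

	var s scanner.Scanner
	// The handler receives the position and message of each syntax error.
	s.Init(file, src, func(pos token.Position, msg string) {
		fmt.Printf("%s: %s\n", pos, msg)
	}, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
	}
}
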
diff --git a/vendor/github.com/src-d/gcfg/set.go b/vendor/github.com/src-d/gcfg/set.go
new file mode 100644
index 0000000000..771258f0ef
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/set.go
@@ -0,0 +1,332 @@
+package gcfg
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/src-d/gcfg/types"
+ "gopkg.in/warnings.v0"
+)
+
+type tag struct {
+ ident string
+ intMode string
+}
+
+func newTag(ts string) tag {
+ t := tag{}
+ s := strings.Split(ts, ",")
+ t.ident = s[0]
+ for _, tse := range s[1:] {
+ if strings.HasPrefix(tse, "int=") {
+ t.intMode = tse[len("int="):]
+ }
+ }
+ return t
+}
+
+func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
+ var n string
+ r0, _ := utf8.DecodeRuneInString(name)
+ if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
+ n = "X"
+ }
+ n += strings.Replace(name, "-", "_", -1)
+ f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
+ if !v.FieldByName(fieldName).CanSet() {
+ return false
+ }
+ f, _ := v.Type().FieldByName(fieldName)
+ t := newTag(f.Tag.Get("gcfg"))
+ if t.ident != "" {
+ return strings.EqualFold(t.ident, name)
+ }
+ return strings.EqualFold(n, fieldName)
+ })
+ if !ok {
+ return reflect.Value{}, tag{}
+ }
+ return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
+}
+
+type setter func(destp interface{}, blank bool, val string, t tag) error
+
+var errUnsupportedType = fmt.Errorf("unsupported type")
+var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
+
+var setters = []setter{
+ typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
+}
+
+func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
+ dtu, ok := d.(textUnmarshaler)
+ if !ok {
+ return errUnsupportedType
+ }
+ if blank {
+ return errBlankUnsupported
+ }
+ return dtu.UnmarshalText([]byte(val))
+}
+
+func boolSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
+ return nil
+ }
+ b, err := types.ParseBool(val)
+ if err == nil {
+ reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
+ }
+ return err
+}
+
+func intMode(mode string) types.IntMode {
+ var m types.IntMode
+ if strings.ContainsAny(mode, "dD") {
+ m |= types.Dec
+ }
+ if strings.ContainsAny(mode, "hH") {
+ m |= types.Hex
+ }
+ if strings.ContainsAny(mode, "oO") {
+ m |= types.Oct
+ }
+ return m
+}
+
+var typeModes = map[reflect.Type]types.IntMode{
+ reflect.TypeOf(int(0)): types.Dec | types.Hex,
+ reflect.TypeOf(int8(0)): types.Dec | types.Hex,
+ reflect.TypeOf(int16(0)): types.Dec | types.Hex,
+ reflect.TypeOf(int32(0)): types.Dec | types.Hex,
+ reflect.TypeOf(int64(0)): types.Dec | types.Hex,
+ reflect.TypeOf(uint(0)): types.Dec | types.Hex,
+ reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
+ reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
+ reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
+ reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
+ // use default mode (allow dec/hex/oct) for uintptr type
+ reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
+}
+
+func intModeDefault(t reflect.Type) types.IntMode {
+ m, ok := typeModes[t]
+ if !ok {
+ m = types.Dec | types.Hex | types.Oct
+ }
+ return m
+}
+
+func intSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ return errBlankUnsupported
+ }
+ mode := intMode(t.intMode)
+ if mode == 0 {
+ mode = intModeDefault(reflect.TypeOf(d).Elem())
+ }
+ return types.ParseInt(d, val, mode)
+}
+
+func stringSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ return errBlankUnsupported
+ }
+ dsp, ok := d.(*string)
+ if !ok {
+ return errUnsupportedType
+ }
+ *dsp = val
+ return nil
+}
+
+var kindSetters = map[reflect.Kind]setter{
+ reflect.String: stringSetter,
+ reflect.Bool: boolSetter,
+ reflect.Int: intSetter,
+ reflect.Int8: intSetter,
+ reflect.Int16: intSetter,
+ reflect.Int32: intSetter,
+ reflect.Int64: intSetter,
+ reflect.Uint: intSetter,
+ reflect.Uint8: intSetter,
+ reflect.Uint16: intSetter,
+ reflect.Uint32: intSetter,
+ reflect.Uint64: intSetter,
+ reflect.Uintptr: intSetter,
+}
+
+var typeSetters = map[reflect.Type]setter{
+ reflect.TypeOf(big.Int{}): intSetter,
+}
+
+func typeSetter(d interface{}, blank bool, val string, tt tag) error {
+ t := reflect.ValueOf(d).Type().Elem()
+ setter, ok := typeSetters[t]
+ if !ok {
+ return errUnsupportedType
+ }
+ return setter(d, blank, val, tt)
+}
+
+func kindSetter(d interface{}, blank bool, val string, tt tag) error {
+ k := reflect.ValueOf(d).Type().Elem().Kind()
+ setter, ok := kindSetters[k]
+ if !ok {
+ return errUnsupportedType
+ }
+ return setter(d, blank, val, tt)
+}
+
+func scanSetter(d interface{}, blank bool, val string, tt tag) error {
+ if blank {
+ return errBlankUnsupported
+ }
+ return types.ScanFully(d, val, 'v')
+}
+
+func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
+ vType reflect.Type) (reflect.Value, error) {
+ //
+ pv := reflect.New(vType)
+ dfltName := "default-" + sect
+ dfltField, _ := fieldFold(vCfg, dfltName)
+ var err error
+ if dfltField.IsValid() {
+ b := bytes.NewBuffer(nil)
+ ge := gob.NewEncoder(b)
+ if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
+ return pv, err
+ }
+ gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
+ if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
+ return pv, err
+ }
+ }
+ return pv, nil
+}
+
+func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
+ value string, blankValue bool, subsectPass bool) error {
+ //
+ vPCfg := reflect.ValueOf(cfg)
+ if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("config must be a pointer to a struct"))
+ }
+ vCfg := vPCfg.Elem()
+ vSect, _ := fieldFold(vCfg, sect)
+ if !vSect.IsValid() {
+ err := extraData{section: sect}
+ return c.Collect(err)
+ }
+ isSubsect := vSect.Kind() == reflect.Map
+ if subsectPass != isSubsect {
+ return nil
+ }
+ if isSubsect {
+ vst := vSect.Type()
+ if vst.Key().Kind() != reflect.String ||
+ vst.Elem().Kind() != reflect.Ptr ||
+ vst.Elem().Elem().Kind() != reflect.Struct {
+			panic(fmt.Errorf("map field for section must have string keys and "+
+				"pointer-to-struct values: section %q", sect))
+ }
+ if vSect.IsNil() {
+ vSect.Set(reflect.MakeMap(vst))
+ }
+ k := reflect.ValueOf(sub)
+ pv := vSect.MapIndex(k)
+ if !pv.IsValid() {
+ vType := vSect.Type().Elem().Elem()
+ var err error
+ if pv, err = newValue(c, sect, vCfg, vType); err != nil {
+ return err
+ }
+ vSect.SetMapIndex(k, pv)
+ }
+ vSect = pv.Elem()
+ } else if vSect.Kind() != reflect.Struct {
+ panic(fmt.Errorf("field for section must be a map or a struct: "+
+ "section %q", sect))
+ } else if sub != "" {
+ err := extraData{section: sect, subsection: &sub}
+ return c.Collect(err)
+ }
+ // Empty name is a special value, meaning that only the
+ // section/subsection object is to be created, with no values set.
+ if name == "" {
+ return nil
+ }
+ vVar, t := fieldFold(vSect, name)
+ if !vVar.IsValid() {
+ var err error
+ if isSubsect {
+ err = extraData{section: sect, subsection: &sub, variable: &name}
+ } else {
+ err = extraData{section: sect, variable: &name}
+ }
+ return c.Collect(err)
+ }
+ // vVal is either single-valued var, or newly allocated value within multi-valued var
+ var vVal reflect.Value
+ // multi-value if unnamed slice type
+ isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
+ vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
+ if isMulti && vVar.Kind() == reflect.Ptr {
+ if vVar.IsNil() {
+ vVar.Set(reflect.New(vVar.Type().Elem()))
+ }
+ vVar = vVar.Elem()
+ }
+ if isMulti && blankValue {
+ vVar.Set(reflect.Zero(vVar.Type()))
+ return nil
+ }
+ if isMulti {
+ vVal = reflect.New(vVar.Type().Elem()).Elem()
+ } else {
+ vVal = vVar
+ }
+ isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
+ isNew := isDeref && vVal.IsNil()
+ // vAddr is address of value to set (dereferenced & allocated as needed)
+ var vAddr reflect.Value
+ switch {
+ case isNew:
+ vAddr = reflect.New(vVal.Type().Elem())
+ case isDeref && !isNew:
+ vAddr = vVal
+ default:
+ vAddr = vVal.Addr()
+ }
+ vAddrI := vAddr.Interface()
+ err, ok := error(nil), false
+ for _, s := range setters {
+ err = s(vAddrI, blankValue, value, t)
+ if err == nil {
+ ok = true
+ break
+ }
+ if err != errUnsupportedType {
+ return err
+ }
+ }
+ if !ok {
+ // in case all setters returned errUnsupportedType
+ return err
+ }
+ if isNew { // set reference if it was dereferenced and newly allocated
+ vVal.Set(vAddr)
+ }
+ if isMulti { // append if multi-valued
+ vVar.Set(reflect.Append(vVar, vVal))
+ }
+ return nil
+}
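
The case-insensitive field matching and the `gcfg` struct tag handled in this file (a name override plus an optional int=... base mode) can be sketched with a hypothetical config; the type, section names, and values below are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/src-d/gcfg"
)

type Config struct {
	Limits struct {
		// The tag overrides the matched variable name; "int=dh" allows
		// decimal and hexadecimal values (see intMode above).
		MaxSize int64  `gcfg:"max-size,int=dh"`
		Owner   string `gcfg:"owner-name"`
	}
	// A map field turns the section into subsections ([remote "origin"]).
	Remote map[string]*struct {
		URL string
	}
}

func main() {
	const src = `
[limits]
max-size = 0x10
owner-name = alice

[remote "origin"]
url = https://example.com/repo.git
`
	var cfg Config
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Limits.MaxSize)       // 16
	fmt.Println(cfg.Remote["origin"].URL) // https://example.com/repo.git
}
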
diff --git a/vendor/github.com/src-d/gcfg/token/position.go b/vendor/github.com/src-d/gcfg/token/position.go
new file mode 100644
index 0000000000..fc45c1e769
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/token/position.go
@@ -0,0 +1,435 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(gri) consider making this a separate package outside the go directory.
+
+package token
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+//
+type Position struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+//
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Pos is a compact encoding of a source position within a file set.
+// It can be converted into a Position for a more convenient, but much
+// larger, representation.
+//
+// The Pos value for a given file is a number in the range [base, base+size],
+// where base and size are specified when adding the file to the file set via
+// AddFile.
+//
+// To create the Pos value for a specific source offset, first add
+// the respective file to the current file set (via FileSet.AddFile)
+// and then call File.Pos(offset) for that file. Given a Pos value p
+// for a specific file set fset, the corresponding Position value is
+// obtained by calling fset.Position(p).
+//
+// Pos values can be compared directly with the usual comparison operators:
+// If two Pos values p and q are in the same file, comparing p and q is
+// equivalent to comparing the respective source file offsets. If p and q
+// are in different files, p < q is true if the file implied by p was added
+// to the respective file set before the file implied by q.
+//
+type Pos int
+
+// The zero value for Pos is NoPos; there is no file and line information
+// associated with it, and NoPos.IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+//
+const NoPos Pos = 0
+
+// IsValid returns true if the position is valid.
+func (p Pos) IsValid() bool {
+ return p != NoPos
+}
+
+// -----------------------------------------------------------------------------
+// File
+
+// A File is a handle for a file belonging to a FileSet.
+// A File has a name, size, and line offset table.
+//
+type File struct {
+ set *FileSet
+ name string // file name as provided to AddFile
+ base int // Pos value range for this file is [base...base+size]
+ size int // file size as provided to AddFile
+
+ // lines and infos are protected by set.mutex
+ lines []int
+ infos []lineInfo
+}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+ return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+ return f.base
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+ return f.size
+}
+
+// LineCount returns the number of lines in file f.
+func (f *File) LineCount() int {
+ f.set.mutex.RLock()
+ n := len(f.lines)
+ f.set.mutex.RUnlock()
+ return n
+}
+
+// AddLine adds the line offset for a new line.
+// The line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise the line offset is ignored.
+//
+func (f *File) AddLine(offset int) {
+ f.set.mutex.Lock()
+ if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
+ f.lines = append(f.lines, offset)
+ }
+ f.set.mutex.Unlock()
+}
+
+// SetLines sets the line offsets for a file and returns true if successful.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
+// Each line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise SetLines fails and returns
+// false.
+//
+func (f *File) SetLines(lines []int) bool {
+ // verify validity of lines table
+ size := f.size
+ for i, offset := range lines {
+ if i > 0 && offset <= lines[i-1] || size <= offset {
+ return false
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+ return true
+}
+
+// SetLinesForContent sets the line offsets for the given file content.
+func (f *File) SetLinesForContent(content []byte) {
+ var lines []int
+ line := 0
+ for offset, b := range content {
+ if line >= 0 {
+ lines = append(lines, line)
+ }
+ line = -1
+ if b == '\n' {
+ line = offset + 1
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+}
+
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
+// AddLineInfo adds alternative file and line number information for
+// a given file offset. The offset must be larger than the offset for
+// the previously added alternative line info and smaller than the
+// file size; otherwise the information is ignored.
+//
+// AddLineInfo is typically used to register alternative position
+// information for //line filename:line comments in source files.
+//
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+ f.set.mutex.Lock()
+ if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
+ f.infos = append(f.infos, lineInfo{offset, filename, line})
+ }
+ f.set.mutex.Unlock()
+}
+
+// Pos returns the Pos value for the given file offset;
+// the offset must be <= f.Size().
+// f.Pos(f.Offset(p)) == p.
+//
+func (f *File) Pos(offset int) Pos {
+ if offset > f.size {
+ panic("illegal file offset")
+ }
+ return Pos(f.base + offset)
+}
+
+// Offset returns the offset for the given file position p;
+// p must be a valid Pos value in that file.
+// f.Offset(f.Pos(offset)) == offset.
+//
+func (f *File) Offset(p Pos) int {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ return int(p) - f.base
+}
+
+// Line returns the line number for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Line(p Pos) int {
+ // TODO(gri) this can be implemented much more efficiently
+ return f.Position(p).Line
+}
+
+func searchLineInfos(a []lineInfo, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
+}
+
+// info returns the file name, line, and column number for a file offset.
+func (f *File) info(offset int) (filename string, line, column int) {
+ filename = f.name
+ if i := searchInts(f.lines, offset); i >= 0 {
+ line, column = i+1, offset-f.lines[i]+1
+ }
+ if len(f.infos) > 0 {
+ // almost no files have extra line infos
+ if i := searchLineInfos(f.infos, offset); i >= 0 {
+ alt := &f.infos[i]
+ filename = alt.Filename
+ if i := searchInts(f.lines, alt.Offset); i >= 0 {
+ line += alt.Line - i - 1
+ }
+ }
+ }
+ return
+}
+
+func (f *File) position(p Pos) (pos Position) {
+ offset := int(p) - f.base
+ pos.Offset = offset
+ pos.Filename, pos.Line, pos.Column = f.info(offset)
+ return
+}
+
+// Position returns the Position value for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p)
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
+// A FileSet represents a set of source files.
+// Methods of file sets are synchronized; multiple goroutines
+// may invoke them concurrently.
+//
+type FileSet struct {
+ mutex sync.RWMutex // protects the file set
+ base int // base offset for the next file
+ files []*File // list of files in the order added to the set
+ last *File // cache of last file looked up
+}
+
+// NewFileSet creates a new file set.
+func NewFileSet() *FileSet {
+ s := new(FileSet)
+ s.base = 1 // 0 == NoPos
+ return s
+}
+
+// Base returns the minimum base offset that must be provided to
+// AddFile when adding the next file.
+//
+func (s *FileSet) Base() int {
+ s.mutex.RLock()
+ b := s.base
+ s.mutex.RUnlock()
+ return b
+
+}
+
+// AddFile adds a new file with a given filename, base offset, and file size
+// to the file set s and returns the file. Multiple files may have the same
+// name. The base offset must not be smaller than the FileSet's Base(), and
+// size must not be negative.
+//
+// Adding the file will set the file set's Base() value to base + size + 1
+// as the minimum base value for the next file. The following relationship
+// exists between a Pos value p for a given file offset offs:
+//
+// int(p) = base + offs
+//
+// with offs in the range [0, size] and thus p in the range [base, base+size].
+// For convenience, File.Pos may be used to create file-specific position
+// values from a file offset.
+//
+func (s *FileSet) AddFile(filename string, base, size int) *File {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ if base < s.base || size < 0 {
+ panic("illegal base or size")
+ }
+ // base >= s.base && size >= 0
+ f := &File{s, filename, base, size, []int{0}, nil}
+ base += size + 1 // +1 because EOF also has a position
+ if base < 0 {
+ panic("token.Pos offset overflow (> 2G of source code in file set)")
+ }
+ // add the file to the file set
+ s.base = base
+ s.files = append(s.files, f)
+ s.last = f
+ return f
+}
+
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+//
+func (s *FileSet) Iterate(f func(*File) bool) {
+ for i := 0; ; i++ {
+ var file *File
+ s.mutex.RLock()
+ if i < len(s.files) {
+ file = s.files[i]
+ }
+ s.mutex.RUnlock()
+ if file == nil || !f(file) {
+ break
+ }
+ }
+}
+
+func searchFiles(a []*File, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
+
+func (s *FileSet) file(p Pos) *File {
+ // common case: p is in last file
+ if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ return f
+ }
+ // p is not in last file - search all files
+ if i := searchFiles(s.files, int(p)); i >= 0 {
+ f := s.files[i]
+ // f.base <= int(p) by definition of searchFiles
+ if int(p) <= f.base+f.size {
+ s.last = f
+ return f
+ }
+ }
+ return nil
+}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+//
+func (s *FileSet) File(p Pos) (f *File) {
+ if p != NoPos {
+ s.mutex.RLock()
+ f = s.file(p)
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// Position converts a Pos in the fileset into a general Position.
+func (s *FileSet) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ s.mutex.RLock()
+ if f := s.file(p); f != nil {
+ pos = f.position(p)
+ }
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ // TODO(gri): Remove this when compilers have caught up.
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
+}
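
A small sketch of the FileSet/File/Pos API above (the file name, content, and offsets are illustrative):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/token"
)

func main() {
	src := []byte("[core]\nname = value\n")

	fset := token.NewFileSet()
	// Base() is the minimum base offset for the next file; the file then
	// covers the Pos range [base, base+size].
	file := fset.AddFile("example.gcfg", fset.Base(), len(src))

	// Record the start of the second line, as the scanner would.
	file.AddLine(7) // offset of 'n' in "name", right after the '\n'

	p := file.Pos(7)              // compact position
	fmt.Println(fset.Position(p)) // example.gcfg:2:1
	fmt.Println(file.Offset(p))   // 7
}
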
diff --git a/vendor/github.com/src-d/gcfg/token/serialize.go b/vendor/github.com/src-d/gcfg/token/serialize.go
new file mode 100644
index 0000000000..4adc8f9e33
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/token/serialize.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+type serializedFile struct {
+ // fields correspond 1:1 to fields with same (lower-case) name in File
+ Name string
+ Base int
+ Size int
+ Lines []int
+ Infos []lineInfo
+}
+
+type serializedFileSet struct {
+ Base int
+ Files []serializedFile
+}
+
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+ var ss serializedFileSet
+ if err := decode(&ss); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.base = ss.Base
+ files := make([]*File, len(ss.Files))
+ for i := 0; i < len(ss.Files); i++ {
+ f := &ss.Files[i]
+ files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+ }
+ s.files = files
+ s.last = nil
+ s.mutex.Unlock()
+
+ return nil
+}
+
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+ var ss serializedFileSet
+
+ s.mutex.Lock()
+ ss.Base = s.base
+ files := make([]serializedFile, len(s.files))
+ for i, f := range s.files {
+ files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+ }
+ ss.Files = files
+ s.mutex.Unlock()
+
+ return encode(ss)
+}
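
Read and Write take plain encode/decode functions of type func(interface{}) error, so encoding/gob fits directly; a minimal round-trip sketch (the file name and size are illustrative):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"

	"github.com/src-d/gcfg/token"
)

func main() {
	fset := token.NewFileSet()
	fset.AddFile("example.gcfg", fset.Base(), 42)

	// Serialize the file set.
	var buf bytes.Buffer
	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
		log.Fatal(err)
	}

	// Deserialize into a fresh file set.
	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		log.Fatal(err)
	}
	fmt.Println(restored.Base()) // 44: initial base 1 + size 42 + 1 for EOF
}
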
diff --git a/vendor/github.com/src-d/gcfg/token/token.go b/vendor/github.com/src-d/gcfg/token/token.go
new file mode 100644
index 0000000000..b3c7c83fa9
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/token/token.go
@@ -0,0 +1,83 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package token defines constants representing the lexical tokens of the gcfg
+// configuration syntax and basic operations on tokens (printing, predicates).
+//
+// Note that the API for the token package may change to accommodate new
+// features or implementation changes in gcfg.
+//
+package token
+
+import "strconv"
+
+// Token is the set of lexical tokens of the gcfg configuration syntax.
+type Token int
+
+// The list of tokens.
+const (
+ // Special tokens
+ ILLEGAL Token = iota
+ EOF
+ COMMENT
+
+ literal_beg
+ // Identifiers and basic type literals
+ // (these tokens stand for classes of literals)
+ IDENT // section-name, variable-name
+ STRING // "subsection-name", variable value
+ literal_end
+
+ operator_beg
+ // Operators and delimiters
+ ASSIGN // =
+ LBRACK // [
+ RBRACK // ]
+ EOL // \n
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ STRING: "STRING",
+
+ ASSIGN: "=",
+ LBRACK: "[",
+ RBRACK: "]",
+ EOL: "\n",
+}
+
+// String returns the string corresponding to the token tok.
+// For operators and delimiters, the string is the actual token character
+// sequence (e.g., for the token ASSIGN, the string is "="). For all other
+// tokens the string corresponds to the token constant name (e.g. for the
+// token IDENT, the string is "IDENT").
+//
+func (tok Token) String() string {
+ s := ""
+ if 0 <= tok && tok < Token(len(tokens)) {
+ s = tokens[tok]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(tok)) + ")"
+ }
+ return s
+}
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+//
+func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+//
+func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
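
The String method and the predicates in action (token values chosen for illustration):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/token"
)

func main() {
	fmt.Println(token.ASSIGN.String())     // "="
	fmt.Println(token.ASSIGN.IsOperator()) // true
	fmt.Println(token.IDENT.IsLiteral())   // true
	fmt.Println(token.COMMENT.IsLiteral()) // false
}
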
diff --git a/vendor/github.com/src-d/gcfg/types/bool.go b/vendor/github.com/src-d/gcfg/types/bool.go
new file mode 100644
index 0000000000..8dcae0d8cf
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/types/bool.go
@@ -0,0 +1,23 @@
+package types
+
+// BoolValues defines the name and value mappings for ParseBool.
+var BoolValues = map[string]interface{}{
+ "true": true, "yes": true, "on": true, "1": true,
+ "false": false, "no": false, "off": false, "0": false,
+}
+
+var boolParser = func() *EnumParser {
+ ep := &EnumParser{}
+ ep.AddVals(BoolValues)
+ return ep
+}()
+
+// ParseBool parses bool values according to the definitions in BoolValues.
+// Parsing is case-insensitive.
+func ParseBool(s string) (bool, error) {
+ v, err := boolParser.Parse(s)
+ if err != nil {
+ return false, err
+ }
+ return v.(bool), nil
+}
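
A short sketch of ParseBool over the mappings in BoolValues (the sample inputs are arbitrary; matching is case-insensitive):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	for _, s := range []string{"yes", "Off", "1", "maybe"} {
		b, err := types.ParseBool(s)
		fmt.Printf("%q -> %v (err: %v)\n", s, b, err)
	}
	// "yes" and "1" parse to true, "Off" to false, and "maybe"
	// yields a parse error.
}
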
diff --git a/vendor/github.com/src-d/gcfg/types/doc.go b/vendor/github.com/src-d/gcfg/types/doc.go
new file mode 100644
index 0000000000..9f9c345f6e
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/types/doc.go
@@ -0,0 +1,4 @@
+// Package types defines helpers for type conversions.
+//
+// The API for this package is not finalized yet.
+package types
diff --git a/vendor/github.com/src-d/gcfg/types/enum.go b/vendor/github.com/src-d/gcfg/types/enum.go
new file mode 100644
index 0000000000..1a0c7ef453
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/types/enum.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// EnumParser parses "enum" values; i.e. it maps a predefined set of strings
+// to predefined values.
+type EnumParser struct {
+ Type string // type name; if not set, use type of first value added
+ CaseMatch bool // if true, matching of strings is case-sensitive
+ // PrefixMatch bool
+ vals map[string]interface{}
+}
+
+// AddVals adds strings and values to an EnumParser.
+func (ep *EnumParser) AddVals(vals map[string]interface{}) {
+ if ep.vals == nil {
+ ep.vals = make(map[string]interface{})
+ }
+ for k, v := range vals {
+ if ep.Type == "" {
+ ep.Type = reflect.TypeOf(v).Name()
+ }
+ if !ep.CaseMatch {
+ k = strings.ToLower(k)
+ }
+ ep.vals[k] = v
+ }
+}
+
+// Parse parses the string and returns the value or an error.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+ if !ep.CaseMatch {
+ s = strings.ToLower(s)
+ }
+ v, ok := ep.vals[s]
+ if !ok {
+ return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+ }
+ return v, nil
+}
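
A minimal sketch of EnumParser with a hypothetical log-level enum (the Level type and its values are not part of this package):

package main

import (
	"fmt"
	"log"

	"github.com/src-d/gcfg/types"
)

type Level int

const (
	Info Level = iota
	Warn
	Error
)

func main() {
	ep := &types.EnumParser{Type: "Level"} // CaseMatch left false: case-insensitive
	ep.AddVals(map[string]interface{}{"info": Info, "warn": Warn, "error": Error})

	v, err := ep.Parse("WARN")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.(Level) == Warn) // true
}
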
diff --git a/vendor/github.com/src-d/gcfg/types/int.go b/vendor/github.com/src-d/gcfg/types/int.go
new file mode 100644
index 0000000000..af7e75c125
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/types/int.go
@@ -0,0 +1,86 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+)
+
+// An IntMode is a mode for parsing integer values, representing a set of
+// accepted bases.
+type IntMode uint8
+
+// IntMode values for ParseInt; they can be combined using bitwise OR.
+const (
+ Dec IntMode = 1 << iota
+ Hex
+ Oct
+)
+
+// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
+func (m IntMode) String() string {
+ var modes []string
+ if m&Dec != 0 {
+ modes = append(modes, "Dec")
+ }
+ if m&Hex != 0 {
+ modes = append(modes, "Hex")
+ }
+ if m&Oct != 0 {
+ modes = append(modes, "Oct")
+ }
+ return "IntMode(" + strings.Join(modes, "|") + ")"
+}
+
+var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
+
+func prefix0(val string) bool {
+ return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
+}
+
+func prefix0x(val string) bool {
+ return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
+}
+
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal values require the prefix `0` or `0x` in
+// cases where the mode permits ambiguity of base; otherwise the prefix can
+// be omitted.
+func ParseInt(intptr interface{}, val string, mode IntMode) error {
+ val = strings.TrimSpace(val)
+ verb := byte(0)
+ switch mode {
+ case Dec:
+ verb = 'd'
+ case Dec + Hex:
+ if prefix0x(val) {
+ verb = 'v'
+ } else {
+ verb = 'd'
+ }
+ case Dec + Oct:
+ if prefix0(val) && !prefix0x(val) {
+ verb = 'v'
+ } else {
+ verb = 'd'
+ }
+ case Dec + Hex + Oct:
+ verb = 'v'
+ case Hex:
+ if prefix0x(val) {
+ verb = 'v'
+ } else {
+ verb = 'x'
+ }
+ case Oct:
+ verb = 'o'
+ case Hex + Oct:
+ if prefix0(val) {
+ verb = 'v'
+ } else {
+ return errIntAmbig
+ }
+ }
+ if verb == 0 {
+ panic("unsupported mode")
+ }
+ return ScanFully(intptr, val, verb)
+}
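
A brief sketch of ParseInt under different IntMode combinations (the values are illustrative):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	var n int

	// Dec|Hex: a "0x" prefix selects hexadecimal, anything else is decimal.
	if err := types.ParseInt(&n, "0x2a", types.Dec|types.Hex); err == nil {
		fmt.Println(n) // 42
	}

	// Hex|Oct without a "0" or "0x" prefix is ambiguous and rejected.
	err := types.ParseInt(&n, "17", types.Hex|types.Oct)
	fmt.Println(err) // ambiguous integer value; must include '0' prefix
}
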
diff --git a/vendor/github.com/src-d/gcfg/types/scan.go b/vendor/github.com/src-d/gcfg/types/scan.go
new file mode 100644
index 0000000000..db2f6ed3ca
--- /dev/null
+++ b/vendor/github.com/src-d/gcfg/types/scan.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+)
+
+// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
+func ScanFully(ptr interface{}, val string, verb byte) error {
+ t := reflect.ValueOf(ptr).Elem().Type()
+ // attempt to read extra bytes to make sure the value is consumed
+ var b []byte
+ n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
+ switch {
+ case n < 1 || n == 1 && err != io.EOF:
+ return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
+ case n > 1:
+ return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
+ }
+ // n == 1 && err == io.EOF
+ return nil
+}
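
A minimal sketch of ScanFully (the inputs are illustrative):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	var n int
	if err := types.ScanFully(&n, "123", 'd'); err == nil {
		fmt.Println(n) // 123
	}
	// Trailing characters are detected and reported as an error.
	fmt.Println(types.ScanFully(&n, "123abc", 'd'))
}
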
diff --git a/vendor/github.com/xanzy/ssh-agent/LICENSE b/vendor/github.com/xanzy/ssh-agent/LICENSE
new file mode 100644
index 0000000000..8f71f43fee
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
new file mode 100644
index 0000000000..6295607966
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
@@ -0,0 +1,146 @@
+//
+// Copyright (c) 2014 David Mzareulyan
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+// and associated documentation files (the "Software"), to deal in the Software without restriction,
+// including without limitation the rights to use, copy, modify, merge, publish, distribute,
+// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial
+// portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//
+
+// +build windows
+
+package sshagent
+
+// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
+// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py
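+//
+// Pageant does not listen on a socket. A request is passed by copying the raw
+// agent message into a named file mapping ("PageantRequest%08x", derived from
+// the current thread id) and sending a WM_COPYDATA message to Pageant's
+// window; Pageant writes its length-prefixed response back into the mapping.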
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// MaxMessageLen is the maximum size of a message that can be sent to Pageant.
+const MaxMessageLen = 8192
+
+var (
+ ErrPageantNotFound = errors.New("pageant process not found")
+ ErrSendMessage = errors.New("error sending message")
+
+ ErrMessageTooLong = errors.New("message too long")
+ ErrInvalidMessageFormat = errors.New("invalid message format")
+ ErrResponseTooLong = errors.New("response too long")
+)
+
+const (
+ agentCopydataID = 0x804e50ba
+ wmCopydata = 74
+)
+
+type copyData struct {
+ dwData uintptr
+ cbData uint32
+ lpData unsafe.Pointer
+}
+
+var (
+ lock sync.Mutex
+
+ winFindWindow = winAPI("user32.dll", "FindWindowW")
+ winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId")
+ winSendMessage = winAPI("user32.dll", "SendMessageW")
+)
+
+func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
+ proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName)
+ return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
+}
+
+// Available returns true if Pageant is running
+func Available() bool { return pageantWindow() != 0 }
+
+// query sends the message msg to Pageant and returns the response or an error.
+// msg must be a raw agent request with a 4-byte length prefix;
+// the response is a raw agent response with the same length-prefix framing.
+func query(msg []byte) ([]byte, error) {
+ if len(msg) > MaxMessageLen {
+ return nil, ErrMessageTooLong
+ }
+
+ msgLen := binary.BigEndian.Uint32(msg[:4])
+ if len(msg) != int(msgLen)+4 {
+ return nil, ErrInvalidMessageFormat
+ }
+
+ lock.Lock()
+ defer lock.Unlock()
+
+ paWin := pageantWindow()
+
+ if paWin == 0 {
+ return nil, ErrPageantNotFound
+ }
+
+ thID, _, _ := winGetCurrentThreadID()
+ mapName := fmt.Sprintf("PageantRequest%08x", thID)
+ pMapName, _ := syscall.UTF16PtrFromString(mapName)
+
+ mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(mmap)
+
+ ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.UnmapViewOfFile(ptr)
+
+ mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
+
+ copy(mmSlice, msg)
+
+ mapNameBytesZ := append([]byte(mapName), 0)
+
+ cds := copyData{
+ dwData: agentCopydataID,
+ cbData: uint32(len(mapNameBytesZ)),
+ lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
+ }
+
+ resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
+
+ if resp == 0 {
+ return nil, ErrSendMessage
+ }
+
+ respLen := binary.BigEndian.Uint32(mmSlice[:4])
+ if respLen > MaxMessageLen-4 {
+ return nil, ErrResponseTooLong
+ }
+
+ respData := make([]byte, respLen+4)
+ copy(respData, mmSlice)
+
+ return respData, nil
+}
+
+func pageantWindow() uintptr {
+ nameP, _ := syscall.UTF16PtrFromString("Pageant")
+ h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
+ return h
+}
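
A minimal sketch (not part of the vendored file) of the framing query expects: a 4-byte big-endian length prefix followed by the raw agent payload, here the standard SSH_AGENTC_REQUEST_IDENTITIES type byte (11). query is unexported, so a helper like this would only compile inside package sshagent; listIdentitiesRaw is a hypothetical name.

// listIdentitiesRaw is illustrative only: it frames a request-identities
// message (type byte 11) with the 4-byte big-endian length prefix that
// query expects and returns Pageant's raw, length-prefixed response.
func listIdentitiesRaw() ([]byte, error) {
	payload := []byte{11} // SSH_AGENTC_REQUEST_IDENTITIES
	msg := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(msg[:4], uint32(len(payload)))
	copy(msg[4:], payload)
	return query(msg)
}
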
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent.go b/vendor/github.com/xanzy/ssh-agent/sshagent.go
new file mode 100644
index 0000000000..259fea2b63
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/sshagent.go
@@ -0,0 +1,49 @@
+//
+// Copyright 2015, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// +build !windows
+
+package sshagent
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+
+ "golang.org/x/crypto/ssh/agent"
+)
+
+// New returns a new agent.Agent that talks to the running ssh-agent over the
+// unix socket named by SSH_AUTH_SOCK, together with the underlying net.Conn.
+func New() (agent.Agent, net.Conn, error) {
+ if !Available() {
+ return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not set")
+ }
+
+ sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
+
+ conn, err := net.Dial("unix", sshAuthSock)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
+ }
+
+ return agent.NewClient(conn), conn, nil
+}
+
+// Available returns true if an auth socket is defined (SSH_AUTH_SOCK is set).
+func Available() bool {
+ return os.Getenv("SSH_AUTH_SOCK") != ""
+}
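
A hedged usage sketch, not part of the vendored code: New selects the right backend for the platform (the unix-socket implementation above, or the Pageant one below, where the returned net.Conn is nil), and the resulting agent.Agent can list the keys the agent holds.

package main

import (
	"fmt"
	"log"

	sshagent "github.com/xanzy/ssh-agent"
)

func main() {
	ag, conn, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}
	if conn != nil {
		defer conn.Close() // nil when the Pageant backend is used
	}

	keys, err := ag.List() // list identities held by the agent
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range keys {
		fmt.Println(key.Format, key.Comment)
	}
}
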
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
new file mode 100644
index 0000000000..c46710e88e
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
@@ -0,0 +1,80 @@
+//
+// Copyright (c) 2014 David Mzareulyan
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+// and associated documentation files (the "Software"), to deal in the Software without restriction,
+// including without limitation the rights to use, copy, modify, merge, publish, distribute,
+// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial
+// portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//
+
+// +build windows
+
+package sshagent
+
+import (
+ "errors"
+ "io"
+ "net"
+ "sync"
+
+ "golang.org/x/crypto/ssh/agent"
+)
+
+// New returns a new agent.Agent backed by a running pageant.exe instance
+// (see README.md). The returned net.Conn is nil: Pageant is reached through
+// window messages and shared memory rather than a socket.
+func New() (agent.Agent, net.Conn, error) {
+ if !Available() {
+ return nil, nil, errors.New("SSH agent requested but Pageant not running")
+ }
+
+ return agent.NewClient(&conn{}), nil, nil
+}
+
+type conn struct {
+ sync.Mutex
+ buf []byte
+}
+
+func (c *conn) Close() {
+ c.Lock()
+ defer c.Unlock()
+ c.buf = nil
+}
+
+func (c *conn) Write(p []byte) (int, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ resp, err := query(p)
+ if err != nil {
+ return 0, err
+ }
+
+ c.buf = append(c.buf, resp...)
+
+ return len(p), nil
+}
+
+func (c *conn) Read(p []byte) (int, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ if len(c.buf) == 0 {
+ return 0, io.EOF
+ }
+
+ n := copy(p, c.buf)
+ c.buf = c.buf[n:]
+
+ return n, nil
+}
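
One more hedged sketch, assuming golang.org/x/crypto/ssh (already a dependency of this package): the agent's Signers method plugs directly into ssh.PublicKeysCallback for client authentication. The user, host, and host-key policy below are placeholders, not anything this package prescribes.

package main

import (
	"log"

	sshagent "github.com/xanzy/ssh-agent"
	"golang.org/x/crypto/ssh"
)

func main() {
	ag, _, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ClientConfig{
		User:            "git", // placeholder user
		Auth:            []ssh.AuthMethod{ssh.PublicKeysCallback(ag.Signers)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real use
	}

	client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}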