author     Lauris BH <lauris@nix.lv>          2020-03-17 18:19:58 +0200
committer  GitHub <noreply@github.com>        2020-03-17 12:19:58 -0400
commit     43c09134a972dc421aa06fb303697671c828cf67 (patch)
tree       6bf8da5bc0a23ce516cd5598ec982b6715fb6716 /vendor/gopkg.in
parent     2f928316dbc2bb71137e857d97039d4f51f7d405 (diff)
download   gitea-43c09134a972dc421aa06fb303697671c828cf67.tar.gz
           gitea-43c09134a972dc421aa06fb303697671c828cf67.zip
Migrate to go-git/go-git v5.0.0 (#10735)
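For downstream callers, the v4 → v5 migration is mostly an import-path change: the archived src-d packages moved to the go-git organization and adopted Go modules. A minimal before/after sketch (assuming the standard go-git v5 module layout; not taken from this diff):

```go
package main

import (
	// Before this commit, the vendored import paths were:
	//   git "gopkg.in/src-d/go-git.v4"
	//   billy "gopkg.in/src-d/go-billy.v4"
	// After the migration they become:
	git "github.com/go-git/go-git/v5"
)

func main() {
	// The API surface is largely unchanged across the rename; only the
	// import paths (and the vendored tree deleted below) differ.
	if _, err := git.PlainOpen("."); err != nil {
		println("not a git repository:", err.Error())
	}
}
```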
Diffstat (limited to 'vendor/gopkg.in')
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/.gitignore | 4
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/.travis.yml | 17
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/DCO | 25
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/LICENSE | 201
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS | 1
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/Makefile | 25
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/README.md | 72
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/appveyor.yml | 15
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/fs.go | 202
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/go.mod | 8
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/go.sum | 12
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go | 242
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go | 105
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go | 139
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go | 21
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/osfs/os_windows.go | 57
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/util/glob.go | 111
-rw-r--r--  vendor/gopkg.in/src-d/go-billy.v4/util/util.go | 224
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/.gitignore | 4
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/.travis.yml | 37
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/CODE_OF_CONDUCT.md | 74
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md | 111
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md | 59
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/DCO | 36
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/LICENSE | 201
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS | 3
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/Makefile | 52
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/README.md | 123
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/appveyor.yml | 21
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/blame.go | 302
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/common.go | 22
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/config/branch.go | 90
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/config/config.go | 407
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/config/modules.go | 139
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/config/refspec.go | 150
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/doc.go | 10
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/go.mod | 29
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/go.sum | 92
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go | 622
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go | 117
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go | 28
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/internal/url/url.go | 37
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/object_walker.go | 104
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/options.go | 492
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go | 98
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go | 39
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go | 101
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go | 35
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go | 188
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/commitgraph.go | 35
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/doc.go | 103
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/encoder.go | 188
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/file.go | 259
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/memory.go | 72
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go | 99
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go | 37
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go | 122
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go | 77
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go | 117
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go | 146
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/patch.go | 58
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go | 360
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/dir.go | 136
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/doc.go | 70
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/matcher.go | 30
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/pattern.go | 153
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go | 177
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go | 128
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go | 142
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go | 346
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/writer.go | 186
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go | 477
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go | 360
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go | 150
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go | 213
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/match.go | 186
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go | 2
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go | 114
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go | 109
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go | 78
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go | 297
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go | 369
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go | 200
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go | 39
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go | 219
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go | 30
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go | 116
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go | 164
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go | 562
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go | 483
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go | 229
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go | 466
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go | 122
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go | 134
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go | 73
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go | 61
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go | 111
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go | 144
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go | 157
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go | 61
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go | 430
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go | 327
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs.go | 100
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go | 176
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_ctime.go | 103
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go | 145
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode.go | 98
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_graph.go | 131
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_object.go | 90
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_walker_ctime.go | 105
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/doc.go | 7
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go | 12
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go | 37
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go | 137
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go | 210
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go | 237
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go | 346
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go | 357
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go | 520
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go | 136
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go | 203
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go | 288
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go | 176
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go | 252
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go | 196
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go | 70
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go | 724
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go | 165
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go | 92
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go | 33
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go | 148
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go | 31
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go | 65
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go | 127
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go | 168
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go | 257
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go | 145
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go | 122
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go | 250
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go | 75
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go | 98
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go | 109
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go | 209
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go | 11
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go | 230
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go | 2
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go | 9
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go | 288
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go | 240
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go | 10
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go | 15
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go | 48
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go | 274
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go | 156
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go | 53
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go | 109
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go | 281
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go | 106
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go | 123
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go | 467
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go | 73
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go | 64
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go | 422
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go | 322
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go | 228
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/prune.go | 66
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/references.go | 264
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/remote.go | 1114
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/repository.go | 1545
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/status.go | 79
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go | 61
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/deltaobject.go | 37
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go | 1099
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go | 81
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go | 90
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/writers.go | 284
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go | 54
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go | 20
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go | 815
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go | 44
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go | 54
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go | 73
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go | 320
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/storage/storer.go | 30
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/submodule.go | 357
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go | 180
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go | 50
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go | 61
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go | 170
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go | 149
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go | 424
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go | 34
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go | 187
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem/node.go | 196
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/index/node.go | 90
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go | 91
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go | 216
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go | 59
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go | 90
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree.go | 954
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go | 26
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_commit.go | 228
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go | 26
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_status.go | 660
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go | 26
-rw-r--r--  vendor/gopkg.in/src-d/go-git.v4/worktree_windows.go | 35
-rw-r--r--  vendor/gopkg.in/yaml.v2/decode.go | 38
-rw-r--r--  vendor/gopkg.in/yaml.v2/resolve.go | 2
-rw-r--r--  vendor/gopkg.in/yaml.v2/scannerc.go | 16
209 files changed, 55 insertions, 35749 deletions
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/.gitignore b/vendor/gopkg.in/src-d/go-billy.v4/.gitignore
deleted file mode 100644
index 7aeb46699c..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/coverage.txt
-/vendor
-Gopkg.lock
-Gopkg.toml
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/.travis.yml b/vendor/gopkg.in/src-d/go-billy.v4/.travis.yml
deleted file mode 100644
index a70b470d4a..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-
-go:
- - 1.9.x
- - 1.10.x
-
-go_import_path: gopkg.in/src-d/go-billy.v4
-
-install:
- - go get -v -t ./...
-
-script:
- - make test-coverage
- - ./.ci/test-building-binaries-for-supported-os.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/DCO b/vendor/gopkg.in/src-d/go-billy.v4/DCO
deleted file mode 100644
index 29c1b92089..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/DCO
+++ /dev/null
@@ -1,25 +0,0 @@
- Developer's Certificate of Origin 1.1
-
- By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/LICENSE b/vendor/gopkg.in/src-d/go-billy.v4/LICENSE
deleted file mode 100644
index 9d60756894..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2017 Sourced Technologies S.L.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS b/vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS
deleted file mode 100644
index 8dbba477d8..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Máximo Cuadros <mcuadros@gmail.com> (@mcuadros)
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/Makefile b/vendor/gopkg.in/src-d/go-billy.v4/Makefile
deleted file mode 100644
index 19e743378c..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-# General
-WORKDIR = $(PWD)
-
-# Go parameters
-GOCMD = go
-GOTEST = $(GOCMD) test -v
-
-# Coverage
-COVERAGE_REPORT = coverage.txt
-COVERAGE_PROFILE = profile.out
-COVERAGE_MODE = atomic
-
-test-coverage:
- cd $(WORKDIR); \
- echo "" > $(COVERAGE_REPORT); \
- for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \
- $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \
- if [ $$? != 0 ]; then \
- exit 2; \
- fi; \
- if [ -f $(COVERAGE_PROFILE) ]; then \
- cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \
- rm $(COVERAGE_PROFILE); \
- fi; \
- done; \
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/README.md b/vendor/gopkg.in/src-d/go-billy.v4/README.md
deleted file mode 100644
index ae4a3f8691..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# go-billy [![GoDoc](https://godoc.org/gopkg.in/src-d/go-billy.v4?status.svg)](https://godoc.org/gopkg.in/src-d/go-billy.v4) [![Build Status](https://travis-ci.com/src-d/go-billy.svg)](https://travis-ci.com/src-d/go-billy) [![Build status](https://ci.appveyor.com/api/projects/status/vx2qn6vlakbi724t?svg=true)](https://ci.appveyor.com/project/mcuadros/go-billy) [![codecov](https://codecov.io/gh/src-d/go-billy/branch/master/graph/badge.svg)](https://codecov.io/gh/src-d/go-billy)
-
-The missing interface filesystem abstraction for Go.
-Billy implements an interface based on the `os` standard library, allowing applications to be developed without dependency on the underlying storage. This makes it virtually free to implement mocks and testing over filesystem operations.
-
-Billy was born as part of [src-d/go-git](https://github.com/src-d/go-git) project.
-
-## Installation
-
-```sh
-go get -u gopkg.in/src-d/go-billy.v4/...
-```
-
-## Usage
-
-Billy exposes filesystems using the
-[`Filesystem` interface](https://godoc.org/github.com/src-d/go-billy#Filesystem).
-Each filesystem implementation provides a `New` function, whose arguments depend
-on the implementation itself, and which returns a new `Filesystem`.
-
-The following example caches in memory all readable files in a directory from any
-billy filesystem implementation.
-
-```go
-func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) {
- memory := memory.New()
-
- files, err := origin.ReadDir("/")
- if err != nil {
- return nil, err
- }
-
- for _, file := range files {
- if file.IsDir() {
- continue
- }
-
- src, err := origin.Open(file.Name())
- if err != nil {
- return nil, err
- }
-
- dst, err := memory.Create(file.Name())
- if err != nil {
- return nil, err
- }
-
- if _, err = io.Copy(dst, src); err != nil {
- return nil, err
- }
-
- if err := dst.Close(); err != nil {
- return nil, err
- }
-
- if err := src.Close(); err != nil {
- return nil, err
- }
- }
-
- return memory, nil
-}
-```
-
-## Why billy?
-
-The library billy deals with storage systems and Billy is the name of a well-known IKEA
-bookcase. That's it.
-
-## License
-
-Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/appveyor.yml b/vendor/gopkg.in/src-d/go-billy.v4/appveyor.yml
deleted file mode 100644
index 91c0b40c0e..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/appveyor.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-version: "{build}"
-platform: x64
-
-clone_folder: c:\gopath\src\gopkg.in\src-d\go-billy.v4
-
-environment:
- GOPATH: c:\gopath
-
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
- - go get -v -t ./...
-
-build_script:
- - go test -v ./...
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/fs.go b/vendor/gopkg.in/src-d/go-billy.v4/fs.go
deleted file mode 100644
index a9efccdeb2..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/fs.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package billy
-
-import (
- "errors"
- "io"
- "os"
- "time"
-)
-
-var (
- ErrReadOnly = errors.New("read-only filesystem")
- ErrNotSupported = errors.New("feature not supported")
- ErrCrossedBoundary = errors.New("chroot boundary crossed")
-)
-
-// Capability holds the supported features of a billy filesystem. This does
-// not mean that the capability has to be supported by the underlying storage.
-// For example, a billy filesystem may support WriteCapability but the
-// storage may be mounted in read-only mode.
-type Capability uint64
-
-const (
- // WriteCapability means that the fs is writable.
- WriteCapability Capability = 1 << iota
- // ReadCapability means that the fs is readable.
- ReadCapability
- // ReadAndWriteCapability is the ability to open a file in read and write mode.
- ReadAndWriteCapability
- // SeekCapability means it is able to move position inside the file.
- SeekCapability
- // TruncateCapability means that a file can be truncated.
- TruncateCapability
- // LockCapability is the ability to lock a file.
- LockCapability
-
-	// DefaultCapabilities lists the features assumed for filesystems that do
-	// not implement the Capable interface. This list should not be changed
-	// until a major version is released.
- DefaultCapabilities Capability = WriteCapability | ReadCapability |
- ReadAndWriteCapability | SeekCapability | TruncateCapability |
- LockCapability
-
- // AllCapabilities lists all capable features.
- AllCapabilities Capability = WriteCapability | ReadCapability |
- ReadAndWriteCapability | SeekCapability | TruncateCapability |
- LockCapability
-)
-
-// Filesystem abstracts the operations in a storage-agnostic interface.
-// Each method implementation mimics the behavior of the equivalent functions
-// in the os package from the standard library.
-type Filesystem interface {
- Basic
- TempFile
- Dir
- Symlink
- Chroot
-}
-
-// Basic abstracts the basic file operations in a storage-agnostic interface.
-type Basic interface {
- // Create creates the named file with mode 0666 (before umask), truncating
- // it if it already exists. If successful, methods on the returned File can
- // be used for I/O; the associated file descriptor has mode O_RDWR.
- Create(filename string) (File, error)
- // Open opens the named file for reading. If successful, methods on the
- // returned file can be used for reading; the associated file descriptor has
- // mode O_RDONLY.
- Open(filename string) (File, error)
- // OpenFile is the generalized open call; most users will use Open or Create
- // instead. It opens the named file with specified flag (O_RDONLY etc.) and
- // perm, (0666 etc.) if applicable. If successful, methods on the returned
- // File can be used for I/O.
- OpenFile(filename string, flag int, perm os.FileMode) (File, error)
- // Stat returns a FileInfo describing the named file.
- Stat(filename string) (os.FileInfo, error)
- // Rename renames (moves) oldpath to newpath. If newpath already exists and
- // is not a directory, Rename replaces it. OS-specific restrictions may
- // apply when oldpath and newpath are in different directories.
- Rename(oldpath, newpath string) error
- // Remove removes the named file or directory.
- Remove(filename string) error
- // Join joins any number of path elements into a single path, adding a
- // Separator if necessary. Join calls filepath.Clean on the result; in
- // particular, all empty strings are ignored. On Windows, the result is a
- // UNC path if and only if the first path element is a UNC path.
- Join(elem ...string) string
-}
-
-type TempFile interface {
- // TempFile creates a new temporary file in the directory dir with a name
- // beginning with prefix, opens the file for reading and writing, and
- // returns the resulting *os.File. If dir is the empty string, TempFile
- // uses the default directory for temporary files (see os.TempDir).
- // Multiple programs calling TempFile simultaneously will not choose the
- // same file. The caller can use f.Name() to find the pathname of the file.
- // It is the caller's responsibility to remove the file when no longer
- // needed.
- TempFile(dir, prefix string) (File, error)
-}
-
-// Dir abstracts the directory-related operations in a storage-agnostic
-// interface as an extension to the Basic interface.
-type Dir interface {
- // ReadDir reads the directory named by dirname and returns a list of
- // directory entries sorted by filename.
- ReadDir(path string) ([]os.FileInfo, error)
- // MkdirAll creates a directory named path, along with any necessary
- // parents, and returns nil, or else returns an error. The permission bits
-	// perm are used for all directories that MkdirAll creates. If path is
- // already a directory, MkdirAll does nothing and returns nil.
- MkdirAll(filename string, perm os.FileMode) error
-}
-
-// Symlink abstracts the symlink-related operations in a storage-agnostic
-// interface as an extension to the Basic interface.
-type Symlink interface {
- // Lstat returns a FileInfo describing the named file. If the file is a
- // symbolic link, the returned FileInfo describes the symbolic link. Lstat
- // makes no attempt to follow the link.
- Lstat(filename string) (os.FileInfo, error)
- // Symlink creates a symbolic-link from link to target. target may be an
- // absolute or relative path, and need not refer to an existing node.
- // Parent directories of link are created as necessary.
- Symlink(target, link string) error
- // Readlink returns the target path of link.
- Readlink(link string) (string, error)
-}
-
-// Change abstracts the FileInfo-changing operations in a storage-agnostic
-// interface as an extension to the Basic interface.
-type Change interface {
- // Chmod changes the mode of the named file to mode. If the file is a
- // symbolic link, it changes the mode of the link's target.
- Chmod(name string, mode os.FileMode) error
- // Lchown changes the numeric uid and gid of the named file. If the file is
- // a symbolic link, it changes the uid and gid of the link itself.
- Lchown(name string, uid, gid int) error
- // Chown changes the numeric uid and gid of the named file. If the file is a
- // symbolic link, it changes the uid and gid of the link's target.
- Chown(name string, uid, gid int) error
- // Chtimes changes the access and modification times of the named file,
- // similar to the Unix utime() or utimes() functions.
- //
- // The underlying filesystem may truncate or round the values to a less
- // precise time unit.
- Chtimes(name string, atime time.Time, mtime time.Time) error
-}
-
-// Chroot abstracts the chroot-related operations in a storage-agnostic
-// interface as an extension to the Basic interface.
-type Chroot interface {
-	// Chroot returns a new filesystem of the same type where the new root is
- // the given path. Files outside of the designated directory tree cannot be
- // accessed.
- Chroot(path string) (Filesystem, error)
- // Root returns the root path of the filesystem.
- Root() string
-}
-
-// File represents a file, a subset of os.File.
-type File interface {
- // Name returns the name of the file as presented to Open.
- Name() string
- io.Writer
- io.Reader
- io.ReaderAt
- io.Seeker
- io.Closer
-	// Lock locks the file, like e.g. flock(2) does. It protects against
-	// access from other processes.
- Lock() error
- // Unlock unlocks the file.
- Unlock() error
- // Truncate the file.
- Truncate(size int64) error
-}
-
-// Capable interface can return the available features of a filesystem.
-type Capable interface {
- // Capabilities returns the capabilities of a filesystem in bit flags.
- Capabilities() Capability
-}
-
-// Capabilities returns the features supported by a filesystem. If the FS
-// does not implement the Capable interface, it returns DefaultCapabilities.
-func Capabilities(fs Basic) Capability {
- capable, ok := fs.(Capable)
- if !ok {
- return DefaultCapabilities
- }
-
- return capable.Capabilities()
-}
-
-// CapabilityCheck tests the filesystem for the provided capabilities and
-// returns true if it supports all of them.
-func CapabilityCheck(fs Basic, capabilities Capability) bool {
- fsCaps := Capabilities(fs)
- return fsCaps&capabilities == capabilities
-}
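The Capability flags and the `Capabilities`/`CapabilityCheck` helpers deleted above suggest a guard-before-write pattern. A minimal sketch, assuming billy's bundled `memfs` in-memory implementation (not part of this diff):

```go
package main

import (
	"fmt"

	billy "gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/memfs" // in-memory billy backend, assumed available
)

func main() {
	fs := memfs.New()

	// Guard before mutating: does this backend claim write + truncate support?
	need := billy.WriteCapability | billy.TruncateCapability
	if !billy.CapabilityCheck(fs, need) {
		fmt.Println("backend is read-only or limited; skipping writes")
		return
	}

	f, err := fs.Create("hello.txt")
	if err != nil {
		panic(err)
	}
	f.Write([]byte("hi"))
	f.Close()
}
```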
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/go.mod b/vendor/gopkg.in/src-d/go-billy.v4/go.mod
deleted file mode 100644
index e5227de0ae..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/go.mod
+++ /dev/null
@@ -1,8 +0,0 @@
-module gopkg.in/src-d/go-billy.v4
-
-require (
- github.com/kr/pretty v0.1.0 // indirect
- github.com/kr/pty v1.1.8 // indirect
- golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e
- gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
-)
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/go.sum b/vendor/gopkg.in/src-d/go-billy.v4/go.sum
deleted file mode 100644
index 5e9ed217e9..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/go.sum
+++ /dev/null
@@ -1,12 +0,0 @@
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go b/vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go
deleted file mode 100644
index 44ddb3db53..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package chroot
-
-import (
- "os"
- "path/filepath"
- "strings"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-billy.v4/helper/polyfill"
-)
-
-// ChrootHelper is a helper to implement billy.Chroot.
-type ChrootHelper struct {
- underlying billy.Filesystem
- base string
-}
-
-// New creates a new filesystem wrapping up the given 'fs'.
-// The created filesystem has its base at the given directory of the
-// underlying filesystem.
-func New(fs billy.Basic, base string) billy.Filesystem {
- return &ChrootHelper{
- underlying: polyfill.New(fs),
- base: base,
- }
-}
-
-func (fs *ChrootHelper) underlyingPath(filename string) (string, error) {
- if isCrossBoundaries(filename) {
- return "", billy.ErrCrossedBoundary
- }
-
- return fs.Join(fs.Root(), filename), nil
-}
-
-func isCrossBoundaries(path string) bool {
- path = filepath.ToSlash(path)
- path = filepath.Clean(path)
-
- return strings.HasPrefix(path, ".."+string(filepath.Separator))
-}
-
-func (fs *ChrootHelper) Create(filename string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.Create(fullpath)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) Open(filename string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.Open(fullpath)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.OpenFile(fullpath, flag, mode)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, filename), nil
-}
-
-func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.Stat(fullpath)
-}
-
-func (fs *ChrootHelper) Rename(from, to string) error {
- var err error
- from, err = fs.underlyingPath(from)
- if err != nil {
- return err
- }
-
- to, err = fs.underlyingPath(to)
- if err != nil {
- return err
- }
-
- return fs.underlying.Rename(from, to)
-}
-
-func (fs *ChrootHelper) Remove(path string) error {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return err
- }
-
- return fs.underlying.Remove(fullpath)
-}
-
-func (fs *ChrootHelper) Join(elem ...string) string {
- return fs.underlying.Join(elem...)
-}
-
-func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) {
- fullpath, err := fs.underlyingPath(dir)
- if err != nil {
- return nil, err
- }
-
- f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix)
- if err != nil {
- return nil, err
- }
-
- return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil
-}
-
-func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.(billy.Dir).ReadDir(fullpath)
-}
-
-func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return err
- }
-
- return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm)
-}
-
-func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) {
- fullpath, err := fs.underlyingPath(filename)
- if err != nil {
- return nil, err
- }
-
- return fs.underlying.(billy.Symlink).Lstat(fullpath)
-}
-
-func (fs *ChrootHelper) Symlink(target, link string) error {
- target = filepath.FromSlash(target)
-
- // only rewrite target if it's already absolute
- if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) {
- target = fs.Join(fs.Root(), target)
- target = filepath.Clean(filepath.FromSlash(target))
- }
-
- link, err := fs.underlyingPath(link)
- if err != nil {
- return err
- }
-
- return fs.underlying.(billy.Symlink).Symlink(target, link)
-}
-
-func (fs *ChrootHelper) Readlink(link string) (string, error) {
- fullpath, err := fs.underlyingPath(link)
- if err != nil {
- return "", err
- }
-
- target, err := fs.underlying.(billy.Symlink).Readlink(fullpath)
- if err != nil {
- return "", err
- }
-
- if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) {
- return target, nil
- }
-
- target, err = filepath.Rel(fs.base, target)
- if err != nil {
- return "", err
- }
-
- return string(os.PathSeparator) + target, nil
-}
-
-func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) {
- fullpath, err := fs.underlyingPath(path)
- if err != nil {
- return nil, err
- }
-
- return New(fs.underlying, fullpath), nil
-}
-
-func (fs *ChrootHelper) Root() string {
- return fs.base
-}
-
-func (fs *ChrootHelper) Underlying() billy.Basic {
- return fs.underlying
-}
-
-// Capabilities implements the Capable interface.
-func (fs *ChrootHelper) Capabilities() billy.Capability {
- return billy.Capabilities(fs.underlying)
-}
-
-type file struct {
- billy.File
- name string
-}
-
-func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File {
- filename = fs.Join(fs.Root(), filename)
- filename, _ = filepath.Rel(fs.Root(), filename)
-
- return &file{
- File: f,
- name: filename,
- }
-}
-
-func (f *file) Name() string {
- return f.name
-}
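To illustrate the boundary logic in `underlyingPath`/`isCrossBoundaries` above, here is a hypothetical sketch (the `/tmp/jail` path is illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/helper/chroot"
	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	// Confine all operations beneath /tmp/jail on the real filesystem.
	fs := chroot.New(osfs.New("/"), "/tmp/jail")

	// "/etc/passwd" resolves to /tmp/jail/etc/passwd, not the real file.
	if _, err := fs.Open("/etc/passwd"); err != nil {
		fmt.Println("open:", err) // not found unless created inside the jail
	}

	// Relative paths that climb out of the root are rejected outright.
	if _, err := fs.Open("../escape"); err != nil {
		fmt.Println("escape attempt:", err) // billy.ErrCrossedBoundary
	}
}
```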
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go b/vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go
deleted file mode 100644
index f613c255d9..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package polyfill
-
-import (
- "os"
- "path/filepath"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// Polyfill is a helper that implements all missing methods of billy.Filesystem.
-type Polyfill struct {
- billy.Basic
- c capabilities
-}
-
-type capabilities struct{ tempfile, dir, symlink, chroot bool }
-
-// New creates a new filesystem wrapping up 'fs'. It intercepts all calls and
-// returns billy.ErrNotSupported for any extension interface that 'fs' does
-// not implement.
-func New(fs billy.Basic) billy.Filesystem {
- if original, ok := fs.(billy.Filesystem); ok {
- return original
- }
-
- h := &Polyfill{Basic: fs}
-
- _, h.c.tempfile = h.Basic.(billy.TempFile)
- _, h.c.dir = h.Basic.(billy.Dir)
- _, h.c.symlink = h.Basic.(billy.Symlink)
- _, h.c.chroot = h.Basic.(billy.Chroot)
- return h
-}
-
-func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) {
- if !h.c.tempfile {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.TempFile).TempFile(dir, prefix)
-}
-
-func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) {
- if !h.c.dir {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Dir).ReadDir(path)
-}
-
-func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error {
- if !h.c.dir {
- return billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Dir).MkdirAll(filename, perm)
-}
-
-func (h *Polyfill) Symlink(target, link string) error {
- if !h.c.symlink {
- return billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Symlink(target, link)
-}
-
-func (h *Polyfill) Readlink(link string) (string, error) {
- if !h.c.symlink {
- return "", billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Readlink(link)
-}
-
-func (h *Polyfill) Lstat(path string) (os.FileInfo, error) {
- if !h.c.symlink {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Symlink).Lstat(path)
-}
-
-func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
- if !h.c.chroot {
- return nil, billy.ErrNotSupported
- }
-
- return h.Basic.(billy.Chroot).Chroot(path)
-}
-
-func (h *Polyfill) Root() string {
- if !h.c.chroot {
- return string(filepath.Separator)
- }
-
- return h.Basic.(billy.Chroot).Root()
-}
-
-func (h *Polyfill) Underlying() billy.Basic {
- return h.Basic
-}
-
-// Capabilities implements the Capable interface.
-func (h *Polyfill) Capabilities() billy.Capability {
- return billy.Capabilities(h.Basic)
-}
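A sketch of how the polyfill degrades: `basicOnly` below is a hypothetical type implementing only `billy.Basic`, so every extension method on the wrapped filesystem fails softly with `ErrNotSupported` instead of panicking on a type assertion:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	billy "gopkg.in/src-d/go-billy.v4"
	"gopkg.in/src-d/go-billy.v4/helper/polyfill"
)

// basicOnly is a hypothetical, do-nothing implementation of billy.Basic only.
type basicOnly struct{}

func (basicOnly) Create(string) (billy.File, error)                     { return nil, errors.New("stub") }
func (basicOnly) Open(string) (billy.File, error)                       { return nil, errors.New("stub") }
func (basicOnly) OpenFile(string, int, os.FileMode) (billy.File, error) { return nil, errors.New("stub") }
func (basicOnly) Stat(string) (os.FileInfo, error)                      { return nil, errors.New("stub") }
func (basicOnly) Rename(string, string) error                           { return errors.New("stub") }
func (basicOnly) Remove(string) error                                   { return errors.New("stub") }
func (basicOnly) Join(elem ...string) string                            { return "" }

func main() {
	fs := polyfill.New(basicOnly{}) // now satisfies the full billy.Filesystem

	// basicOnly does not implement billy.Dir, so ReadDir fails softly.
	if _, err := fs.ReadDir("/"); err == billy.ErrNotSupported {
		fmt.Println("directory listing not supported by this backend")
	}
}
```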
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go b/vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go
deleted file mode 100644
index ff35a3ba96..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Package osfs provides a billy filesystem for the OS.
-package osfs // import "gopkg.in/src-d/go-billy.v4/osfs"
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-billy.v4/helper/chroot"
-)
-
-const (
- defaultDirectoryMode = 0755
- defaultCreateMode = 0666
-)
-
-// OS is a filesystem based on the os filesystem.
-type OS struct{}
-
-// New returns a new OS filesystem.
-func New(baseDir string) billy.Filesystem {
- return chroot.New(&OS{}, baseDir)
-}
-
-func (fs *OS) Create(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
-}
-
-func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
- if flag&os.O_CREATE != 0 {
- if err := fs.createDir(filename); err != nil {
- return nil, err
- }
- }
-
- f, err := os.OpenFile(filename, flag, perm)
- if err != nil {
- return nil, err
- }
- return &file{File: f}, err
-}
-
-func (fs *OS) createDir(fullpath string) error {
- dir := filepath.Dir(fullpath)
- if dir != "." {
- if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) {
- l, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- var s = make([]os.FileInfo, len(l))
- for i, f := range l {
- s[i] = f
- }
-
- return s, nil
-}
-
-func (fs *OS) Rename(from, to string) error {
- if err := fs.createDir(to); err != nil {
- return err
- }
-
- return os.Rename(from, to)
-}
-
-func (fs *OS) MkdirAll(path string, perm os.FileMode) error {
- return os.MkdirAll(path, defaultDirectoryMode)
-}
-
-func (fs *OS) Open(filename string) (billy.File, error) {
- return fs.OpenFile(filename, os.O_RDONLY, 0)
-}
-
-func (fs *OS) Stat(filename string) (os.FileInfo, error) {
- return os.Stat(filename)
-}
-
-func (fs *OS) Remove(filename string) error {
- return os.Remove(filename)
-}
-
-func (fs *OS) TempFile(dir, prefix string) (billy.File, error) {
- if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
- return nil, err
- }
-
- f, err := ioutil.TempFile(dir, prefix)
- if err != nil {
- return nil, err
- }
- return &file{File: f}, nil
-}
-
-func (fs *OS) Join(elem ...string) string {
- return filepath.Join(elem...)
-}
-
-func (fs *OS) RemoveAll(path string) error {
- return os.RemoveAll(filepath.Clean(path))
-}
-
-func (fs *OS) Lstat(filename string) (os.FileInfo, error) {
- return os.Lstat(filepath.Clean(filename))
-}
-
-func (fs *OS) Symlink(target, link string) error {
- if err := fs.createDir(link); err != nil {
- return err
- }
-
- return os.Symlink(target, link)
-}
-
-func (fs *OS) Readlink(link string) (string, error) {
- return os.Readlink(link)
-}
-
-// Capabilities implements the Capable interface.
-func (fs *OS) Capabilities() billy.Capability {
- return billy.DefaultCapabilities
-}
-
-// file is a wrapper for an os.File which adds support for file locking.
-type file struct {
- *os.File
- m sync.Mutex
-}
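A minimal usage sketch for the OS-backed filesystem deleted above; note that `Create` implies `MkdirAll` on the parent via `createDir` (the temp-dir setup is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	dir, _ := ioutil.TempDir("", "billy-demo")
	defer os.RemoveAll(dir)

	fs := osfs.New(dir) // all paths below are relative to dir (chrooted)

	// Create makes the missing parent directories via createDir.
	f, err := fs.Create("nested/dirs/hello.txt")
	if err != nil {
		panic(err)
	}
	f.Write([]byte("hello"))
	f.Close()

	infos, _ := fs.ReadDir("nested/dirs")
	for _, fi := range infos {
		fmt.Println(fi.Name(), fi.Size())
	}
}
```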
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go b/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go
deleted file mode 100644
index 144cde1c18..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build !windows
-
-package osfs
-
-import (
- "golang.org/x/sys/unix"
-)
-
-func (f *file) Lock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- return unix.Flock(int(f.File.Fd()), unix.LOCK_EX)
-}
-
-func (f *file) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- return unix.Flock(int(f.File.Fd()), unix.LOCK_UN)
-}
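`Lock` here is an advisory exclusive `flock(2)`: it only excludes processes that also call `Lock`. A sketch of the intended call pattern on a POSIX build:

```go
package main

import (
	"io/ioutil"
	"os"

	"gopkg.in/src-d/go-billy.v4/osfs"
)

func main() {
	dir, _ := ioutil.TempDir("", "lock-demo")
	defer os.RemoveAll(dir)

	fs := osfs.New(dir)
	f, err := fs.Create("pid.lock")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Advisory lock: blocks until exclusive; only cooperating processes
	// (those that also call Lock) are excluded.
	if err := f.Lock(); err != nil {
		panic(err)
	}
	defer f.Unlock()
	// ... critical section ...
}
```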
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_windows.go b/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_windows.go
deleted file mode 100644
index 5eb98829d0..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/osfs/os_windows.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// +build windows
-
-package osfs
-
-import (
- "os"
- "runtime"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-type fileInfo struct {
- os.FileInfo
- name string
-}
-
-func (fi *fileInfo) Name() string {
- return fi.name
-}
-
-var (
- kernel32DLL = windows.NewLazySystemDLL("kernel32.dll")
- lockFileExProc = kernel32DLL.NewProc("LockFileEx")
- unlockFileProc = kernel32DLL.NewProc("UnlockFile")
-)
-
-const (
- lockfileExclusiveLock = 0x2
-)
-
-func (f *file) Lock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- var overlapped windows.Overlapped
- // err is always non-nil as per sys/windows semantics.
- ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0,
- uintptr(unsafe.Pointer(&overlapped)))
- runtime.KeepAlive(&overlapped)
- if ret == 0 {
- return err
- }
- return nil
-}
-
-func (f *file) Unlock() error {
- f.m.Lock()
- defer f.m.Unlock()
-
- // err is always non-nil as per sys/windows semantics.
- ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0)
- if ret == 0 {
- return err
- }
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/util/glob.go b/vendor/gopkg.in/src-d/go-billy.v4/util/glob.go
deleted file mode 100644
index fdcb3e5f0a..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/util/glob.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package util
-
-import (
- "path/filepath"
- "sort"
- "strings"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// Glob returns the names of all files matching pattern or nil
-// if there is no matching file. The syntax of patterns is the same
-// as in Match. The pattern may describe hierarchical names such as
-// /usr/*/bin/ed (assuming the Separator is '/').
-//
-// Glob ignores file system errors such as I/O errors reading directories.
-// The only possible returned error is ErrBadPattern, when pattern
-// is malformed.
-//
-// Function originally from https://golang.org/src/path/filepath/match_test.go
-func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) {
- if !hasMeta(pattern) {
- if _, err = fs.Lstat(pattern); err != nil {
- return nil, nil
- }
- return []string{pattern}, nil
- }
-
- dir, file := filepath.Split(pattern)
- // Prevent infinite recursion. See issue 15879.
- if dir == pattern {
- return nil, filepath.ErrBadPattern
- }
-
- var m []string
- m, err = Glob(fs, cleanGlobPath(dir))
- if err != nil {
- return
- }
- for _, d := range m {
- matches, err = glob(fs, d, file, matches)
- if err != nil {
- return
- }
- }
- return
-}
-
-// cleanGlobPath prepares path for glob matching.
-func cleanGlobPath(path string) string {
- switch path {
- case "":
- return "."
- case string(filepath.Separator):
- // do nothing to the path
- return path
- default:
- return path[0 : len(path)-1] // chop off trailing separator
- }
-}
-
-// glob searches for files matching pattern in the directory dir
-// and appends them to matches. If the directory cannot be
-// opened, it returns the existing matches. New matches are
-// added in lexicographical order.
-func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) {
- m = matches
- fi, err := fs.Stat(dir)
- if err != nil {
- return
- }
-
- if !fi.IsDir() {
- return
- }
-
- names, _ := readdirnames(fs, dir)
- sort.Strings(names)
-
- for _, n := range names {
- matched, err := filepath.Match(pattern, n)
- if err != nil {
- return m, err
- }
- if matched {
- m = append(m, filepath.Join(dir, n))
- }
- }
- return
-}
-
-// hasMeta reports whether path contains any of the magic characters
-// recognized by Match.
-func hasMeta(path string) bool {
- // TODO(niemeyer): Should other magic characters be added here?
- return strings.ContainsAny(path, "*?[")
-}
-
-func readdirnames(fs billy.Filesystem, dir string) ([]string, error) {
- files, err := fs.ReadDir(dir)
- if err != nil {
- return nil, err
- }
-
- var names []string
- for _, file := range files {
- names = append(names, file.Name())
- }
-
- return names, nil
-}
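
A small usage sketch for `Glob`, assuming the sibling memfs package from go-billy (not shown in this diff) and the `WriteFile` helper from util.go below:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/memfs"
	"gopkg.in/src-d/go-billy.v4/util"
)

func main() {
	fs := memfs.New()

	for _, name := range []string{"docs/a.txt", "docs/b.txt", "docs/notes.md"} {
		if err := util.WriteFile(fs, name, []byte("x"), 0644); err != nil {
			panic(err)
		}
	}

	// Only filepath.ErrBadPattern can be returned; I/O errors while
	// reading directories are silently ignored.
	matches, err := util.Glob(fs, "docs/*.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // [docs/a.txt docs/b.txt]
}
```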
diff --git a/vendor/gopkg.in/src-d/go-billy.v4/util/util.go b/vendor/gopkg.in/src-d/go-billy.v4/util/util.go
deleted file mode 100644
index cf7fb57f75..0000000000
--- a/vendor/gopkg.in/src-d/go-billy.v4/util/util.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package util
-
-import (
- "io"
- "os"
- "path/filepath"
- "strconv"
- "sync"
- "time"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// RemoveAll removes path and any children it contains. It removes everything it
-// can but returns the first error it encounters. If the path does not exist,
-// RemoveAll returns nil (no error).
-func RemoveAll(fs billy.Basic, path string) error {
- fs, path = getUnderlyingAndPath(fs, path)
-
- if r, ok := fs.(removerAll); ok {
- return r.RemoveAll(path)
- }
-
- return removeAll(fs, path)
-}
-
-type removerAll interface {
- RemoveAll(string) error
-}
-
-func removeAll(fs billy.Basic, path string) error {
- // This implementation is adapted from os.RemoveAll.
-
- // Simple case: if Remove works, we're done.
- err := fs.Remove(path)
- if err == nil || os.IsNotExist(err) {
- return nil
- }
-
- // Otherwise, is this a directory we need to recurse into?
- dir, serr := fs.Stat(path)
- if serr != nil {
- if os.IsNotExist(serr) {
- return nil
- }
-
- return serr
- }
-
- if !dir.IsDir() {
- // Not a directory; return the error from Remove.
- return err
- }
-
- dirfs, ok := fs.(billy.Dir)
- if !ok {
- return billy.ErrNotSupported
- }
-
- // Directory.
- fis, err := dirfs.ReadDir(path)
- if err != nil {
- if os.IsNotExist(err) {
- // Race. It was deleted between the Lstat and Open.
- // Return nil per RemoveAll's docs.
- return nil
- }
-
- return err
- }
-
- // Remove contents & return first error.
- err = nil
- for _, fi := range fis {
- cpath := fs.Join(path, fi.Name())
- err1 := removeAll(fs, cpath)
- if err == nil {
- err = err1
- }
- }
-
- // Remove directory.
- err1 := fs.Remove(path)
- if err1 == nil || os.IsNotExist(err1) {
- return nil
- }
-
- if err == nil {
- err = err1
- }
-
- return err
-
-}
-
-// WriteFile writes data to a file named by filename in the given filesystem.
-// If the file does not exist, WriteFile creates it with permissions perm;
-// otherwise WriteFile truncates it before writing.
-func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error {
- f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
- if err != nil {
- return err
- }
-
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- }
-
- if err1 := f.Close(); err == nil {
- err = err1
- }
-
- return err
-}
-
-// Random number state.
-// We generate random temporary file names so that there's a good
-// chance the file doesn't exist yet - keeps the number of tries in
-// TempFile to a minimum.
-var rand uint32
-var randmu sync.Mutex
-
-func reseed() uint32 {
- return uint32(time.Now().UnixNano() + int64(os.Getpid()))
-}
-
-func nextSuffix() string {
- randmu.Lock()
- r := rand
- if r == 0 {
- r = reseed()
- }
- r = r*1664525 + 1013904223 // constants from Numerical Recipes
- rand = r
- randmu.Unlock()
- return strconv.Itoa(int(1e9 + r%1e9))[1:]
-}
-
-// TempFile creates a new temporary file in the directory dir with a name
-// beginning with prefix, opens the file for reading and writing, and returns
-// the resulting *os.File. If dir is the empty string, TempFile uses the default
-// directory for temporary files (see os.TempDir). Multiple programs calling
-// TempFile simultaneously will not choose the same file. The caller can use
-// f.Name() to find the pathname of the file. It is the caller's responsibility
-// to remove the file when no longer needed.
-func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) {
- // This implementation is based on stdlib ioutil.TempFile.
-
- if dir == "" {
- dir = os.TempDir()
- }
-
- nconflict := 0
- for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+nextSuffix())
- f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
- if os.IsExist(err) {
- if nconflict++; nconflict > 10 {
- randmu.Lock()
- rand = reseed()
- randmu.Unlock()
- }
- continue
- }
- break
- }
- return
-}
-
-// TempDir creates a new temporary directory in the directory dir
-// with a name beginning with prefix and returns the path of the
-// new directory. If dir is the empty string, TempDir uses the
-// default directory for temporary files (see os.TempDir).
-// Multiple programs calling TempDir simultaneously
-// will not choose the same directory. It is the caller's responsibility
-// to remove the directory when no longer needed.
-func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) {
- // This implementation is based on stdlib ioutil.TempDir
-
- if dir == "" {
- dir = os.TempDir()
- }
-
- nconflict := 0
- for i := 0; i < 10000; i++ {
- try := filepath.Join(dir, prefix+nextSuffix())
- err = fs.MkdirAll(try, 0700)
- if os.IsExist(err) {
- if nconflict++; nconflict > 10 {
- randmu.Lock()
- rand = reseed()
- randmu.Unlock()
- }
- continue
- }
- if os.IsNotExist(err) {
- if _, err := os.Stat(dir); os.IsNotExist(err) {
- return "", err
- }
- }
- if err == nil {
- name = try
- }
- break
- }
- return
-}
-
-type underlying interface {
- Underlying() billy.Basic
-}
-
-func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) {
- u, ok := fs.(underlying)
- if !ok {
- return fs, path
- }
- if ch, ok := fs.(billy.Chroot); ok {
- path = fs.Join(ch.Root(), path)
- }
-
- return u.Underlying(), path
-}
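
A sketch tying the util helpers together (TempDir, WriteFile, RemoveAll), again over an assumed in-memory filesystem:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/memfs"
	"gopkg.in/src-d/go-billy.v4/util"
)

func main() {
	fs := memfs.New()

	// With an empty dir argument, TempDir falls back to os.TempDir()
	// and picks a collision-free name via nextSuffix.
	dir, err := util.TempDir(fs, "", "demo")
	if err != nil {
		panic(err)
	}

	// WriteFile mirrors ioutil.WriteFile on top of billy.Basic.
	path := fs.Join(dir, "hello.txt")
	if err := util.WriteFile(fs, path, []byte("hello\n"), 0644); err != nil {
		panic(err)
	}

	fi, err := fs.Stat(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Size()) // 6

	// RemoveAll uses the filesystem's own RemoveAll when available and
	// falls back to the recursive removeAll above otherwise.
	if err := util.RemoveAll(fs, dir); err != nil {
		panic(err)
	}
}
```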
diff --git a/vendor/gopkg.in/src-d/go-git.v4/.gitignore b/vendor/gopkg.in/src-d/go-git.v4/.gitignore
deleted file mode 100644
index 038dd9f1ed..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-coverage.out
-*~
-coverage.txt
-profile.out
diff --git a/vendor/gopkg.in/src-d/go-git.v4/.travis.yml b/vendor/gopkg.in/src-d/go-git.v4/.travis.yml
deleted file mode 100644
index 3a65f3e082..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-language: go
-
-go:
- - "1.11"
- - "1.12"
-
-go_import_path: gopkg.in/src-d/go-git.v4
-
-env:
- - GIT_VERSION=master
- - GIT_VERSION=v1.9.3
- - GIT_VERSION=v2.11.0
-
-cache:
- directories:
- - $HOME/.git-dist
-
-before_script:
- - export GIT_DIST_PATH=$HOME/.git-dist
- - make build-git
-
-before_install:
- - git config --global user.email "travis@example.com"
- - git config --global user.name "Travis CI"
-
-install:
- - go get -v -t ./...
-
-script:
- - export GIT_EXEC_PATH=$GIT_DIST_PATH
- - export PATH=$GIT_DIST_PATH:$PATH
- - git version
- - make test-coverage
- - go vet ./...
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/CODE_OF_CONDUCT.md b/vendor/gopkg.in/src-d/go-git.v4/CODE_OF_CONDUCT.md
deleted file mode 100644
index a689fa3c34..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, gender identity and expression, level of experience,
-education, socio-economic status, nationality, personal appearance, race,
-religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at conduct@sourced.tech. All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. The project team is
-obligated to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
diff --git a/vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md b/vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md
deleted file mode 100644
index 4a3da62fcd..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md
+++ /dev/null
@@ -1,111 +0,0 @@
-Supported Capabilities
-======================
-
-Here is a non-comprehensive table of git commands and features whose equivalent
-is supported by go-git.
-
-| Feature | Status | Notes |
-|---------------------------------------|--------|-------|
-| **config** |
-| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. |
-| **getting and creating repositories** |
-| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
-| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
-| **basic snapshotting** |
-| add | ✔ | Plain add is supported. No other flags are supported |
-| status | ✔ |
-| commit | ✔ |
-| reset | ✔ |
-| rm | ✔ |
-| mv | ✔ |
-| **branching and merging** |
-| branch | ✔ |
-| checkout | ✔ | Basic usages of checkout are supported. |
-| merge | ✖ |
-| mergetool | ✖ |
-| stash | ✖ |
-| tag | ✔ |
-| **sharing and updating projects** |
-| fetch | ✔ |
-| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. |
-| push | ✔ |
-| remote | ✔ |
-| submodule | ✔ |
-| **inspection and comparison** |
-| show | ✔ |
-| log | ✔ |
-| shortlog | (see log) |
-| describe | |
-| **patching** |
-| apply | ✖ |
-| cherry-pick | ✖ |
-| diff | ✔ | Patch object with UnifiedDiff output representation |
-| rebase | ✖ |
-| revert | ✖ |
-| **debugging** |
-| bisect | ✖ |
-| blame | ✔ |
-| grep | ✔ |
-| **email** ||
-| am | ✖ |
-| apply | ✖ |
-| format-patch | ✖ |
-| send-email | ✖ |
-| request-pull | ✖ |
-| **external systems** |
-| svn | ✖ |
-| fast-import | ✖ |
-| **administration** |
-| clean | ✔ |
-| gc | ✖ |
-| fsck | ✖ |
-| reflog | ✖ |
-| filter-branch | ✖ |
-| instaweb | ✖ |
-| archive | ✖ |
-| bundle | ✖ |
-| prune | ✖ |
-| repack | ✖ |
-| **server admin** |
-| daemon | |
-| update-server-info | |
-| **advanced** |
-| notes | ✖ |
-| replace | ✖ |
-| worktree | ✖ |
-| annotate | (see blame) |
-| **gpg** |
-| git-verify-commit | ✔ |
-| git-verify-tag | ✔ |
-| **plumbing commands** |
-| cat-file | ✔ |
-| check-ignore | |
-| commit-tree | |
-| count-objects | |
-| diff-index | |
-| for-each-ref | ✔ |
-| hash-object | ✔ |
-| ls-files | ✔ |
-| merge-base | ✔ | Calculates the merge-base only between two commits, and supports the `--independent` and `--is-ancestor` modifiers; it does not support the `--fork-point` or `--octopus` modifiers. |
-| read-tree | |
-| rev-list | ✔ |
-| rev-parse | |
-| show-ref | ✔ |
-| symbolic-ref | ✔ |
-| update-index | |
-| update-ref | |
-| verify-pack | |
-| write-tree | |
-| **protocols** |
-| http(s):// (dumb) | ✖ |
-| http(s):// (smart) | ✔ |
-| git:// | ✔ |
-| ssh:// | ✔ |
-| file:// | ✔ |
-| custom | ✔ |
-| **other features** |
-| gitignore | ✔ |
-| gitattributes | ✖ |
-| index version | |
-| packfile version | |
-| push-certs | ✖ |
diff --git a/vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md b/vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md
deleted file mode 100644
index bdb5f73341..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Contributing Guidelines
-
-The source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts
-contributions via GitHub pull requests. This document outlines some of the
-conventions on development workflow, commit message formatting, contact points,
-and other resources to make it easier to get your contribution accepted.
-
-## Certificate of Origin
-
-By contributing to this project you agree to the [Developer Certificate of
-Origin (DCO)](DCO). This document was created by the Linux Kernel community and is a
-simple statement that you, as a contributor, have the legal right to make the
-contribution.
-
-To show your agreement with the DCO, include the following line at the end of
-your commit message, using your real name: `Signed-off-by: John Doe <john.doe@example.com>`.
-
-This can be done easily using the [`-s`](https://github.com/git/git/blob/b2c150d3aa82f6583b9aadfecc5f8fa1c74aca09/Documentation/git-commit.txt#L154-L161) flag on `git commit`.
-
-## Support Channels
-
-The official support channels, for both users and contributors, are:
-
-- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions.
-- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests.
-- Slack: #go-git room in the [source{d} Slack](https://join.slack.com/t/sourced-community/shared_invite/enQtMjc4Njk5MzEyNzM2LTFjNzY4NjEwZGEwMzRiNTM4MzRlMzQ4MmIzZjkwZmZlM2NjODUxZmJjNDI1OTcxNDAyMmZlNmFjODZlNTg0YWM)
-
-*Before opening a new issue or submitting a new pull request, it's helpful to
-search the project first - it's likely that another user has already reported
-the issue you're facing, or that it's a known issue we're already tracking.
-
-
-## How to Contribute
-
-Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project.
-In order for a PR to be accepted it needs to pass a list of requirements:
-
-- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation.
-- The expected behavior must match the [official git implementation](https://github.com/git/git).
-- The actual behavior must be clearly explained in natural language, along with a minimal working example in Go that reproduces it.
-- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
-- PRs should in general include tests, and those tests must pass.
-- If the PR is a bug fix, it has to include a suite of unit tests that cover the fixed behavior.
-- If the PR is a new feature, it has to come with a suite of unit tests that cover the new functionality.
-- In any case, all the PRs have to pass the personal evaluation of at least one of the [maintainers](MAINTAINERS) of go-git.
-
-### Format of the commit message
-
-Every commit message should describe what was changed, in which context, and, if applicable, the GitHub issue it relates to:
-
-```
-plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
-```
-
-The format can be described more formally as follows:
-
-```
-<package>: <subpackage>, <what changed>. [Fixes #<issue-number>]
-```
diff --git a/vendor/gopkg.in/src-d/go-git.v4/DCO b/vendor/gopkg.in/src-d/go-git.v4/DCO
deleted file mode 100644
index 3aca339def..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/DCO
+++ /dev/null
@@ -1,36 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved. \ No newline at end of file
diff --git a/vendor/gopkg.in/src-d/go-git.v4/LICENSE b/vendor/gopkg.in/src-d/go-git.v4/LICENSE
deleted file mode 100644
index 8aa3d854cf..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2018 Sourced Technologies, S.L.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS b/vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS
deleted file mode 100644
index ff2129c45f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS
+++ /dev/null
@@ -1,3 +0,0 @@
-Máximo Cuadros <mcuadros@gmail.com> (@mcuadros)
-Jeremy Stribling <strib@alum.mit.edu> (@strib)
-Ori Rawlings <orirawlings@gmail.com> (@orirawlings)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/Makefile b/vendor/gopkg.in/src-d/go-git.v4/Makefile
deleted file mode 100644
index d576778f4d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-# General
-WORKDIR = $(PWD)
-
-# Go parameters
-GOCMD = go
-GOTEST = $(GOCMD) test -v
-
-# Git config
-GIT_VERSION ?=
-GIT_DIST_PATH ?= $(PWD)/.git-dist
-GIT_REPOSITORY = http://github.com/git/git.git
-
-# Coverage
-COVERAGE_REPORT = coverage.txt
-COVERAGE_PROFILE = profile.out
-COVERAGE_MODE = atomic
-
-ifneq ($(origin CI), undefined)
- WORKDIR := $(GOPATH)/src/gopkg.in/src-d/go-git.v4
-endif
-
-build-git:
- @if [ -f $(GIT_DIST_PATH)/git ]; then \
- echo "nothing to do, using cache $(GIT_DIST_PATH)"; \
- else \
- git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \
- cd $(GIT_DIST_PATH); \
- make configure; \
- ./configure; \
- make all; \
- fi
-
-test:
- @cd $(WORKDIR); \
- $(GOTEST) ./...
-
-test-coverage:
- @cd $(WORKDIR); \
- echo "" > $(COVERAGE_REPORT); \
- for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \
- $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \
- if [ $$? != 0 ]; then \
- exit 2; \
- fi; \
- if [ -f $(COVERAGE_PROFILE) ]; then \
- cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \
- rm $(COVERAGE_PROFILE); \
- fi; \
- done; \
-
-clean:
- rm -rf $(GIT_DIST_PATH) \ No newline at end of file
diff --git a/vendor/gopkg.in/src-d/go-git.v4/README.md b/vendor/gopkg.in/src-d/go-git.v4/README.md
deleted file mode 100644
index ed9306c83f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
-[![GoDoc](https://godoc.org/gopkg.in/src-d/go-git.v4?status.svg)](https://godoc.org/github.com/src-d/go-git) [![Build Status](https://travis-ci.org/src-d/go-git.svg)](https://travis-ci.org/src-d/go-git) [![Build status](https://ci.appveyor.com/api/projects/status/nyidskwifo4py6ub?svg=true)](https://ci.appveyor.com/project/mcuadros/go-git) [![codecov.io](https://codecov.io/github/src-d/go-git/coverage.svg)](https://codecov.io/github/src-d/go-git) [![Go Report Card](https://goreportcard.com/badge/github.com/src-d/go-git)](https://goreportcard.com/report/github.com/src-d/go-git)
-
-*go-git* is a highly extensible git implementation library written in **pure Go**.
-
-It can be used to manipulate git repositories at a low level *(plumbing)* or a high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, as well as custom implementations, thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface.
-
-It has been actively developed since 2015 and is used extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools.
-
-Comparison with git
--------------------
-
-*go-git* aims to be fully compatible with [git](https://github.com/git/git); all the *porcelain* operations are implemented to work exactly as *git* does.
-
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
-
-
-Installation
-------------
-
-The recommended way to install *go-git* is:
-
-```
-go get -u gopkg.in/src-d/go-git.v4/...
-```
-
-> We use [gopkg.in](http://labix.org/gopkg.in) to version the API; this means that when `go get` clones the package, it clones the latest tag matching `v4.*` rather than the master branch.
-
-Examples
---------
-
-> Please note that the `CheckIfError` and `Info` functions used in the examples come from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) and exist only to keep the examples short.
-
-
-### Basic example
-
-A basic example that mimics the standard `git clone` command:
-
-```go
-// Clone the given repository to the given directory
-Info("git clone https://github.com/src-d/go-git")
-
-_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
- URL: "https://github.com/src-d/go-git",
- Progress: os.Stdout,
-})
-
-CheckIfError(err)
-```
-
-Outputs:
-```
-Counting objects: 4924, done.
-Compressing objects: 100% (1333/1333), done.
-Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533
-```
-
-### In-memory example
-
-Cloning a repository into memory and printing the history of HEAD, just like `git log` does:
-
-
-```go
-// Clones the given repository in memory, creating the remote, the local
-// branches and fetching the objects, exactly as:
-Info("git clone https://github.com/src-d/go-siva")
-
-r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
- URL: "https://github.com/src-d/go-siva",
-})
-
-CheckIfError(err)
-
-// Gets the commit history from HEAD, just like this command:
-Info("git log")
-
-// ... retrieves the branch pointed by HEAD
-ref, err := r.Head()
-CheckIfError(err)
-
-
-// ... retrieves the commit history
-cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
-CheckIfError(err)
-
-// ... just iterates over the commits, printing each one
-err = cIter.ForEach(func(c *object.Commit) error {
- fmt.Println(c)
- return nil
-})
-CheckIfError(err)
-```
-
-Outputs:
-```
-commit ded8054fd0c3994453e9c8aacaf48d118d42991e
-Author: Santiago M. Mola <santi@mola.io>
-Date: Sat Nov 12 21:18:41 2016 +0100
-
- index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9)
-
-commit df707095626f384ce2dc1a83b30f9a21d69b9dfc
-Author: Santiago M. Mola <santi@mola.io>
-Date: Fri Nov 11 13:23:22 2016 +0100
-
- readwriter: fix bug when writing index. (#10)
-
- When using ReadWriter on an existing siva file, absolute offset for
- index entries was not being calculated correctly.
-...
-```
-
-You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
-
-Contribute
-----------
-
-[Contributions](https://github.com/src-d/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to
-our [Contributing Guidelines](CONTRIBUTING.md).
-
-License
--------
-Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/appveyor.yml b/vendor/gopkg.in/src-d/go-git.v4/appveyor.yml
deleted file mode 100644
index 160616bec9..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/appveyor.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: "{build}"
-platform: x64
-
-matrix:
- allow_failures:
- - platform: x64
-
-clone_folder: c:\gopath\src\gopkg.in\src-d\go-git.v4
-
-environment:
- GOPATH: c:\gopath
-
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;"C:\Program Files\Git\mingw64\bin";%PATH%
- - go version
- - go get -v -t ./...
- - git config --global user.email "travis@example.com"
- git config --global user.name "Travis CI"
-
-build_script:
- - go test -v ./...
diff --git a/vendor/gopkg.in/src-d/go-git.v4/blame.go b/vendor/gopkg.in/src-d/go-git.v4/blame.go
deleted file mode 100644
index f6108519ad..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/blame.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package git
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/utils/diff"
-)
-
-// BlameResult represents the result of a Blame operation.
-type BlameResult struct {
- // Path is the path of the File that we're blaming.
- Path string
- // Rev (Revision) is the hash of the specified Commit used to generate this result.
- Rev plumbing.Hash
- // Lines contains every line with its authorship.
- Lines []*Line
-}
-
-// Blame returns a BlameResult with the information about the last author of
-// each line from file `path` at commit `c`.
-func Blame(c *object.Commit, path string) (*BlameResult, error) {
- // The file to blame is identified by the input arguments:
- // commit and path. commit is a Commit object obtained from a Repository. Path
- // represents a path to a specific file contained into the repository.
- //
- // Blaming a file is a two step process:
- //
- // 1. Create a linear history of the commits affecting a file. We use
- // revlist.New for that.
- //
- // 2. Then build a graph with a node for every line in every file in
- // the history of the file.
- //
- // Each node is assigned a commit: Start by the nodes in the first
- // commit. Assign that commit as the creator of all its lines.
- //
- // Then jump to the nodes in the next commit, and calculate the diff
- // between the two files. Newly created lines get
- // assigned the new commit as its origin. Modified lines also get
- // this new commit. Untouched lines retain the old commit.
- //
- // All this work is done in the assignOrigin function which holds all
- // the internal relevant data in a "blame" struct, that is not
- // exported.
- //
- // TODO: ways to improve the efficiency of this function:
- // 1. Improve revlist
- // 2. Improve how to traverse the history (example a backward traversal will
- // be much more efficient)
- //
- // TODO: ways to improve the function in general:
- // 1. Add memoization between revlist and assign.
- // 2. It is using much more memory than needed, see the TODOs below.
-
- b := new(blame)
- b.fRev = c
- b.path = path
-
- // get all the file revisions
- if err := b.fillRevs(); err != nil {
- return nil, err
- }
-
- // calculate the line tracking graph and fill in
- // file contents in data.
- if err := b.fillGraphAndData(); err != nil {
- return nil, err
- }
-
- file, err := b.fRev.File(b.path)
- if err != nil {
- return nil, err
- }
- finalLines, err := file.Lines()
- if err != nil {
- return nil, err
- }
-
- // Each node (line) holds the commit where it was introduced or
- // last modified. To achieve that we use the FORWARD algorithm
- // described in Zimmermann, et al. "Mining Version Archives for
- // Co-changed Lines", in proceedings of the Mining Software
- // Repositories workshop, Shanghai, May 22-23, 2006.
- lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
- if err != nil {
- return nil, err
- }
-
- return &BlameResult{
- Path: path,
- Rev: c.Hash,
- Lines: lines,
- }, nil
-}
-
-// Line values represent the contents and author of a line in BlameResult values.
-type Line struct {
- // Author is the email address of the last author that modified the line.
- Author string
- // Text is the original text of the line.
- Text string
- // Date is when the original text of the line was introduced
- Date time.Time
- // Hash is the commit hash that introduced the original line
- Hash plumbing.Hash
-}
-
-func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
- return &Line{
- Author: author,
- Text: text,
- Hash: hash,
- Date: date,
- }
-}
-
-func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
- lcontents := len(contents)
- lcommits := len(commits)
-
- if lcontents != lcommits {
- if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
- contents = append(contents, "\n")
- } else {
- return nil, errors.New("contents and commits have different length")
- }
- }
-
- result := make([]*Line, 0, lcontents)
- for i := range contents {
- result = append(result, newLine(
- commits[i].Author.Email, contents[i],
- commits[i].Author.When, commits[i].Hash,
- ))
- }
-
- return result, nil
-}
-
-// this struct is internally used by the blame function to hold its
-// inputs, outputs and state.
-type blame struct {
- // the path of the file to blame
- path string
- // the commit of the final revision of the file to blame
- fRev *object.Commit
- // the chain of revisions affecting the file to blame
- revs []*object.Commit
- // the contents of the file across all its revisions
- data []string
- // the graph of the lines in the file across all the revisions
- graph [][]*object.Commit
-}
-
-// fillRevs calculates the history of the file at b.path, starting from the final revision b.fRev, sorted by commit date.
-func (b *blame) fillRevs() error {
- var err error
-
- b.revs, err = references(b.fRev, b.path)
- return err
-}
-
-// build graph of a file from its revision history
-func (b *blame) fillGraphAndData() error {
- //TODO: not all commits are needed, only the current rev and the prev
- b.graph = make([][]*object.Commit, len(b.revs))
- b.data = make([]string, len(b.revs)) // file contents in all the revisions
- // for every revision of the file, starting with the first
- // one...
- for i, rev := range b.revs {
- // get the contents of the file
- file, err := rev.File(b.path)
- if err != nil {
- return err
- }
- b.data[i], err = file.Contents()
- if err != nil {
- return err
- }
- nLines := countLines(b.data[i])
- // create a node for each line
- b.graph[i] = make([]*object.Commit, nLines)
- // assign a commit to each node
- // if this is the first revision, then the node is assigned to
- // this first commit.
- if i == 0 {
- for j := 0; j < nLines; j++ {
- b.graph[i][j] = b.revs[i]
- }
- } else {
- // if this is not the first commit, then assign to the old
- // commit or to the new one, depending on what the diff
- // says.
- b.assignOrigin(i, i-1)
- }
- }
- return nil
-}
-
-// sliceGraph returns a slice of commits (one per line) for a particular
-// revision of a file (0=first revision).
-func (b *blame) sliceGraph(i int) []*object.Commit {
- fVs := b.graph[i]
- result := make([]*object.Commit, 0, len(fVs))
- for _, v := range fVs {
- c := *v
- result = append(result, &c)
- }
- return result
-}
-
-// assignOrigin assigns an origin to the vertices of the current (c) revision
-// based on the diff against its previous (p) revision.
-func (b *blame) assignOrigin(c, p int) {
- // assign origin based on diff info
- hunks := diff.Do(b.data[p], b.data[c])
- sl := -1 // source line
- dl := -1 // destination line
- for h := range hunks {
- hLines := countLines(hunks[h].Text)
- for hl := 0; hl < hLines; hl++ {
- switch {
- case hunks[h].Type == 0:
- sl++
- dl++
- b.graph[c][dl] = b.graph[p][sl]
- case hunks[h].Type == 1:
- dl++
- b.graph[c][dl] = b.revs[c]
- case hunks[h].Type == -1:
- sl++
- default:
- panic("unreachable")
- }
- }
- }
-}
-
-// GoString returns the results of a Blame formatted in git-blame's style.
-func (b *blame) GoString() string {
- var buf bytes.Buffer
-
- file, err := b.fRev.File(b.path)
- if err != nil {
- panic("PrettyPrint: internal error in repo.Data")
- }
- contents, err := file.Contents()
- if err != nil {
- panic("PrettyPrint: internal error in repo.Data")
- }
-
- lines := strings.Split(contents, "\n")
- // max line number length
- mlnl := len(strconv.Itoa(len(lines)))
- // max author length
- mal := b.maxAuthorLength()
- format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
- mal, mlnl)
-
- fVs := b.graph[len(b.graph)-1]
- for ln, v := range fVs {
- fmt.Fprintf(&buf, format, v.Hash.String()[:8],
- prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
- }
- return buf.String()
-}
-
-// utility function to pretty print the author.
-func prettyPrintAuthor(c *object.Commit) string {
- return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
-}
-
-// utility function to calculate the number of runes needed
-// to print the longest author name in the blame of a file.
-func (b *blame) maxAuthorLength() int {
- memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
- fVs := b.graph[len(b.graph)-1]
- m := 0
- for ln := range fVs {
- if _, ok := memo[fVs[ln].Hash]; ok {
- continue
- }
- memo[fVs[ln].Hash] = struct{}{}
- m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
- }
- return m
-}
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
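
A minimal sketch of calling `Blame`, assuming an existing repository in the working directory and a tracked file named README.md (both hypothetical), and using the v4 `PlainOpen`/`CommitObject` API:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4"
)

func main() {
	// Open an assumed-existing repository in the working directory.
	r, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	ref, err := r.Head()
	if err != nil {
		panic(err)
	}

	commit, err := r.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Blame a hypothetical tracked file at HEAD.
	result, err := git.Blame(commit, "README.md")
	if err != nil {
		panic(err)
	}

	for i, line := range result.Lines {
		fmt.Printf("%4d %s %s\n", i+1, line.Hash.String()[:8], line.Author)
	}
}
```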
diff --git a/vendor/gopkg.in/src-d/go-git.v4/common.go b/vendor/gopkg.in/src-d/go-git.v4/common.go
deleted file mode 100644
index f837a2654c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/common.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package git
-
-import "strings"
-
-const defaultDotGitPath = ".git"
-
-// countLines returns the number of lines in a string, counted à la git.
-// The newline character is assumed to be '\n'. The empty string
-// contains 0 lines. If the last line of the string doesn't end with a
-// newline, it will still be considered a line.
-func countLines(s string) int {
- if s == "" {
- return 0
- }
-
- nEOL := strings.Count(s, "\n")
- if strings.HasSuffix(s, "\n") {
- return nEOL
- }
-
- return nEOL + 1
-}
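
A test-style sketch of the `countLines` behavior documented above; since the function is unexported, this would have to live in package git:

```go
package git

import "testing"

func TestCountLines(t *testing.T) {
	cases := map[string]int{
		"":       0, // the empty string contains no lines
		"a\n":    1,
		"a\nb\n": 2,
		"a\nb":   2, // a trailing line without '\n' still counts
	}
	for in, want := range cases {
		if got := countLines(in); got != want {
			t.Errorf("countLines(%q) = %d, want %d", in, got, want)
		}
	}
}
```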
diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/branch.go b/vendor/gopkg.in/src-d/go-git.v4/config/branch.go
deleted file mode 100644
index 20dde6e030..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/config/branch.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package config
-
-import (
- "errors"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
-)
-
-var (
- errBranchEmptyName = errors.New("branch config: empty name")
- errBranchInvalidMerge = errors.New("branch config: invalid merge")
- errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
-)
-
-// Branch contains information on a local branch
-// and which remote it tracks.
-type Branch struct {
- // Name of branch
- Name string
- // Remote name of remote to track
- Remote string
- // Merge is the local refspec for the branch
- Merge plumbing.ReferenceName
- // Rebase instead of merge when pulling. Valid values are
- // "true" and "interactive". "false" is undocumented and
- // typically represented by the non-existence of this field.
- Rebase string
-
- raw *format.Subsection
-}
-
-// Validate validates the fields of the branch.
-func (b *Branch) Validate() error {
- if b.Name == "" {
- return errBranchEmptyName
- }
-
- if b.Merge != "" && !b.Merge.IsBranch() {
- return errBranchInvalidMerge
- }
-
- if b.Rebase != "" &&
- b.Rebase != "true" &&
- b.Rebase != "interactive" &&
- b.Rebase != "false" {
- return errBranchInvalidRebase
- }
-
- return nil
-}
-
-func (b *Branch) marshal() *format.Subsection {
- if b.raw == nil {
- b.raw = &format.Subsection{}
- }
-
- b.raw.Name = b.Name
-
- if b.Remote == "" {
- b.raw.RemoveOption(remoteSection)
- } else {
- b.raw.SetOption(remoteSection, b.Remote)
- }
-
- if b.Merge == "" {
- b.raw.RemoveOption(mergeKey)
- } else {
- b.raw.SetOption(mergeKey, string(b.Merge))
- }
-
- if b.Rebase == "" {
- b.raw.RemoveOption(rebaseKey)
- } else {
- b.raw.SetOption(rebaseKey, b.Rebase)
- }
-
- return b.raw
-}
-
-func (b *Branch) unmarshal(s *format.Subsection) error {
- b.raw = s
-
- b.Name = b.raw.Name
- b.Remote = b.raw.Options.Get(remoteSection)
- b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
- b.Rebase = b.raw.Options.Get(rebaseKey)
-
- return b.Validate()
-}
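
A short sketch exercising `Branch.Validate`, using only the exported API shown above:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	b := &config.Branch{
		Name:   "feature",
		Remote: "origin",
		Merge:  plumbing.ReferenceName("refs/heads/feature"),
		Rebase: "interactive",
	}

	if err := b.Validate(); err != nil {
		panic(err)
	}
	fmt.Println("branch config is valid")

	// A Merge refspec outside refs/heads/ is rejected:
	b.Merge = plumbing.ReferenceName("refs/tags/v1")
	fmt.Println(b.Validate()) // branch config: invalid merge
}
```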
diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/config.go b/vendor/gopkg.in/src-d/go-git.v4/config/config.go
deleted file mode 100644
index ea614e96dd..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/config/config.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Package config contains the abstraction of multiple config files
-package config
-
-import (
- "bytes"
- "errors"
- "fmt"
- "sort"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/internal/url"
- format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
-)
-
-const (
- // DefaultFetchRefSpec is the default refspec used for fetch.
- DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*"
- // DefaultPushRefSpec is the default refspec used for push.
- DefaultPushRefSpec = "refs/heads/*:refs/heads/*"
-)
-
-// ConfigStorer generic storage of Config object
-type ConfigStorer interface {
- Config() (*Config, error)
- SetConfig(*Config) error
-}
-
-var (
- ErrInvalid = errors.New("config invalid key in remote or branch")
- ErrRemoteConfigNotFound = errors.New("remote config not found")
- ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL")
- ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
-)
-
-// Config contains the repository configuration
-// ftp://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
-type Config struct {
- Core struct {
- // IsBare if true this repository is assumed to be bare and has no
- // working directory associated with it.
- IsBare bool
- // Worktree is the path to the root of the working tree.
- Worktree string
- // CommentChar is the character indicating the start of a
- // comment for commands like commit and tag
- CommentChar string
- }
-
- Pack struct {
- // Window controls the size of the sliding window for delta
- // compression. The default is 10. A value of 0 turns off
- // delta compression entirely.
- Window uint
- }
-
- // Remotes is the list of repository remotes; the key of the map is the
- // name of the remote and should equal RemoteConfig.Name.
- Remotes map[string]*RemoteConfig
- // Submodules is the list of repository submodules; the key of the map is
- // the name of the submodule and should equal Submodule.Name.
- Submodules map[string]*Submodule
- // Branches is the list of branches; the key is the branch name and
- // should equal Branch.Name.
- Branches map[string]*Branch
- // Raw contains the raw information of a config file. The main goal is
- // to preserve the parsed information from the original format, to avoid
- // dropping unsupported fields.
- Raw *format.Config
-}
-
-// NewConfig returns a new empty Config.
-func NewConfig() *Config {
- config := &Config{
- Remotes: make(map[string]*RemoteConfig),
- Submodules: make(map[string]*Submodule),
- Branches: make(map[string]*Branch),
- Raw: format.New(),
- }
-
- config.Pack.Window = DefaultPackWindow
-
- return config
-}
-
-// Validate validates the fields and sets the default values.
-func (c *Config) Validate() error {
- for name, r := range c.Remotes {
- if r.Name != name {
- return ErrInvalid
- }
-
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- for name, b := range c.Branches {
- if b.Name != name {
- return ErrInvalid
- }
-
- if err := b.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-const (
- remoteSection = "remote"
- submoduleSection = "submodule"
- branchSection = "branch"
- coreSection = "core"
- packSection = "pack"
- fetchKey = "fetch"
- urlKey = "url"
- bareKey = "bare"
- worktreeKey = "worktree"
- commentCharKey = "commentChar"
- windowKey = "window"
- mergeKey = "merge"
- rebaseKey = "rebase"
-
- // DefaultPackWindow holds the number of previous objects used to
- // generate deltas. The value 10 is the same used by git command.
- DefaultPackWindow = uint(10)
-)
-
-// Unmarshal parses a git-config file and stores it.
-func (c *Config) Unmarshal(b []byte) error {
- r := bytes.NewBuffer(b)
- d := format.NewDecoder(r)
-
- c.Raw = format.New()
- if err := d.Decode(c.Raw); err != nil {
- return err
- }
-
- c.unmarshalCore()
- if err := c.unmarshalPack(); err != nil {
- return err
- }
- unmarshalSubmodules(c.Raw, c.Submodules)
-
- if err := c.unmarshalBranches(); err != nil {
- return err
- }
-
- return c.unmarshalRemotes()
-}
-
-func (c *Config) unmarshalCore() {
- s := c.Raw.Section(coreSection)
- if s.Options.Get(bareKey) == "true" {
- c.Core.IsBare = true
- }
-
- c.Core.Worktree = s.Options.Get(worktreeKey)
- c.Core.CommentChar = s.Options.Get(commentCharKey)
-}
-
-func (c *Config) unmarshalPack() error {
- s := c.Raw.Section(packSection)
- window := s.Options.Get(windowKey)
- if window == "" {
- c.Pack.Window = DefaultPackWindow
- } else {
- winUint, err := strconv.ParseUint(window, 10, 32)
- if err != nil {
- return err
- }
- c.Pack.Window = uint(winUint)
- }
- return nil
-}
-
-func (c *Config) unmarshalRemotes() error {
- s := c.Raw.Section(remoteSection)
- for _, sub := range s.Subsections {
- r := &RemoteConfig{}
- if err := r.unmarshal(sub); err != nil {
- return err
- }
-
- c.Remotes[r.Name] = r
- }
-
- return nil
-}
-
-func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
- s := fc.Section(submoduleSection)
- for _, sub := range s.Subsections {
- m := &Submodule{}
- m.unmarshal(sub)
-
- if m.Validate() == ErrModuleBadPath {
- continue
- }
-
- submodules[m.Name] = m
- }
-}
-
-func (c *Config) unmarshalBranches() error {
- bs := c.Raw.Section(branchSection)
- for _, sub := range bs.Subsections {
- b := &Branch{}
-
- if err := b.unmarshal(sub); err != nil {
- return err
- }
-
- c.Branches[b.Name] = b
- }
- return nil
-}
-
-// Marshal returns Config encoded as a git-config file.
-func (c *Config) Marshal() ([]byte, error) {
- c.marshalCore()
- c.marshalPack()
- c.marshalRemotes()
- c.marshalSubmodules()
- c.marshalBranches()
-
- buf := bytes.NewBuffer(nil)
- if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
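As a quick illustration of the Unmarshal/Marshal pair above, the following sketch round-trips a config blob through the exported API; the sample config text is invented for the example and error handling is minimal:

	import (
		"fmt"

		"gopkg.in/src-d/go-git.v4/config"
	)

	func roundTrip() {
		raw := []byte("[remote \"origin\"]\n" +
			"\turl = https://github.com/src-d/go-git.git\n" +
			"\tfetch = +refs/heads/*:refs/remotes/origin/*\n")

		cfg := config.NewConfig()
		if err := cfg.Unmarshal(raw); err != nil {
			panic(err)
		}
		fmt.Println(cfg.Remotes["origin"].URLs[0]) // https://github.com/src-d/go-git.git

		// Marshal re-encodes via cfg.Raw, so unsupported sections survive the trip.
		out, _ := cfg.Marshal()
		fmt.Printf("%s", out)
	}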
-
-func (c *Config) marshalCore() {
- s := c.Raw.Section(coreSection)
- s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
-
- if c.Core.Worktree != "" {
- s.SetOption(worktreeKey, c.Core.Worktree)
- }
-}
-
-func (c *Config) marshalPack() {
- s := c.Raw.Section(packSection)
- if c.Pack.Window != DefaultPackWindow {
- s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window))
- }
-}
-
-func (c *Config) marshalRemotes() {
- s := c.Raw.Section(remoteSection)
- newSubsections := make(format.Subsections, 0, len(c.Remotes))
- added := make(map[string]bool)
- for _, subsection := range s.Subsections {
- if remote, ok := c.Remotes[subsection.Name]; ok {
- newSubsections = append(newSubsections, remote.marshal())
- added[subsection.Name] = true
- }
- }
-
- remoteNames := make([]string, 0, len(c.Remotes))
- for name := range c.Remotes {
- remoteNames = append(remoteNames, name)
- }
-
- sort.Strings(remoteNames)
-
- for _, name := range remoteNames {
- if !added[name] {
- newSubsections = append(newSubsections, c.Remotes[name].marshal())
- }
- }
-
- s.Subsections = newSubsections
-}
-
-func (c *Config) marshalSubmodules() {
- s := c.Raw.Section(submoduleSection)
- s.Subsections = make(format.Subsections, len(c.Submodules))
-
- var i int
- for _, r := range c.Submodules {
- section := r.marshal()
- // The submodule section in the config file is a subset of the .gitmodules
- // file, so we remove the options that are not valid in the config file.
- section.RemoveOption(pathKey)
- s.Subsections[i] = section
- i++
- }
-}
-
-func (c *Config) marshalBranches() {
- s := c.Raw.Section(branchSection)
- newSubsections := make(format.Subsections, 0, len(c.Branches))
- added := make(map[string]bool)
- for _, subsection := range s.Subsections {
- if branch, ok := c.Branches[subsection.Name]; ok {
- newSubsections = append(newSubsections, branch.marshal())
- added[subsection.Name] = true
- }
- }
-
- branchNames := make([]string, 0, len(c.Branches))
- for name := range c.Branches {
- branchNames = append(branchNames, name)
- }
-
- sort.Strings(branchNames)
-
- for _, name := range branchNames {
- if !added[name] {
- newSubsections = append(newSubsections, c.Branches[name].marshal())
- }
- }
-
- s.Subsections = newSubsections
-}
-
-// RemoteConfig contains the configuration for a given remote repository.
-type RemoteConfig struct {
- // Name of the remote
- Name string
- // URLs the URLs of a remote repository. It must be non-empty. Fetch will
- // always use the first URL, while push will use all of them.
- URLs []string
- // Fetch is the default set of refspecs for the fetch operation.
- Fetch []RefSpec
-
- // raw representation of the subsection, filled when marshal or unmarshal
- // is called.
- raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (c *RemoteConfig) Validate() error {
- if c.Name == "" {
- return ErrRemoteConfigEmptyName
- }
-
- if len(c.URLs) == 0 {
- return ErrRemoteConfigEmptyURL
- }
-
- for _, r := range c.Fetch {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- if len(c.Fetch) == 0 {
- c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
- }
-
- return nil
-}
-
-func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
- c.raw = s
-
- fetch := []RefSpec{}
- for _, f := range c.raw.Options.GetAll(fetchKey) {
- rs := RefSpec(f)
- if err := rs.Validate(); err != nil {
- return err
- }
-
- fetch = append(fetch, rs)
- }
-
- c.Name = c.raw.Name
- c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
- c.Fetch = fetch
-
- return nil
-}
-
-func (c *RemoteConfig) marshal() *format.Subsection {
- if c.raw == nil {
- c.raw = &format.Subsection{}
- }
-
- c.raw.Name = c.Name
- if len(c.URLs) == 0 {
- c.raw.RemoveOption(urlKey)
- } else {
- c.raw.SetOption(urlKey, c.URLs...)
- }
-
- if len(c.Fetch) == 0 {
- c.raw.RemoveOption(fetchKey)
- } else {
- var values []string
- for _, rs := range c.Fetch {
- values = append(values, rs.String())
- }
-
- c.raw.SetOption(fetchKey, values...)
- }
-
- return c.raw
-}
-
-func (c *RemoteConfig) IsFirstURLLocal() bool {
- return url.IsLocalEndpoint(c.URLs[0])
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/modules.go b/vendor/gopkg.in/src-d/go-git.v4/config/modules.go
deleted file mode 100644
index 90758d9327..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/config/modules.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package config
-
-import (
- "bytes"
- "errors"
- "regexp"
-
- format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
-)
-
-var (
- ErrModuleEmptyURL = errors.New("module config: empty URL")
- ErrModuleEmptyPath = errors.New("module config: empty path")
- ErrModuleBadPath = errors.New("submodule has an invalid path")
-)
-
-var (
- // Matches module paths with dotdot ".." components.
- dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
-)
-
- // Modules defines the submodule properties; it represents a .gitmodules file.
-// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html
-type Modules struct {
- // Submodules is a map of submodules, keyed by the submodule name.
- Submodules map[string]*Submodule
-
- raw *format.Config
-}
-
-// NewModules returns a new empty Modules
-func NewModules() *Modules {
- return &Modules{
- Submodules: make(map[string]*Submodule),
- raw: format.New(),
- }
-}
-
-const (
- pathKey = "path"
- branchKey = "branch"
-)
-
-// Unmarshal parses a git-config file and stores it.
-func (m *Modules) Unmarshal(b []byte) error {
- r := bytes.NewBuffer(b)
- d := format.NewDecoder(r)
-
- m.raw = format.New()
- if err := d.Decode(m.raw); err != nil {
- return err
- }
-
- unmarshalSubmodules(m.raw, m.Submodules)
- return nil
-}
-
-// Marshal returns Modules encoded as a git-config file.
-func (m *Modules) Marshal() ([]byte, error) {
- s := m.raw.Section(submoduleSection)
- s.Subsections = make(format.Subsections, len(m.Submodules))
-
- var i int
- for _, r := range m.Submodules {
- s.Subsections[i] = r.marshal()
- i++
- }
-
- buf := bytes.NewBuffer(nil)
- if err := format.NewEncoder(buf).Encode(m.raw); err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
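A minimal sketch of parsing a .gitmodules blob with the Modules API above, assuming the config and fmt packages are imported; the submodule name and URL are hypothetical:

	m := config.NewModules()
	data := []byte("[submodule \"lib\"]\n" +
		"\tpath = lib\n" +
		"\turl = https://github.com/example/lib.git\n")
	if err := m.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(m.Submodules["lib"].URL) // https://github.com/example/lib.git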
-
-// Submodule defines a submodule.
-type Submodule struct {
- // Name module name
- Name string
- // Path defines the path, relative to the top-level directory of the Git
- // working tree.
- Path string
- // URL defines a URL from which the submodule repository can be cloned.
- URL string
- // Branch is a remote branch name for tracking updates in the upstream
- // submodule. Optional value.
- Branch string
-
- // raw representation of the subsection, filled when marshal or unmarshal
- // is called.
- raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (m *Submodule) Validate() error {
- if m.Path == "" {
- return ErrModuleEmptyPath
- }
-
- if m.URL == "" {
- return ErrModuleEmptyURL
- }
-
- if dotdotPath.MatchString(m.Path) {
- return ErrModuleBadPath
- }
-
- return nil
-}
-
-func (m *Submodule) unmarshal(s *format.Subsection) {
- m.raw = s
-
- m.Name = m.raw.Name
- m.Path = m.raw.Option(pathKey)
- m.URL = m.raw.Option(urlKey)
- m.Branch = m.raw.Option(branchKey)
-}
-
-func (m *Submodule) marshal() *format.Subsection {
- if m.raw == nil {
- m.raw = &format.Subsection{}
- }
-
- m.raw.Name = m.Name
- if m.raw.Name == "" {
- m.raw.Name = m.Path
- }
-
- m.raw.SetOption(pathKey, m.Path)
- m.raw.SetOption(urlKey, m.URL)
-
- if m.Branch != "" {
- m.raw.SetOption(branchKey, m.Branch)
- }
-
- return m.raw
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go b/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go
deleted file mode 100644
index 14bb40069c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package config
-
-import (
- "errors"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-const (
- refSpecWildcard = "*"
- refSpecForce = "+"
- refSpecSeparator = ":"
-)
-
-var (
- ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
- ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
-)
-
-// RefSpec is a mapping from local branches to remote references.
-// The format of the refspec is an optional +, followed by <src>:<dst>, where
-// <src> is the pattern for references on the remote side and <dst> is where
-// those references will be written locally. The + tells Git to update the
-// reference even if it isn’t a fast-forward.
- // e.g.: "+refs/heads/*:refs/remotes/origin/*"
-//
-// https://git-scm.com/book/es/v2/Git-Internals-The-Refspec
-type RefSpec string
-
-// Validate validates the RefSpec
-func (s RefSpec) Validate() error {
- spec := string(s)
- if strings.Count(spec, refSpecSeparator) != 1 {
- return ErrRefSpecMalformedSeparator
- }
-
- sep := strings.Index(spec, refSpecSeparator)
- if sep == len(spec)-1 {
- return ErrRefSpecMalformedSeparator
- }
-
- ws := strings.Count(spec[0:sep], refSpecWildcard)
- wd := strings.Count(spec[sep+1:], refSpecWildcard)
- if ws == wd && ws < 2 && wd < 2 {
- return nil
- }
-
- return ErrRefSpecMalformedWildcard
-}
-
- // IsForceUpdate returns true if the update is allowed in non-fast-forward merges.
-func (s RefSpec) IsForceUpdate() bool {
- return s[0] == refSpecForce[0]
-}
-
-// IsDelete returns true if the refspec indicates a delete (empty src).
-func (s RefSpec) IsDelete() bool {
- return s[0] == refSpecSeparator[0]
-}
-
- // Src returns the src side.
-func (s RefSpec) Src() string {
- spec := string(s)
-
- var start int
- if s.IsForceUpdate() {
- start = 1
- } else {
- start = 0
- }
- end := strings.Index(spec, refSpecSeparator)
-
- return spec[start:end]
-}
-
- // Match matches the given plumbing.ReferenceName against the source.
-func (s RefSpec) Match(n plumbing.ReferenceName) bool {
- if !s.IsWildcard() {
- return s.matchExact(n)
- }
-
- return s.matchGlob(n)
-}
-
-// IsWildcard returns true if the RefSpec contains a wildcard.
-func (s RefSpec) IsWildcard() bool {
- return strings.Contains(string(s), refSpecWildcard)
-}
-
-func (s RefSpec) matchExact(n plumbing.ReferenceName) bool {
- return s.Src() == n.String()
-}
-
-func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
- src := s.Src()
- name := n.String()
- wildcard := strings.Index(src, refSpecWildcard)
-
- var prefix, suffix string
- prefix = src[0:wildcard]
- if len(src) > wildcard+1 {
- suffix = src[wildcard+1:]
- }
-
- return len(name) >= len(prefix)+len(suffix) &&
- strings.HasPrefix(name, prefix) &&
- strings.HasSuffix(name, suffix)
-}
-
-// Dst returns the destination for the given remote reference.
-func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
- spec := string(s)
- start := strings.Index(spec, refSpecSeparator) + 1
- dst := spec[start:]
- src := s.Src()
-
- if !s.IsWildcard() {
- return plumbing.ReferenceName(dst)
- }
-
- name := n.String()
- ws := strings.Index(src, refSpecWildcard)
- wd := strings.Index(dst, refSpecWildcard)
- match := name[ws : len(name)-(len(src)-(ws+1))]
-
- return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
-}
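The helpers above compose as follows for the stock origin fetch refspec; a sketch, with expected results in comments (assumes the config, plumbing and fmt packages are imported):

	rs := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")

	fmt.Println(rs.Validate())      // <nil>: one separator, matching wildcard counts
	fmt.Println(rs.IsForceUpdate()) // true, because of the leading "+"
	fmt.Println(rs.Src())           // refs/heads/*

	n := plumbing.ReferenceName("refs/heads/master")
	fmt.Println(rs.Match(n)) // true
	fmt.Println(rs.Dst(n))   // refs/remotes/origin/master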
-
-func (s RefSpec) Reverse() RefSpec {
- spec := string(s)
- separator := strings.Index(spec, refSpecSeparator)
-
- return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
-}
-
-func (s RefSpec) String() string {
- return string(s)
-}
-
-// MatchAny returns true if any of the RefSpec match with the given ReferenceName.
-func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool {
- for _, r := range l {
- if r.Match(n) {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/doc.go b/vendor/gopkg.in/src-d/go-git.v4/doc.go
deleted file mode 100644
index 60f2261e41..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// A highly extensible git implementation in pure Go.
-//
- // go-git aims to reach the completeness of libgit2 or jgit; it currently covers
- // the majority of the plumbing read operations and some of the main write
- // operations, but lacks the main porcelain operations such as merges.
-//
- // It is highly extensible; we have been following the open/closed principle in
- // its design to facilitate extensions, mainly focusing the efforts on the
-// persistence of the objects.
-package git // import "gopkg.in/src-d/go-git.v4"
diff --git a/vendor/gopkg.in/src-d/go-git.v4/go.mod b/vendor/gopkg.in/src-d/go-git.v4/go.mod
deleted file mode 100644
index 6f8b3d2e62..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/go.mod
+++ /dev/null
@@ -1,29 +0,0 @@
-module gopkg.in/src-d/go-git.v4
-
-require (
- github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
- github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
- github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
- github.com/emirpasic/gods v1.12.0
- github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
- github.com/gliderlabs/ssh v0.2.2
- github.com/google/go-cmp v0.3.0
- github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
- github.com/jessevdk/go-flags v1.4.0
- github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
- github.com/mitchellh/go-homedir v1.1.0
- github.com/pelletier/go-buffruneio v0.2.0 // indirect
- github.com/pkg/errors v0.8.1 // indirect
- github.com/sergi/go-diff v1.0.0
- github.com/src-d/gcfg v1.4.0
- github.com/stretchr/objx v0.2.0 // indirect
- github.com/xanzy/ssh-agent v0.2.1
- golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
- golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
- golang.org/x/text v0.3.2
- golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a // indirect
- gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
- gopkg.in/src-d/go-billy.v4 v4.3.2
- gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
- gopkg.in/warnings.v0 v0.1.2 // indirect
-)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/go.sum b/vendor/gopkg.in/src-d/go-git.v4/go.sum
deleted file mode 100644
index 65551c1658..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/go.sum
+++ /dev/null
@@ -1,92 +0,0 @@
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.1.3 h1:cBU46h1lYQk5f2Z+jZbewFKy+1zzE2aUX/ilcPDAm9M=
-github.com/gliderlabs/ssh v0.1.3/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
-github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
-github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
-github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
-github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9HgerkXPyIeCSO6k0zUMGfFk=
-golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E=
-golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk=
-golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e h1:D5TXcfTk7xF7hvieo4QErS3qqCB4teTffacDWr7CI+0=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
-gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=
-gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
-gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
-gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go
deleted file mode 100644
index d2c509e50d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go
+++ /dev/null
@@ -1,622 +0,0 @@
- // Package revision extracts git revisions from strings.
- // More information about revisions: https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
-package revision
-
-import (
- "bytes"
- "fmt"
- "io"
- "regexp"
- "strconv"
- "time"
-)
-
- // ErrInvalidRevision is emitted if the string doesn't match a valid revision.
-type ErrInvalidRevision struct {
- s string
-}
-
-func (e *ErrInvalidRevision) Error() string {
- return "Revision invalid : " + e.s
-}
-
-// Revisioner represents a revision component.
-// A revision is made of multiple revision components
-// obtained after parsing a revision string,
- // for instance the revision "master~" will be converted into
- // two revision components, Ref and TildePath.
-type Revisioner interface {
-}
-
-// Ref represents a reference name : HEAD, master
-type Ref string
-
-// TildePath represents ~, ~{n}
-type TildePath struct {
- Depth int
-}
-
-// CaretPath represents ^, ^{n}
-type CaretPath struct {
- Depth int
-}
-
-// CaretReg represents ^{/foo bar}
-type CaretReg struct {
- Regexp *regexp.Regexp
- Negate bool
-}
-
-// CaretType represents ^{commit}
-type CaretType struct {
- ObjectType string
-}
-
-// AtReflog represents @{n}
-type AtReflog struct {
- Depth int
-}
-
-// AtCheckout represents @{-n}
-type AtCheckout struct {
- Depth int
-}
-
-// AtUpstream represents @{upstream}, @{u}
-type AtUpstream struct {
- BranchName string
-}
-
-// AtPush represents @{push}
-type AtPush struct {
- BranchName string
-}
-
-// AtDate represents @{"2006-01-02T15:04:05Z"}
-type AtDate struct {
- Date time.Time
-}
-
-// ColonReg represents :/foo bar
-type ColonReg struct {
- Regexp *regexp.Regexp
- Negate bool
-}
-
-// ColonPath represents :./<path> :<path>
-type ColonPath struct {
- Path string
-}
-
-// ColonStagePath represents :<n>:/<path>
-type ColonStagePath struct {
- Path string
- Stage int
-}
-
- // Parser represents a parser used to tokenize a given string and
- // transform it into revisioner chunks.
-type Parser struct {
- s *scanner
- currentParsedChar struct {
- tok token
- lit string
- }
- unreadLastChar bool
-}
-
-// NewParserFromString returns a new instance of parser from a string.
-func NewParserFromString(s string) *Parser {
- return NewParser(bytes.NewBufferString(s))
-}
-
-// NewParser returns a new instance of parser.
-func NewParser(r io.Reader) *Parser {
- return &Parser{s: newScanner(r)}
-}
-
-// scan returns the next token from the underlying scanner
-// or the last scanned token if an unscan was requested
-func (p *Parser) scan() (token, string, error) {
- if p.unreadLastChar {
- p.unreadLastChar = false
- return p.currentParsedChar.tok, p.currentParsedChar.lit, nil
- }
-
- tok, lit, err := p.s.scan()
-
- p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit
-
- return tok, lit, err
-}
-
-// unscan pushes the previously read token back onto the buffer.
-func (p *Parser) unscan() { p.unreadLastChar = true }
-
- // Parse explodes a revision string into revisioner chunks.
-func (p *Parser) Parse() ([]Revisioner, error) {
- var rev Revisioner
- var revs []Revisioner
- var tok token
- var err error
-
- for {
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case at:
- rev, err = p.parseAt()
- case tilde:
- rev, err = p.parseTilde()
- case caret:
- rev, err = p.parseCaret()
- case colon:
- rev, err = p.parseColon()
- case eof:
- err = p.validateFullRevision(&revs)
-
- if err != nil {
- return []Revisioner{}, err
- }
-
- return revs, nil
- default:
- p.unscan()
- rev, err = p.parseRef()
- }
-
- if err != nil {
- return []Revisioner{}, err
- }
-
- revs = append(revs, rev)
- }
-}
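Since this package is internal to go-git, Parse is only reachable from within the library itself; a sketch of what it yields for a typical revision string (the expected chunks are inferred from the parsing rules above):

	p := NewParserFromString("master~2^")
	chunks, err := p.Parse()
	// err == nil; chunks is [Ref("master"), TildePath{2}, CaretPath{1}]:
	// the ref, then two first-parent steps, then one more parent step.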
-
-// validateFullRevision ensures all revisioner chunks make a valid revision
-func (p *Parser) validateFullRevision(chunks *[]Revisioner) error {
- var hasReference bool
-
- for i, chunk := range *chunks {
- switch chunk.(type) {
- case Ref:
- if i == 0 {
- hasReference = true
- } else {
- return &ErrInvalidRevision{`reference must be defined once at the beginning`}
- }
- case AtDate:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`}
- case AtReflog:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`}
- case AtCheckout:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`}
- case AtUpstream:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`}
- case AtPush:
- if len(*chunks) == 1 || hasReference && len(*chunks) == 2 {
- return nil
- }
-
- return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`}
- case TildePath, CaretPath, CaretReg:
- if !hasReference {
- return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`}
- }
- case ColonReg:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`}
- case ColonPath:
- if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`}
- case ColonStagePath:
- if len(*chunks) == 1 {
- return nil
- }
-
- return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`}
- }
- }
-
- return nil
-}
-
- // parseAt extracts @ statements.
-func (p *Parser) parseAt() (Revisioner, error) {
- var tok, nextTok token
- var lit, nextLit string
- var err error
-
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if tok != obrace {
- p.unscan()
-
- return Ref("HEAD"), nil
- }
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, nextLit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace:
- return AtUpstream{}, nil
- case tok == word && lit == "push" && nextTok == cbrace:
- return AtPush{}, nil
- case tok == number && nextTok == cbrace:
- n, _ := strconv.Atoi(lit)
-
- return AtReflog{n}, nil
- case tok == minus && nextTok == number:
- n, _ := strconv.Atoi(nextLit)
-
- t, _, err := p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if t != cbrace {
- return nil, &ErrInvalidRevision{`missing "}" in @{-n} structure`}
- }
-
- return AtCheckout{n}, nil
- default:
- p.unscan()
-
- date := lit
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == cbrace:
- t, err := time.Parse("2006-01-02T15:04:05Z", date)
-
- if err != nil {
- return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)}
- }
-
- return AtDate{t}, nil
- default:
- date += lit
- }
- }
- }
-}
-
- // parseTilde extracts ~ statements.
-func (p *Parser) parseTilde() (Revisioner, error) {
- var tok token
- var lit string
- var err error
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == number:
- n, _ := strconv.Atoi(lit)
-
- return TildePath{n}, nil
- default:
- p.unscan()
- return TildePath{1}, nil
- }
-}
-
- // parseCaret extracts ^ statements.
-func (p *Parser) parseCaret() (Revisioner, error) {
- var tok token
- var lit string
- var err error
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == obrace:
- r, err := p.parseCaretBraces()
-
- if err != nil {
- return nil, err
- }
-
- return r, nil
- case tok == number:
- n, _ := strconv.Atoi(lit)
-
- if n > 2 {
- return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)}
- }
-
- return CaretPath{n}, nil
- default:
- p.unscan()
- return CaretPath{1}, nil
- }
-}
-
- // parseCaretBraces extracts ^{<data>} statements.
-func (p *Parser) parseCaretBraces() (Revisioner, error) {
- var tok, nextTok token
- var lit, _ string
- start := true
- var re string
- var negate bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"):
- return CaretType{lit}, nil
- case re == "" && tok == cbrace:
- return CaretType{"tag"}, nil
- case re == "" && tok == emark && nextTok == emark:
- re += lit
- case re == "" && tok == emark && nextTok == minus:
- negate = true
- case re == "" && tok == emark:
- return nil, &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" other than those defined are reserved`}
- case re == "" && tok == slash:
- p.unscan()
- case tok != slash && start:
- return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
- case tok != cbrace:
- p.unscan()
- re += lit
- case tok == cbrace:
- p.unscan()
-
- reg, err := regexp.Compile(re)
-
- if err != nil {
- return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
- }
-
- return CaretReg{reg, negate}, nil
- }
-
- start = false
- }
-}
-
- // parseColon extracts : statements.
-func (p *Parser) parseColon() (Revisioner, error) {
- var tok token
- var err error
-
- tok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case slash:
- return p.parseColonSlash()
- default:
- p.unscan()
- return p.parseColonDefault()
- }
-}
-
- // parseColonSlash extracts :/<data> statements.
-func (p *Parser) parseColonSlash() (Revisioner, error) {
- var tok, nextTok token
- var lit string
- var re string
- var negate bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == emark && nextTok == emark:
- re += lit
- case re == "" && tok == emark && nextTok == minus:
- negate = true
- case re == "" && tok == emark:
- return nil, &ErrInvalidRevision{`revision suffix brace component sequences starting with "/!" other than those defined are reserved`}
- case tok == eof:
- p.unscan()
- reg, err := regexp.Compile(re)
-
- if err != nil {
- return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())}
- }
-
- return ColonReg{reg, negate}, nil
- default:
- p.unscan()
- re += lit
- }
- }
-}
-
- // parseColonDefault extracts :<data> statements.
-func (p *Parser) parseColonDefault() (Revisioner, error) {
- var tok token
- var lit string
- var path string
- var stage int
- var err error
- var n = -1
-
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- nextTok, _, err := p.scan()
-
- if err != nil {
- return nil, err
- }
-
- if tok == number && nextTok == colon {
- n, _ = strconv.Atoi(lit)
- }
-
- switch n {
- case 0, 1, 2, 3:
- stage = n
- default:
- path += lit
- p.unscan()
- }
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch {
- case tok == eof && n == -1:
- return ColonPath{path}, nil
- case tok == eof:
- return ColonStagePath{path, stage}, nil
- default:
- path += lit
- }
- }
-}
-
- // parseRef extracts the reference name.
-func (p *Parser) parseRef() (Revisioner, error) {
- var tok, prevTok token
- var lit, buf string
- var endOfRef bool
- var err error
-
- for {
- tok, lit, err = p.scan()
-
- if err != nil {
- return nil, err
- }
-
- switch tok {
- case eof, at, colon, tilde, caret:
- endOfRef = true
- }
-
- err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef)
-
- if err != nil {
- return "", err
- }
-
- if endOfRef {
- p.unscan()
- return Ref(buf), nil
- }
-
- buf += lit
- prevTok = tok
- }
-}
-
- // checkRefFormat ensures the reference name follows the rules defined here:
- // https://git-scm.com/docs/git-check-ref-format
-func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error {
- switch token {
- case aslash, space, control, qmark, asterisk, obracket:
- return &ErrInvalidRevision{fmt.Sprintf(`must not contain "%s"`, literal)}
- }
-
- switch {
- case (token == dot || token == slash) && buffer == "":
- return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)}
- case previousToken == slash && endOfRef:
- return &ErrInvalidRevision{`must not end with "/"`}
- case previousToken == dot && endOfRef:
- return &ErrInvalidRevision{`must not end with "."`}
- case token == dot && previousToken == slash:
- return &ErrInvalidRevision{`must not contain "/."`}
- case previousToken == dot && token == dot:
- return &ErrInvalidRevision{`must not contain ".."`}
- case previousToken == slash && token == slash:
- return &ErrInvalidRevision{`must not contain consecutive "/"`}
- case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock":
- return &ErrInvalidRevision{"cannot end with .lock"}
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go
deleted file mode 100644
index fb5f333f7f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package revision
-
-import (
- "bufio"
- "io"
- "unicode"
-)
-
-// runeCategoryValidator takes a rune as input and
-// validates it belongs to a rune category
-type runeCategoryValidator func(r rune) bool
-
- // tokenizeExpression aggregates a series of runes matching the check predicate
- // into a single string, and returns the given tokenType as the token type.
-func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) {
- var data []rune
- data = append(data, ch)
-
- for {
- c, _, err := r.ReadRune()
-
- if c == zeroRune {
- break
- }
-
- if err != nil {
- return tokenError, "", err
- }
-
- if check(c) {
- data = append(data, c)
- } else {
- err := r.UnreadRune()
-
- if err != nil {
- return tokenError, "", err
- }
-
- return tokenType, string(data), nil
- }
- }
-
- return tokenType, string(data), nil
-}
-
-var zeroRune = rune(0)
-
-// scanner represents a lexical scanner.
-type scanner struct {
- r *bufio.Reader
-}
-
-// newScanner returns a new instance of scanner.
-func newScanner(r io.Reader) *scanner {
- return &scanner{r: bufio.NewReader(r)}
-}
-
- // scan extracts tokens and their string counterparts from the reader.
-func (s *scanner) scan() (token, string, error) {
- ch, _, err := s.r.ReadRune()
-
- if err != nil && err != io.EOF {
- return tokenError, "", err
- }
-
- switch ch {
- case zeroRune:
- return eof, "", nil
- case ':':
- return colon, string(ch), nil
- case '~':
- return tilde, string(ch), nil
- case '^':
- return caret, string(ch), nil
- case '.':
- return dot, string(ch), nil
- case '/':
- return slash, string(ch), nil
- case '{':
- return obrace, string(ch), nil
- case '}':
- return cbrace, string(ch), nil
- case '-':
- return minus, string(ch), nil
- case '@':
- return at, string(ch), nil
- case '\\':
- return aslash, string(ch), nil
- case '?':
- return qmark, string(ch), nil
- case '*':
- return asterisk, string(ch), nil
- case '[':
- return obracket, string(ch), nil
- case '!':
- return emark, string(ch), nil
- }
-
- if unicode.IsSpace(ch) {
- return space, string(ch), nil
- }
-
- if unicode.IsControl(ch) {
- return control, string(ch), nil
- }
-
- if unicode.IsLetter(ch) {
- return tokenizeExpression(ch, word, unicode.IsLetter, s.r)
- }
-
- if unicode.IsNumber(ch) {
- return tokenizeExpression(ch, number, unicode.IsNumber, s.r)
- }
-
- return tokenError, string(ch), nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go
deleted file mode 100644
index abc4048869..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package revision
-
- // token represents an entity extracted during string parsing.
-type token int
-
-const (
- eof token = iota
-
- aslash
- asterisk
- at
- caret
- cbrace
- colon
- control
- dot
- emark
- minus
- number
- obrace
- obracket
- qmark
- slash
- space
- tilde
- tokenError
- word
-)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/url/url.go b/vendor/gopkg.in/src-d/go-git.v4/internal/url/url.go
deleted file mode 100644
index 0f0d709d93..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/internal/url/url.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package url
-
-import (
- "regexp"
-)
-
-var (
- isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
- scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
-)
-
-// MatchesScheme returns true if the given string matches a URL-like
-// format scheme.
-func MatchesScheme(url string) bool {
- return isSchemeRegExp.MatchString(url)
-}
-
-// MatchesScpLike returns true if the given string matches an SCP-like
-// format scheme.
-func MatchesScpLike(url string) bool {
- return scpLikeUrlRegExp.MatchString(url)
-}
-
-// FindScpLikeComponents returns the user, host, port and path of the
-// given SCP-like URL.
-func FindScpLikeComponents(url string) (user, host, port, path string) {
- m := scpLikeUrlRegExp.FindStringSubmatch(url)
- return m[1], m[2], m[3], m[4]
-}
-
-// IsLocalEndpoint returns true if the given URL string specifies a
-// local file endpoint. For example, on a Linux machine,
-// `/home/user/src/go-git` would match as a local endpoint, but
-// `https://github.com/src-d/go-git` would not.
-func IsLocalEndpoint(url string) bool {
- return !MatchesScheme(url) && !MatchesScpLike(url)
-}
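The package is internal, so these helpers are only used inside go-git; a sketch of their behaviour on typical endpoint strings, with expected results in comments:

	fmt.Println(MatchesScheme("https://github.com/src-d/go-git"))  // true
	fmt.Println(MatchesScpLike("git@github.com:src-d/go-git.git")) // true

	user, host, port, path := FindScpLikeComponents("git@github.com:src-d/go-git.git")
	fmt.Println(user, host, port, path) // "git", "github.com", "" (no port), "src-d/go-git.git"

	fmt.Println(IsLocalEndpoint("/home/user/src/go-git")) // true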
diff --git a/vendor/gopkg.in/src-d/go-git.v4/object_walker.go b/vendor/gopkg.in/src-d/go-git.v4/object_walker.go
deleted file mode 100644
index f8b19cdb09..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/object_walker.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package git
-
-import (
- "fmt"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/storage"
-)
-
-type objectWalker struct {
- Storer storage.Storer
- // seen is the set of objects seen in the repo.
- // The seen map can become huge when walking over large
- // repos, so struct{} is used as the value type.
- seen map[plumbing.Hash]struct{}
-}
-
-func newObjectWalker(s storage.Storer) *objectWalker {
- return &objectWalker{s, map[plumbing.Hash]struct{}{}}
-}
-
- // walkAllRefs walks all (hash) references from the repo.
-func (p *objectWalker) walkAllRefs() error {
- // Walk over all the references in the repo.
- it, err := p.Storer.IterReferences()
- if err != nil {
- return err
- }
- defer it.Close()
- err = it.ForEach(func(ref *plumbing.Reference) error {
- // Exit this iteration early for non-hash references.
- if ref.Type() != plumbing.HashReference {
- return nil
- }
- return p.walkObjectTree(ref.Hash())
- })
- return err
-}
-
-func (p *objectWalker) isSeen(hash plumbing.Hash) bool {
- _, seen := p.seen[hash]
- return seen
-}
-
-func (p *objectWalker) add(hash plumbing.Hash) {
- p.seen[hash] = struct{}{}
-}
-
-// walkObjectTree walks over all objects and remembers references
-// to them in the objectWalker. This is used instead of the revlist
-// walks because memory usage is tight with huge repos.
-func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
- // Check if we have already seen this object, and mark it as seen.
- if p.isSeen(hash) {
- return nil
- }
- p.add(hash)
- // Fetch the object.
- obj, err := object.GetObject(p.Storer, hash)
- if err != nil {
- return fmt.Errorf("Getting object %s failed: %v", hash, err)
- }
- // Walk all children depending on object type.
- switch obj := obj.(type) {
- case *object.Commit:
- err = p.walkObjectTree(obj.TreeHash)
- if err != nil {
- return err
- }
- for _, h := range obj.ParentHashes {
- err = p.walkObjectTree(h)
- if err != nil {
- return err
- }
- }
- case *object.Tree:
- for i := range obj.Entries {
- // Shortcut for blob objects:
- // 'or' the lower bits of a mode and check that it
- // matches filemode.Executable. The type information
- // is in the higher bits, but this is the cleanest way
- // to handle plain files with different modes.
- // Other non-tree objects are somewhat rare, so they
- // are not special-cased.
- if obj.Entries[i].Mode|0755 == filemode.Executable {
- p.add(obj.Entries[i].Hash)
- continue
- }
- // Normal walk for sub-trees (and symlinks etc).
- err = p.walkObjectTree(obj.Entries[i].Hash)
- if err != nil {
- return err
- }
- }
- case *object.Tag:
- return p.walkObjectTree(obj.Target)
- default:
- // Error out on unhandled object types.
- return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
- }
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/options.go b/vendor/gopkg.in/src-d/go-git.v4/options.go
deleted file mode 100644
index 0f728e7c27..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/options.go
+++ /dev/null
@@ -1,492 +0,0 @@
-package git
-
-import (
- "errors"
- "regexp"
- "strings"
-
- "golang.org/x/crypto/openpgp"
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
-)
-
-// SubmoduleRescursivity defines how depth will affect any submodule recursive
-// operation.
-type SubmoduleRescursivity uint
-
-const (
- // DefaultRemoteName name of the default Remote, just like git command.
- DefaultRemoteName = "origin"
-
- // NoRecurseSubmodules disables the recursion for a submodule operation.
- NoRecurseSubmodules SubmoduleRescursivity = 0
- // DefaultSubmoduleRecursionDepth allows recursion in a submodule operation.
- DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10
-)
-
-var (
- ErrMissingURL = errors.New("URL field is required")
-)
-
-// CloneOptions describes how a clone should be performed.
-type CloneOptions struct {
- // The (possibly remote) repository URL to clone from.
- URL string
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Name of the remote to be added, by default `origin`.
- RemoteName string
- // Remote branch to clone.
- ReferenceName plumbing.ReferenceName
- // Fetch only ReferenceName if true.
- SingleBranch bool
- // No checkout of HEAD after clone if true.
- NoCheckout bool
- // Limit fetching to the specified number of commits.
- Depth int
- // RecurseSubmodules: after the clone is created, initialize all submodules
- // within, using their default settings. This option is ignored if the
- // cloned repository does not have a worktree.
- RecurseSubmodules SubmoduleRescursivity
- // Progress is where the human-readable information sent by the server is
- // stored; if nil, nothing is stored and the no-progress capability
- // (if supported) is sent to the server to avoid sending this information.
- Progress sideband.Progress
- // Tags describes how tags will be fetched from the remote repository;
- // the default is AllTags.
- Tags TagMode
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CloneOptions) Validate() error {
- if o.URL == "" {
- return ErrMissingURL
- }
-
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.ReferenceName == "" {
- o.ReferenceName = plumbing.HEAD
- }
-
- if o.Tags == InvalidTagMode {
- o.Tags = AllTags
- }
-
- return nil
-}
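In the porcelain API these options are consumed by Clone and PlainClone; a sketch, assuming the usual git and os imports, with an illustrative target path:

	r, err := git.PlainClone("/tmp/go-git", false, &git.CloneOptions{
		URL:      "https://github.com/src-d/go-git",
		Depth:    1,         // shallow clone of the tip commit
		Progress: os.Stdout, // stream server-side messages
	})
	if err != nil {
		panic(err)
	}
	// Validate has filled RemoteName ("origin"), ReferenceName (HEAD)
	// and Tags (AllTags) before the transport is contacted.
	_ = r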
-
-// PullOptions describes how a pull should be performed.
-type PullOptions struct {
- // Name of the remote to be pulled. If empty, uses the default.
- RemoteName string
- // Remote branch to pull. If empty, uses HEAD.
- ReferenceName plumbing.ReferenceName
- // Fetch only ReferenceName if true.
- SingleBranch bool
- // Limit fetching to the specified number of commits.
- Depth int
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // RecurseSubmodules controls if new commits of all populated submodules
- // should be fetched too.
- RecurseSubmodules SubmoduleRescursivity
- // Progress is where the human-readable information sent by the server is
- // stored; if nil, nothing is stored and the no-progress capability
- // (if supported) is sent to the server to avoid sending this information.
- Progress sideband.Progress
- // Force allows the pull to update a local branch even when the remote
- // branch does not descend from it.
- Force bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PullOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.ReferenceName == "" {
- o.ReferenceName = plumbing.HEAD
- }
-
- return nil
-}
-
-type TagMode int
-
-const (
- InvalidTagMode TagMode = iota
- // TagFollowing: any tag that points into the histories being fetched is also
- // fetched. TagFollowing requires a server with the `include-tag` capability
- // in order to fetch the annotated tag objects.
- TagFollowing
- // AllTags fetches all tags from the remote (i.e., fetch remote tags
- // refs/tags/* into local tags with the same name).
- AllTags
- // NoTags fetches no tags from the remote at all.
- NoTags
-)
-
-// FetchOptions describes how a fetch should be performed
-type FetchOptions struct {
- // Name of the remote to fetch from. Defaults to origin.
- RemoteName string
- RefSpecs []config.RefSpec
- // Depth limit fetching to the specified number of commits from the tip of
- // each remote branch history.
- Depth int
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Progress is where the human-readable information sent by the server is
- // stored; if nil, nothing is stored and the no-progress capability
- // (if supported) is sent to the server to avoid sending this information.
- Progress sideband.Progress
- // Tags describes how tags will be fetched from the remote repository;
- // the default is TagFollowing.
- Tags TagMode
- // Force allows the fetch to update a local branch even when the remote
- // branch does not descend from it.
- Force bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *FetchOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if o.Tags == InvalidTagMode {
- o.Tags = TagFollowing
- }
-
- for _, r := range o.RefSpecs {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// PushOptions describes how a push should be performed.
-type PushOptions struct {
- // RemoteName is the name of the remote to be pushed to.
- RemoteName string
- // RefSpecs specify what destination ref to update with what source
- // object. A refspec with empty src can be used to delete a reference.
- RefSpecs []config.RefSpec
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
- // Progress is where the human readable information sent by the server is
- // stored, if nil nothing is stored.
- Progress sideband.Progress
- // Prune specifies that remote refs that match the given RefSpecs and that
- // do not exist locally will be removed.
- Prune bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PushOptions) Validate() error {
- if o.RemoteName == "" {
- o.RemoteName = DefaultRemoteName
- }
-
- if len(o.RefSpecs) == 0 {
- o.RefSpecs = []config.RefSpec{
- config.RefSpec(config.DefaultPushRefSpec),
- }
- }
-
- for _, r := range o.RefSpecs {
- if err := r.Validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// SubmoduleUpdateOptions describes how a submodule update should be performed.
-type SubmoduleUpdateOptions struct {
- // Init, if true, initializes the submodules recorded in the index.
- Init bool
- // NoFetch tells the update command not to fetch new objects from the
- // remote site.
- NoFetch bool
- // RecurseSubmodules: the update is performed not only in the submodules of
- // the current repository but also in any nested submodules inside those
- // submodules (and so on), until the SubmoduleRescursivity depth is reached.
- RecurseSubmodules SubmoduleRescursivity
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
-}
-
-var (
- ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive")
- ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used")
-)
-
-// CheckoutOptions describes how a checkout operation should be performed.
-type CheckoutOptions struct {
- // Hash is the hash of the commit to be checked out. If used, HEAD will be
- // in detached mode. If Create is not used, Branch and Hash are mutually
- // exclusive.
- Hash plumbing.Hash
- // Branch to be checked out; if Branch and Hash are empty, it is set to `master`.
- Branch plumbing.ReferenceName
- // Create a new branch named Branch and start it at Hash.
- Create bool
- // Force, if true when switching branches, proceeds even if the index or the
- // working tree differs from HEAD. This is used to throw away local changes.
- Force bool
- // Keep, if true when switching branches, local changes (the index or the
- // working tree changes) will be kept so that they can be committed to the
- // target branch. Force and Keep are mutually exclusive and should not both
- // be set to true.
- Keep bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CheckoutOptions) Validate() error {
- if !o.Create && !o.Hash.IsZero() && o.Branch != "" {
- return ErrBranchHashExclusive
- }
-
- if o.Create && o.Branch == "" {
- return ErrCreateRequiresBranch
- }
-
- if o.Branch == "" {
- o.Branch = plumbing.Master
- }
-
- return nil
-}
-
-// ResetMode defines the mode of a reset operation.
-type ResetMode int8
-
-const (
- // MixedReset resets the index but not the working tree (i.e., the changed
- // files are preserved but not marked for commit) and reports what has not
- // been updated. This is the default action.
- MixedReset ResetMode = iota
- // HardReset resets the index and working tree. Any changes to tracked files
- // in the working tree are discarded.
- HardReset
- // MergeReset resets the index and updates the files in the working tree
- // that are different between Commit and HEAD, but keeps those which are
- // different between the index and working tree (i.e. which have changes
- // which have not been added).
- //
- // If a file that is different between Commit and the index has unstaged
- // changes, reset is aborted.
- MergeReset
- // SoftReset does not touch the index file or the working tree at all (but
- // resets the head to <commit>, just like all modes do). This leaves all
- // your changed files "Changes to be committed", as git status would put it.
- SoftReset
-)
-
-// ResetOptions describes how a reset operation should be performed.
-type ResetOptions struct {
- // Commit, if present, sets the current branch head (HEAD) to it.
- Commit plumbing.Hash
- // Mode resets the current branch head to Commit and possibly updates
- // the index (resetting it to the tree of Commit) and the working tree,
- // depending on Mode. If empty, MixedReset is used.
- Mode ResetMode
-}
-
-// Validate validates the fields and sets the default values.
-func (o *ResetOptions) Validate(r *Repository) error {
- if o.Commit == plumbing.ZeroHash {
- ref, err := r.Head()
- if err != nil {
- return err
- }
-
- o.Commit = ref.Hash()
- }
-
- return nil
-}
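A sketch of a hard reset through the worktree API, assuming r is an open *git.Repository:

	w, err := r.Worktree()
	if err != nil {
		panic(err)
	}
	// Commit is left zero, so Validate resolves it to the current HEAD hash.
	if err := w.Reset(&git.ResetOptions{Mode: git.HardReset}); err != nil {
		panic(err)
	}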
-
-type LogOrder int8
-
-const (
- LogOrderDefault LogOrder = iota
- LogOrderDFS
- LogOrderDFSPost
- LogOrderBSF
- LogOrderCommitterTime
-)
-
-// LogOptions describes how a log action should be performed.
-type LogOptions struct {
- // When the From option is set the log will only contain commits
- // reachable from it. If this option is not set, HEAD will be used as
- // the default From.
- From plumbing.Hash
-
- // The default traversal algorithm is depth-first search.
- // Set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`).
- // Set Order=LogOrderBSF for breadth-first search.
- Order LogOrder
-
- // Show only those commits in which the specified file was inserted/updated.
- // It is equivalent to running `git log -- <file-name>`.
- FileName *string
-
- // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
- // It is equivalent to running `git log --all`.
- // If set to true, the From option will be ignored.
- All bool
-}
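A sketch of walking history with these options, assuming r is an open *git.Repository and the object package is imported; committer-time order is the closest match to plain `git log`:

	iter, err := r.Log(&git.LogOptions{Order: git.LogOrderCommitterTime})
	if err != nil {
		panic(err)
	}
	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash)
		return nil
	})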
-
-var (
- ErrMissingAuthor = errors.New("author field is required")
-)
-
-// CommitOptions describes how a commit operation should be performed.
-type CommitOptions struct {
- // All automatically stages files that have been modified and deleted, but
- // new files you have not told Git about are not affected.
- All bool
- // Author is the author's signature of the commit.
- Author *object.Signature
- // Committer is the committer's signature of the commit. If Committer is
- // nil the Author signature is used.
- Committer *object.Signature
- // Parents are the parent commits for the new commit; by default, when
- // len(Parents) is zero, the hash of the HEAD reference is used.
- Parents []plumbing.Hash
- // SignKey denotes a key to sign the commit with. A nil value here means the
- // commit will not be signed. The private key must be present and already
- // decrypted.
- SignKey *openpgp.Entity
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CommitOptions) Validate(r *Repository) error {
- if o.Author == nil {
- return ErrMissingAuthor
- }
-
- if o.Committer == nil {
- o.Committer = o.Author
- }
-
- if len(o.Parents) == 0 {
- head, err := r.Head()
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if head != nil {
- o.Parents = []plumbing.Hash{head.Hash()}
- }
- }
-
- return nil
-}
-
-var (
- ErrMissingName = errors.New("name field is required")
- ErrMissingTagger = errors.New("tagger field is required")
- ErrMissingMessage = errors.New("message field is required")
-)
-
-// CreateTagOptions describes how a tag object should be created.
-type CreateTagOptions struct {
- // Tagger defines the signature of the tag creator.
- Tagger *object.Signature
- // Message defines the annotation of the tag. It is canonicalized during
- // validation into the format expected by git - no leading whitespace and
- // ending in a newline.
- Message string
- // SignKey denotes a key to sign the tag with. A nil value here means the tag
- // will not be signed. The private key must be present and already decrypted.
- SignKey *openpgp.Entity
-}
-
-// Validate validates the fields and sets the default values.
-func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
- if o.Tagger == nil {
- return ErrMissingTagger
- }
-
- if o.Message == "" {
- return ErrMissingMessage
- }
-
- // Canonicalize the message into the expected message format.
- o.Message = strings.TrimSpace(o.Message) + "\n"
-
- return nil
-}
-
-// ListOptions describes how a remote list should be performed.
-type ListOptions struct {
- // Auth credentials, if required, to use with the remote repository.
- Auth transport.AuthMethod
-}
-
-// CleanOptions describes how a clean should be performed.
-type CleanOptions struct {
- Dir bool
-}
-
-// GrepOptions describes how a grep should be performed.
-type GrepOptions struct {
- // Patterns are compiled Regexp objects to be matched.
- Patterns []*regexp.Regexp
- // InvertMatch selects non-matching lines.
- InvertMatch bool
- // CommitHash is the hash of the commit from which the worktree should be derived.
- CommitHash plumbing.Hash
- // ReferenceName is the branch or tag name from which the worktree should be derived.
- ReferenceName plumbing.ReferenceName
- // PathSpecs are compiled Regexp objects of pathspec to use in the matching.
- PathSpecs []*regexp.Regexp
-}
-
-var (
- ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed")
-)
-
-// Validate validates the fields and sets the default values.
-func (o *GrepOptions) Validate(w *Worktree) error {
- if !o.CommitHash.IsZero() && o.ReferenceName != "" {
- return ErrHashOrReference
- }
-
- // If neither CommitHash nor ReferenceName is provided, use the commit
- // hash of the repository's head.
- if o.CommitHash.IsZero() && o.ReferenceName == "" {
- ref, err := w.r.Head()
- if err != nil {
- return err
- }
- o.CommitHash = ref.Hash()
- }
-
- return nil
-}
-
-// PlainOpenOptions describes how opening a plain repository should be
-// performed.
-type PlainOpenOptions struct {
- // DetectDotGit defines whether parent directories should be
- // walked until a .git directory or file is found.
- DetectDotGit bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PlainOpenOptions) Validate() error { return nil }
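A minimal usage sketch (not part of the upstream file) showing how these option structs are consumed; the repository path, author name and e-mail are made-up values. Committer is deliberately left nil so that CommitOptions.Validate copies Author into it:

    package main

    import (
        "fmt"
        "time"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func main() {
        // Open an existing repository in the current directory.
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }

        w, err := repo.Worktree()
        if err != nil {
            panic(err)
        }

        // All stages modified and deleted files, mirroring `git commit -a`.
        hash, err := w.Commit("example commit", &git.CommitOptions{
            All:    true,
            Author: &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("new commit:", hash)
    }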
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go
deleted file mode 100644
index acaf195203..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-)
-
-// BufferLRU implements a buffer cache with an LRU eviction policy and a
-// maximum size (measured in buffer size).
-type BufferLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[int64]*list.Element
- mut sync.Mutex
-}
-
-// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewBufferLRU(maxSize FileSize) *BufferLRU {
- return &BufferLRU{MaxSize: maxSize}
-}
-
-// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
-func NewBufferLRUDefault() *BufferLRU {
- return &BufferLRU{MaxSize: DefaultMaxSize}
-}
-
-type buffer struct {
- Key int64
- Slice []byte
-}
-
-// Put puts a buffer into the cache. If the buffer is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. Buffers might
-// be evicted to make room for the new one.
-func (c *BufferLRU) Put(key int64, slice []byte) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- if c.cache == nil {
- c.actualSize = 0
- c.cache = make(map[int64]*list.Element, 1000)
- c.ll = list.New()
- }
-
- bufSize := FileSize(len(slice))
- if ee, ok := c.cache[key]; ok {
- oldBuf := ee.Value.(buffer)
- // in this case bufSize is a delta: new size - old size
- bufSize -= FileSize(len(oldBuf.Slice))
- c.ll.MoveToFront(ee)
- ee.Value = buffer{key, slice}
- } else {
- if bufSize > c.MaxSize {
- return
- }
- ee := c.ll.PushFront(buffer{key, slice})
- c.cache[key] = ee
- }
-
- c.actualSize += bufSize
- for c.actualSize > c.MaxSize {
- last := c.ll.Back()
- lastObj := last.Value.(buffer)
- lastSize := FileSize(len(lastObj.Slice))
-
- c.ll.Remove(last)
- delete(c.cache, lastObj.Key)
- c.actualSize -= lastSize
- }
-}
-
-// Get returns a buffer by its key. It marks the buffer as used. If the buffer
-// is not in the cache, (nil, false) will be returned.
-func (c *BufferLRU) Get(key int64) ([]byte, bool) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- ee, ok := c.cache[key]
- if !ok {
- return nil, false
- }
-
- c.ll.MoveToFront(ee)
- return ee.Value.(buffer).Slice, true
-}
-
-// Clear the content of this buffer cache.
-func (c *BufferLRU) Clear() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- c.ll = nil
- c.cache = nil
- c.actualSize = 0
-}
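A short sketch of the cache behaviour above (keys and sizes are made up): re-putting a key accounts only for the size delta, and the least recently used entry is evicted once MaxSize would be exceeded:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/cache"
    )

    func main() {
        // A tiny cache that holds at most 8 bytes of buffer data.
        c := cache.NewBufferLRU(8 * cache.Byte)

        c.Put(1, []byte("abcd")) // 4 bytes used
        c.Put(2, []byte("efgh")) // 8 bytes used

        // Growing key 1 to 8 bytes adds a delta of 4, pushing the total to 12,
        // so the least recently used entry (key 2) is evicted.
        c.Put(1, []byte("abcdefgh"))

        if _, ok := c.Get(2); !ok {
            fmt.Println("key 2 was evicted")
        }
    }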
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go
deleted file mode 100644
index 2b7f36a56f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package cache
-
-import "gopkg.in/src-d/go-git.v4/plumbing"
-
-const (
- Byte FileSize = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
-)
-
-type FileSize int64
-
-const DefaultMaxSize FileSize = 96 * MiByte
-
-// Object is an interface to an object cache.
-type Object interface {
- // Put puts the given object into the cache. Whether this object will
- // actually be put into the cache or not is implementation specific.
- Put(o plumbing.EncodedObject)
- // Get gets an object from the cache given its hash. The second return value
- // is true if the object was returned, and false otherwise.
- Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
- // Clear clears every object from the cache.
- Clear()
-}
-
-// Buffer is an interface to a buffer cache.
-type Buffer interface {
- // Put puts a buffer into the cache. If the buffer is already in the cache,
- // it will be marked as used. Otherwise, it will be inserted. Buffers might
- // be evicted to make room for the new one.
- Put(key int64, slice []byte)
- // Get returns a buffer by its key. It marks the buffer as used. If the
- // buffer is not in the cache, (nil, false) will be returned.
- Get(key int64) ([]byte, bool)
- // Clear clears every object from the cache.
- Clear()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go
deleted file mode 100644
index cd3712b7d7..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// ObjectLRU implements an object cache with an LRU eviction policy and a
-// maximum size (measured in object size).
-type ObjectLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[interface{}]*list.Element
- mut sync.Mutex
-}
-
-// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewObjectLRU(maxSize FileSize) *ObjectLRU {
- return &ObjectLRU{MaxSize: maxSize}
-}
-
-// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
-func NewObjectLRUDefault() *ObjectLRU {
- return &ObjectLRU{MaxSize: DefaultMaxSize}
-}
-
-// Put puts an object into the cache. If the object is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. One or more objects
-// might be evicted to make room for the new object.
-func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- if c.cache == nil {
- c.actualSize = 0
- c.cache = make(map[interface{}]*list.Element, 1000)
- c.ll = list.New()
- }
-
- objSize := FileSize(obj.Size())
- key := obj.Hash()
- if ee, ok := c.cache[key]; ok {
- oldObj := ee.Value.(plumbing.EncodedObject)
- // in this case objSize is a delta: new size - old size
- objSize -= FileSize(oldObj.Size())
- c.ll.MoveToFront(ee)
- ee.Value = obj
- } else {
- if objSize > c.MaxSize {
- return
- }
- ee := c.ll.PushFront(obj)
- c.cache[key] = ee
- }
-
- c.actualSize += objSize
- for c.actualSize > c.MaxSize {
- last := c.ll.Back()
- if last == nil {
- c.actualSize = 0
- break
- }
-
- lastObj := last.Value.(plumbing.EncodedObject)
- lastSize := FileSize(lastObj.Size())
-
- c.ll.Remove(last)
- delete(c.cache, lastObj.Hash())
- c.actualSize -= lastSize
- }
-}
-
-// Get returns an object by its hash. It marks the object as used. If the object
-// is not in the cache, (nil, false) will be returned.
-func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- ee, ok := c.cache[k]
- if !ok {
- return nil, false
- }
-
- c.ll.MoveToFront(ee)
- return ee.Value.(plumbing.EncodedObject), true
-}
-
-// Clear the content of this object cache.
-func (c *ObjectLRU) Clear() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- c.ll = nil
- c.cache = nil
- c.actualSize = 0
-}
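The same pattern keyed by object hash; a sketch using plumbing.MemoryObject, which implements plumbing.EncodedObject (the blob contents are arbitrary):

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/cache"
    )

    func main() {
        c := cache.NewObjectLRUDefault() // DefaultMaxSize, i.e. 96 MiB

        obj := &plumbing.MemoryObject{}
        obj.SetType(plumbing.BlobObject)
        if _, err := obj.Write([]byte("hello")); err != nil {
            panic(err)
        }

        c.Put(obj)
        if cached, ok := c.Get(obj.Hash()); ok {
            fmt.Println("cached object type:", cached.Type())
        }
    }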
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go
deleted file mode 100644
index a3ebed3f6c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package plumbing
-
-import "fmt"
-
-type PermanentError struct {
- Err error
-}
-
-func NewPermanentError(err error) *PermanentError {
- if err == nil {
- return nil
- }
-
- return &PermanentError{Err: err}
-}
-
-func (e *PermanentError) Error() string {
- return fmt.Sprintf("permanent client error: %s", e.Err.Error())
-}
-
-type UnexpectedError struct {
- Err error
-}
-
-func NewUnexpectedError(err error) *UnexpectedError {
- if err == nil {
- return nil
- }
-
- return &UnexpectedError{Err: err}
-}
-
-func (e *UnexpectedError) Error() string {
- return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
-}
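A small sketch of the wrappers above; note that both constructors return nil when given a nil error, so call sites can wrap unconditionally:

    package main

    import (
        "errors"
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    func main() {
        err := plumbing.NewPermanentError(errors.New("authentication failed"))
        fmt.Println(err) // permanent client error: authentication failed

        // Wrapping a nil error yields a nil *PermanentError.
        fmt.Println(plumbing.NewPermanentError(nil) == nil) // true
    }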
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go
deleted file mode 100644
index 0994bc4d75..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package filemode
-
-import (
- "encoding/binary"
- "fmt"
- "os"
- "strconv"
-)
-
-// A FileMode represents the kind of tree entries used by git. It
-// resembles regular file system modes, although FileModes are
-// considerably simpler (there are not so many), and some, like
-// Submodule, have no file system equivalent.
-type FileMode uint32
-
-const (
- // Empty is used as the FileMode of tree elements when comparing
- // trees in the following situations:
- //
- // - the mode of tree elements before their creation.
- // - the mode of tree elements after their deletion.
- // - the mode of unmerged elements when checking the index.
- //
- // Empty has no file system equivalent. As Empty is the zero value
- // of FileMode, it is also returned by New and NewFromOSFileMode
- // along with an error, when they fail.
- Empty FileMode = 0
- // Dir represents a directory.
- Dir FileMode = 0040000
- // Regular represents non-executable files. Please note this is not
- // the same as golang regular files, which include executable files.
- Regular FileMode = 0100644
- // Deprecated represents non-executable files with the group writable
- // bit set. This mode was supported by the first versions of git,
- // but it has been deprecated nowadays. This library uses it
- // internally, so you can read old packfiles, but will treat such files
- // as Regulars when interfacing with the outside world. This is the
- // standard git behaviour.
- Deprecated FileMode = 0100664
- // Executable represents executable files.
- Executable FileMode = 0100755
- // Symlink represents symbolic links to files.
- Symlink FileMode = 0120000
- // Submodule represents git submodules. This mode has no file system
- // equivalent.
- Submodule FileMode = 0160000
-)
-
-// New takes the octal string representation of a FileMode and returns
-// the FileMode and a nil error. If the string cannot be parsed as a
-// 32-bit unsigned octal number, it returns Empty and the parsing error.
-//
-// Example: "40000" means Dir, "100644" means Regular.
-//
-// Please note this function does not check if the returned FileMode
-// is valid in git or if it is malformed. For instance, "1" will
-// return the malformed FileMode(1) and a nil error.
-func New(s string) (FileMode, error) {
- n, err := strconv.ParseUint(s, 8, 32)
- if err != nil {
- return Empty, err
- }
-
- return FileMode(n), nil
-}
-
-// NewFromOSFileMode returns the FileMode used by git to represent
-// the provided file system modes and a nil error on success. If the
-// file system mode cannot be mapped to any valid git mode (as with
-// sockets or named pipes), it will return Empty and an error.
-//
-// Note that some git modes cannot be generated from os.FileModes, like
-// Deprecated and Submodule; Empty, along with an error, is returned
-// only when the method fails.
-func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
- if m.IsRegular() {
- if isSetTemporary(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetCharDevice(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetUserExecutable(m) {
- return Executable, nil
- }
- return Regular, nil
- }
-
- if m.IsDir() {
- return Dir, nil
- }
-
- if isSetSymLink(m) {
- return Symlink, nil
- }
-
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
-}
-
-func isSetCharDevice(m os.FileMode) bool {
- return m&os.ModeCharDevice != 0
-}
-
-func isSetTemporary(m os.FileMode) bool {
- return m&os.ModeTemporary != 0
-}
-
-func isSetUserExecutable(m os.FileMode) bool {
- return m&0100 != 0
-}
-
-func isSetSymLink(m os.FileMode) bool {
- return m&os.ModeSymlink != 0
-}
-
-// Bytes returns a slice of 4 bytes with the mode in little-endian
-// encoding.
-func (m FileMode) Bytes() []byte {
- ret := make([]byte, 4)
- binary.LittleEndian.PutUint32(ret, uint32(m))
- return ret[:]
-}
-
-// IsMalformed reports whether the FileMode should not appear in a git
-// packfile: that is, Empty and any other mode not mentioned as a
-// constant in this package.
-func (m FileMode) IsMalformed() bool {
- return m != Dir &&
- m != Regular &&
- m != Deprecated &&
- m != Executable &&
- m != Symlink &&
- m != Submodule
-}
-
-// String returns the FileMode as a string in the standard git format,
-// that is, an octal number padded with zeros to 7 digits. Malformed
-// modes are printed in that same format, for easier debugging.
-//
-// Example: Regular is "0100644", Empty is "0000000".
-func (m FileMode) String() string {
- return fmt.Sprintf("%07o", uint32(m))
-}
-
-// IsRegular reports whether the FileMode represents that of a regular
-// file, that is, either Regular or Deprecated. Please note that Executable
-// files are not regular, even though in the UNIX tradition they usually
-// are: see the IsFile method.
-func (m FileMode) IsRegular() bool {
- return m == Regular ||
- m == Deprecated
-}
-
-// IsFile reports whether the FileMode represents that of a file, that is,
-// Regular, Deprecated, Executable or Symlink.
-func (m FileMode) IsFile() bool {
- return m == Regular ||
- m == Deprecated ||
- m == Executable ||
- m == Symlink
-}
-
-// ToOSFileMode returns the os.FileMode to be used when creating file
-// system elements with the given git mode and a nil error on success.
-//
-// When the provided mode cannot be mapped to a valid file system mode
-// (e.g. Submodule) it returns os.FileMode(0) and an error.
-//
-// The returned file mode does not take into account the umask.
-func (m FileMode) ToOSFileMode() (os.FileMode, error) {
- switch m {
- case Dir:
- return os.ModePerm | os.ModeDir, nil
- case Submodule:
- return os.ModePerm | os.ModeDir, nil
- case Regular:
- return os.FileMode(0644), nil
- // Deprecated is no longer allowed: treated as a Regular instead
- case Deprecated:
- return os.FileMode(0644), nil
- case Executable:
- return os.FileMode(0755), nil
- case Symlink:
- return os.ModePerm | os.ModeSymlink, nil
- }
-
- return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
-}
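A sketch exercising the conversions above; the octal string and the os.FileMode value are illustrative:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/src-d/go-git.v4/plumbing/filemode"
    )

    func main() {
        // Parse the octal string form used inside tree objects.
        m, err := filemode.New("100755")
        if err != nil {
            panic(err)
        }
        fmt.Println(m, m.IsFile(), m.IsRegular()) // 0100755 true false

        // Map an os.FileMode to its git equivalent and back.
        gm, err := filemode.NewFromOSFileMode(os.FileMode(0755))
        if err != nil {
            panic(err)
        }
        osm, _ := gm.ToOSFileMode()
        fmt.Println(gm, osm) // 0100755 -rwxr-xr-x
    }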
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/commitgraph.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/commitgraph.go
deleted file mode 100644
index e43cd8978a..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/commitgraph.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package commitgraph
-
-import (
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// CommitData is a reduced representation of Commit as presented in the commit graph
-// file. It is merely useful as an optimization for walking the commit graphs.
-type CommitData struct {
- // TreeHash is the hash of the root tree of the commit.
- TreeHash plumbing.Hash
- // ParentIndexes are the indexes of the parent commits of the commit.
- ParentIndexes []int
- // ParentHashes are the hashes of the parent commits of the commit.
- ParentHashes []plumbing.Hash
- // Generation is the pre-computed generation number of the commit in the
- // commit graph, or zero if not available.
- Generation int
- // When is the timestamp of the commit.
- When time.Time
-}
-
-// Index represents a commit graph that allows indexed access to its
-// nodes using the commit object hash.
-type Index interface {
- // GetIndexByHash gets the index in the commit graph from commit hash, if available
- GetIndexByHash(h plumbing.Hash) (int, error)
- // GetCommitDataByIndex gets the commit data from the commit graph using
- // an index obtained from a child node, if available
- GetCommitDataByIndex(i int) (*CommitData, error)
- // Hashes returns all the hashes that are available in the index
- Hashes() []plumbing.Hash
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/doc.go
deleted file mode 100644
index 41cd8b1e31..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/doc.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Package commitgraph implements encoding and decoding of commit-graph files.
-//
-// Git commit graph format
-// =======================
-//
-// The Git commit graph stores a list of commit OIDs and some associated
-// metadata, including:
-//
-// - The generation number of the commit. Commits with no parents have
-// generation number 1; commits with parents have generation number
-// one more than the maximum generation number of their parents. Zero
-// is reserved as special and can be used to mark a generation number
-// as invalid or "not computed".
-//
-// - The root tree OID.
-//
-// - The commit date.
-//
-// - The parents of the commit, stored using positional references within
-// the graph file.
-//
-// These positional references are stored as unsigned 32-bit integers
-// corresponding to the array position within the list of commit OIDs. Due
-// to some special constants we use to track parents, we can store at most
-// (1 << 30) + (1 << 29) + (1 << 28) - 1 (around 1.8 billion) commits.
-//
-// Commit graph files have the following format:
-//
-// In order to allow extensions that add extra data to the graph, we organize
-// the body into "chunks" and provide a binary lookup table at the beginning
-// of the body. The header includes certain values, such as number of chunks
-// and hash type.
-//
-// All 4-byte numbers are in network order.
-//
-// HEADER:
-//
-// 4-byte signature:
-// The signature is: {'C', 'G', 'P', 'H'}
-//
-// 1-byte version number:
-// Currently, the only valid version is 1.
-//
-// 1-byte Hash Version (1 = SHA-1)
-// We infer the hash length (H) from this value.
-//
-// 1-byte number (C) of "chunks"
-//
-// 1-byte (reserved for later use)
-// Current clients should ignore this value.
-//
-// CHUNK LOOKUP:
-//
-// (C + 1) * 12 bytes listing the table of contents for the chunks:
-// First 4 bytes describe the chunk id. Value 0 is a terminating label.
-// Other 8 bytes provide the byte-offset in current file for chunk to
-// start. (Chunks are ordered contiguously in the file, so you can infer
-// the length using the next chunk position if necessary.) Each chunk
-// ID appears at most once.
-//
-// The remaining data in the body is described one chunk at a time, and
-// these chunks may be given in any order. Chunks are required unless
-// otherwise specified.
-//
-// CHUNK DATA:
-//
-// OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
-// The ith entry, F[i], stores the number of OIDs with first
-// byte at most i. Thus F[255] stores the total
-// number of commits (N).
-//
-// OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
-// The OIDs for all commits in the graph, sorted in ascending order.
-//
-// Commit Data (ID: {'C', 'D', 'A', 'T' }) (N * (H + 16) bytes)
-// * The first H bytes are for the OID of the root tree.
-// * The next 8 bytes are for the positions of the first two parents
-// of the ith commit. Stores value 0x70000000 if no parent in that
-// position. If there are more than two parents, the second value
-// has its most-significant bit on and the other bits store an array
-// position into the Extra Edge List chunk.
-// * The next 8 bytes store the generation number of the commit and
-// the commit time in seconds since EPOCH. The generation number
-// uses the higher 30 bits of the first 4 bytes, while the commit
-// time uses the 32 bits of the second 4 bytes, along with the lowest
-// 2 bits of the lowest byte, storing the 33rd and 34th bit of the
-// commit time.
-//
-// Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
-// This list of 4-byte values stores the second through nth parents for
-// all octopus merges. The second parent value in the commit data stores
-// an array position within this list along with the most-significant bit
-// on. Starting at that array position, iterate through this list of commit
-// positions for the parents until reaching a value with the most-significant
-// bit on. The other bits correspond to the position of the last parent.
-//
-// TRAILER:
-//
-// H-byte HASH-checksum of all of the above.
-//
-// Source:
-// https://raw.githubusercontent.com/git/git/master/Documentation/technical/commit-graph-format.txt
-package commitgraph
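To make the layout concrete, a small sketch (N = 5 commits is an arbitrary example) that computes the chunk offsets for the three mandatory chunks exactly as the CHUNK LOOKUP rules above describe:

    package main

    import "fmt"

    func main() {
        const (
            hashLen   = 20 // H, for SHA-1
            numCommit = 5  // N, a made-up example
        )

        // The three mandatory chunks, sized as described above.
        sizes := []uint64{
            256 * 4,                    // OID Fanout
            numCommit * hashLen,        // OID Lookup
            numCommit * (hashLen + 16), // Commit Data
        }

        // 8-byte header, then (C+1) chunk-lookup rows of 12 bytes each.
        offset := uint64(8 + (len(sizes)+1)*12)
        for i, size := range sizes {
            fmt.Printf("chunk %d starts at byte %d\n", i, offset)
            offset += size
        }
        fmt.Println("trailer checksum starts at byte", offset)
    }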
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/encoder.go
deleted file mode 100644
index 615e833c1e..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/encoder.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package commitgraph
-
-import (
- "crypto/sha1"
- "hash"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-// Encoder writes MemoryIndex structs to an output stream.
-type Encoder struct {
- io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new stream encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := sha1.New()
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode writes an index into the commit-graph file
-func (e *Encoder) Encode(idx Index) error {
- // Get all the hashes in the input index
- hashes := idx.Hashes()
-
- // Sort the input and prepare helper structures we'll need for encoding
- hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
-
- chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
- chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
- if extraEdgesCount > 0 {
- chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
- chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
- }
-
- if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
- return err
- }
- if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
- return err
- }
- if err := e.encodeFanout(fanout); err != nil {
- return err
- }
- if err := e.encodeOidLookup(hashes); err != nil {
- return err
- }
- if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
- if err = e.encodeExtraEdges(extraEdges); err != nil {
- return err
- }
- } else {
- return err
- }
-
- return e.encodeChecksum()
-}
-
-func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
- // Sort the hashes and build our index
- plumbing.HashesSort(hashes)
- hashToIndex = make(map[plumbing.Hash]uint32)
- fanout = make([]uint32, 256)
- for i, hash := range hashes {
- hashToIndex[hash] = uint32(i)
- fanout[hash[0]]++
- }
-
- // Convert the fanout to cumulative values
- for i := 1; i <= 0xff; i++ {
- fanout[i] += fanout[i-1]
- }
-
- // Find out if we will need an extra edge table: every octopus merge
- // contributes len(ParentHashes)-1 entries to it.
- for i := 0; i < len(hashes); i++ {
- v, _ := idx.GetCommitDataByIndex(i)
- if len(v.ParentHashes) > 2 {
- extraEdgesCount += uint32(len(v.ParentHashes) - 1)
- }
- }
-
- return
-}
-
-func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
- if _, err = e.Write(commitFileSignature); err == nil {
- _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
- }
- return
-}
-
-func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
- // 8 bytes of file header, 12 bytes for each chunk header and 12 bytes for the terminator
- offset := uint64(8 + len(chunkSignatures)*12 + 12)
- for i, signature := range chunkSignatures {
- if _, err = e.Write(signature); err == nil {
- err = binary.WriteUint64(e, offset)
- }
- if err != nil {
- return
- }
- offset += chunkSizes[i]
- }
- if _, err = e.Write(lastSignature); err == nil {
- err = binary.WriteUint64(e, offset)
- }
- return
-}
-
-func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
- for i := 0; i <= 0xff; i++ {
- if err = binary.WriteUint32(e, fanout[i]); err != nil {
- return
- }
- }
- return
-}
-
-func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
- for _, hash := range hashes {
- if _, err = e.Write(hash[:]); err != nil {
- return err
- }
- }
- return
-}
-
-func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
- for _, hash := range hashes {
- origIndex, _ := idx.GetIndexByHash(hash)
- commitData, _ := idx.GetCommitDataByIndex(origIndex)
- if _, err = e.Write(commitData.TreeHash[:]); err != nil {
- return
- }
-
- var parent1, parent2 uint32
- if len(commitData.ParentHashes) == 0 {
- parent1 = parentNone
- parent2 = parentNone
- } else if len(commitData.ParentHashes) == 1 {
- parent1 = hashToIndex[commitData.ParentHashes[0]]
- parent2 = parentNone
- } else if len(commitData.ParentHashes) == 2 {
- parent1 = hashToIndex[commitData.ParentHashes[0]]
- parent2 = hashToIndex[commitData.ParentHashes[1]]
- } else if len(commitData.ParentHashes) > 2 {
- parent1 = hashToIndex[commitData.ParentHashes[0]]
- parent2 = uint32(len(extraEdges)) | parentOctopusUsed
- for _, parentHash := range commitData.ParentHashes[1:] {
- extraEdges = append(extraEdges, hashToIndex[parentHash])
- }
- extraEdges[len(extraEdges)-1] |= parentLast
- }
-
- if err = binary.WriteUint32(e, parent1); err == nil {
- err = binary.WriteUint32(e, parent2)
- }
- if err != nil {
- return
- }
-
- unixTime := uint64(commitData.When.Unix())
- unixTime |= uint64(commitData.Generation) << 34
- if err = binary.WriteUint64(e, unixTime); err != nil {
- return
- }
- }
- return
-}
-
-func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
- for _, parent := range extraEdges {
- if err = binary.WriteUint32(e, parent); err != nil {
- return
- }
- }
- return
-}
-
-func (e *Encoder) encodeChecksum() error {
- _, err := e.Write(e.hash.Sum(nil)[:20])
- return err
-}
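An end-to-end sketch of the encoder: build a MemoryIndex (defined in memory.go below) and serialize it. The two hashes and the output file name are made-up values:

    package main

    import (
        "os"
        "time"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    )

    func main() {
        // A one-commit graph: a root commit with no parents.
        idx := commitgraph.NewMemoryIndex()
        idx.Add(plumbing.NewHash("a4a7dce85cf63874e984719f4fdd239f5145052f"), &commitgraph.CommitData{
            TreeHash: plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
            When:     time.Now(),
        })

        f, err := os.Create("commit-graph")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := commitgraph.NewEncoder(f).Encode(idx); err != nil {
            panic(err)
        }
    }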
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/file.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/file.go
deleted file mode 100644
index 1f82abd754..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/file.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package commitgraph
-
-import (
- "bytes"
- encbin "encoding/binary"
- "errors"
- "io"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
- // file version is not supported.
- ErrUnsupportedVersion = errors.New("Unsupported version")
- // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
- // hash function is not supported. Currently only SHA-1 is defined and
- // supported
- ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
- // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
- // graph file is corrupted.
- ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")
-
- commitFileSignature = []byte{'C', 'G', 'P', 'H'}
- oidFanoutSignature = []byte{'O', 'I', 'D', 'F'}
- oidLookupSignature = []byte{'O', 'I', 'D', 'L'}
- commitDataSignature = []byte{'C', 'D', 'A', 'T'}
- extraEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
- lastSignature = []byte{0, 0, 0, 0}
-
- parentNone = uint32(0x70000000)
- parentOctopusUsed = uint32(0x80000000)
- parentOctopusMask = uint32(0x7fffffff)
- parentLast = uint32(0x80000000)
-)
-
-type fileIndex struct {
- reader io.ReaderAt
- fanout [256]int
- oidFanoutOffset int64
- oidLookupOffset int64
- commitDataOffset int64
- extraEdgeListOffset int64
-}
-
-// OpenFileIndex opens a serialized commit graph file in the format described at
-// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
-func OpenFileIndex(reader io.ReaderAt) (Index, error) {
- fi := &fileIndex{reader: reader}
-
- if err := fi.verifyFileHeader(); err != nil {
- return nil, err
- }
- if err := fi.readChunkHeaders(); err != nil {
- return nil, err
- }
- if err := fi.readFanout(); err != nil {
- return nil, err
- }
-
- return fi, nil
-}
-
-func (fi *fileIndex) verifyFileHeader() error {
- // Verify file signature
- var signature = make([]byte, 4)
- if _, err := fi.reader.ReadAt(signature, 0); err != nil {
- return err
- }
- if !bytes.Equal(signature, commitFileSignature) {
- return ErrMalformedCommitGraphFile
- }
-
- // Read and verify the file header
- var header = make([]byte, 4)
- if _, err := fi.reader.ReadAt(header, 4); err != nil {
- return err
- }
- if header[0] != 1 {
- return ErrUnsupportedVersion
- }
- if header[1] != 1 {
- return ErrUnsupportedHash
- }
-
- return nil
-}
-
-func (fi *fileIndex) readChunkHeaders() error {
- var chunkID = make([]byte, 4)
- for i := 0; ; i++ {
- chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
- if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
- return err
- }
- chunkOffset, err := binary.ReadUint64(chunkHeader)
- if err != nil {
- return err
- }
-
- if bytes.Equal(chunkID, oidFanoutSignature) {
- fi.oidFanoutOffset = int64(chunkOffset)
- } else if bytes.Equal(chunkID, oidLookupSignature) {
- fi.oidLookupOffset = int64(chunkOffset)
- } else if bytes.Equal(chunkID, commitDataSignature) {
- fi.commitDataOffset = int64(chunkOffset)
- } else if bytes.Equal(chunkID, extraEdgeListSignature) {
- fi.extraEdgeListOffset = int64(chunkOffset)
- } else if bytes.Equal(chunkID, lastSignature) {
- break
- }
- }
-
- if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
- return ErrMalformedCommitGraphFile
- }
-
- return nil
-}
-
-func (fi *fileIndex) readFanout() error {
- fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
- for i := 0; i < 256; i++ {
- fanoutValue, err := binary.ReadUint32(fanoutReader)
- if err != nil {
- return err
- }
- if fanoutValue > 0x7fffffff {
- return ErrMalformedCommitGraphFile
- }
- fi.fanout[i] = int(fanoutValue)
- }
- return nil
-}
-
-func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
- var oid plumbing.Hash
-
- // Find the hash in the oid lookup table
- var low int
- if h[0] == 0 {
- low = 0
- } else {
- low = fi.fanout[h[0]-1]
- }
- high := fi.fanout[h[0]]
- for low < high {
- mid := (low + high) >> 1
- offset := fi.oidLookupOffset + int64(mid)*20
- if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
- return 0, err
- }
- cmp := bytes.Compare(h[:], oid[:])
- if cmp < 0 {
- high = mid
- } else if cmp == 0 {
- return mid, nil
- } else {
- low = mid + 1
- }
- }
-
- return 0, plumbing.ErrObjectNotFound
-}
-
-func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
- if idx >= fi.fanout[0xff] {
- return nil, plumbing.ErrObjectNotFound
- }
-
- offset := fi.commitDataOffset + int64(idx)*36
- commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
-
- treeHash, err := binary.ReadHash(commitDataReader)
- if err != nil {
- return nil, err
- }
- parent1, err := binary.ReadUint32(commitDataReader)
- if err != nil {
- return nil, err
- }
- parent2, err := binary.ReadUint32(commitDataReader)
- if err != nil {
- return nil, err
- }
- genAndTime, err := binary.ReadUint64(commitDataReader)
- if err != nil {
- return nil, err
- }
-
- var parentIndexes []int
- if parent2&parentOctopusUsed == parentOctopusUsed {
- // Octopus merge
- parentIndexes = []int{int(parent1 & parentOctopusMask)}
- offset := fi.extraEdgeListOffset + 4*int64(parent2&parentOctopusMask)
- buf := make([]byte, 4)
- for {
- _, err := fi.reader.ReadAt(buf, offset)
- if err != nil {
- return nil, err
- }
-
- parent := encbin.BigEndian.Uint32(buf)
- offset += 4
- parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
- if parent&parentLast == parentLast {
- break
- }
- }
- } else if parent2 != parentNone {
- parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
- } else if parent1 != parentNone {
- parentIndexes = []int{int(parent1 & parentOctopusMask)}
- }
-
- parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
- if err != nil {
- return nil, err
- }
-
- return &CommitData{
- TreeHash: treeHash,
- ParentIndexes: parentIndexes,
- ParentHashes: parentHashes,
- Generation: int(genAndTime >> 34),
- When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
- }, nil
-}
-
-func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
- hashes := make([]plumbing.Hash, len(indexes))
-
- for i, idx := range indexes {
- if idx >= fi.fanout[0xff] {
- return nil, ErrMalformedCommitGraphFile
- }
-
- offset := fi.oidLookupOffset + int64(idx)*20
- if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
- return nil, err
- }
- }
-
- return hashes, nil
-}
-
-// Hashes returns all the hashes that are available in the index
-func (fi *fileIndex) Hashes() []plumbing.Hash {
- hashes := make([]plumbing.Hash, fi.fanout[0xff])
- for i := 0; i < fi.fanout[0xff]; i++ {
- offset := fi.oidLookupOffset + int64(i)*20
- if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
- return nil
- }
- }
- return hashes
-}
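The matching read path, as a sketch; the path assumes the conventional commit-graph location under .git, which not every repository has:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
    )

    func main() {
        f, err := os.Open(".git/objects/info/commit-graph")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        idx, err := commitgraph.OpenFileIndex(f)
        if err != nil {
            panic(err)
        }

        // Walk every commit recorded in the graph.
        for _, h := range idx.Hashes() {
            i, _ := idx.GetIndexByHash(h)
            data, _ := idx.GetCommitDataByIndex(i)
            fmt.Println(h, "generation", data.Generation, "parents", len(data.ParentHashes))
        }
    }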
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/memory.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/memory.go
deleted file mode 100644
index f5afd4c598..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph/memory.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package commitgraph
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// MemoryIndex provides a way to build the commit-graph in memory
-// for later encoding to file.
-type MemoryIndex struct {
- commitData []*CommitData
- indexMap map[plumbing.Hash]int
-}
-
-// NewMemoryIndex creates an in-memory commit graph representation
-func NewMemoryIndex() *MemoryIndex {
- return &MemoryIndex{
- indexMap: make(map[plumbing.Hash]int),
- }
-}
-
-// GetIndexByHash gets the index in the commit graph from commit hash, if available
-func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
- i, ok := mi.indexMap[h]
- if ok {
- return i, nil
- }
-
- return 0, plumbing.ErrObjectNotFound
-}
-
-// GetCommitDataByIndex gets the commit data from the commit graph using
-// an index obtained from a child node, if available
-func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
- if i >= len(mi.commitData) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- commitData := mi.commitData[i]
-
- // Map parent hashes to parent indexes
- if commitData.ParentIndexes == nil {
- parentIndexes := make([]int, len(commitData.ParentHashes))
- for i, parentHash := range commitData.ParentHashes {
- var err error
- if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
- return nil, err
- }
- }
- commitData.ParentIndexes = parentIndexes
- }
-
- return commitData, nil
-}
-
-// Hashes returns all the hashes that are available in the index
-func (mi *MemoryIndex) Hashes() []plumbing.Hash {
- hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
- for k := range mi.indexMap {
- hashes = append(hashes, k)
- }
- return hashes
-}
-
-// Add adds a new node to the memory index
-func (mi *MemoryIndex) Add(hash plumbing.Hash, commitData *CommitData) {
- // The parent indexes are calculated lazily in GetCommitDataByIndex,
- // which allows adding nodes out of order as long as all parents
- // are eventually resolved
- commitData.ParentIndexes = nil
- mi.indexMap[hash] = len(mi.commitData)
- mi.commitData = append(mi.commitData, commitData)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go
deleted file mode 100644
index 8f98ad1741..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package config
-
-// New creates a new config instance.
-func New() *Config {
- return &Config{}
-}
-
-// Config contains all the sections, comments and includes from a config file.
-type Config struct {
- Comment *Comment
- Sections Sections
- Includes Includes
-}
-
-// Includes is a list of Includes in a config file.
-type Includes []*Include
-
-// Include is a reference to an included config file.
-type Include struct {
- Path string
- Config *Config
-}
-
-// Comment string without the prefix '#' or ';'.
-type Comment string
-
-const (
- // NoSubsection token is passed to Config.AddOption and Config.SetOption to
- // represent the absence of a subsection.
- NoSubsection = ""
-)
-
-// Section returns an existing section with the given name or creates a new one.
-func (c *Config) Section(name string) *Section {
- for i := len(c.Sections) - 1; i >= 0; i-- {
- s := c.Sections[i]
- if s.IsName(name) {
- return s
- }
- }
-
- s := &Section{Name: name}
- c.Sections = append(c.Sections, s)
- return s
-}
-
-// AddOption adds an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).AddOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).AddOption(key, value)
- }
-
- return c
-}
-
-// SetOption sets an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).SetOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).SetOption(key, value)
- }
-
- return c
-}
-
-// RemoveSection removes a section from a config file.
-func (c *Config) RemoveSection(name string) *Config {
- result := Sections{}
- for _, s := range c.Sections {
- if !s.IsName(name) {
- result = append(result, s)
- }
- }
-
- c.Sections = result
- return c
-}
-
-// RemoveSubsection removes a subsection from a config file.
-func (c *Config) RemoveSubsection(section string, subsection string) *Config {
- for _, s := range c.Sections {
- if s.IsName(section) {
- result := Subsections{}
- for _, ss := range s.Subsections {
- if !ss.IsName(subsection) {
- result = append(result, ss)
- }
- }
- s.Subsections = result
- }
- }
-
- return c
-}
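A short sketch of the mutators above; the section, subsection and option names are illustrative:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/format/config"
    )

    func main() {
        cfg := config.New()

        // Options at the section and at the subsection level.
        cfg.SetOption("core", config.NoSubsection, "filemode", "false")
        cfg.SetOption("branch", "devel", "remote", "origin")

        fmt.Println(cfg.Section("core").Option("filemode"))                     // false
        fmt.Println(cfg.Section("branch").Subsection("devel").Option("remote")) // origin

        cfg.RemoveSection("core")
        fmt.Println(len(cfg.Sections)) // 1
    }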
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go
deleted file mode 100644
index 0f02ce1930..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package config
-
-import (
- "io"
-
- "github.com/src-d/gcfg"
-)
-
-// A Decoder reads and decodes config files from an input stream.
-type Decoder struct {
- io.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{r}
-}
-
-// Decode reads the whole config from its input and stores it in the
-// value pointed to by config.
-func (d *Decoder) Decode(config *Config) error {
- cb := func(s string, ss string, k string, v string, bv bool) error {
- if ss == "" && k == "" {
- config.Section(s)
- return nil
- }
-
- if ss != "" && k == "" {
- config.Section(s).Subsection(ss)
- return nil
- }
-
- config.AddOption(s, ss, k, v)
- return nil
- }
- return gcfg.ReadWithCallback(d, cb)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go
deleted file mode 100644
index 3986c83658..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package config implements encoding and decoding of git config files.
-//
-// Configuration File
-// ------------------
-//
-// The Git configuration file contains a number of variables that affect
-// the Git commands' behavior. The `.git/config` file in each repository
-// is used to store the configuration for that repository, and
-// `$HOME/.gitconfig` is used to store a per-user configuration as
-// fallback values for the `.git/config` file. The file `/etc/gitconfig`
-// can be used to store a system-wide default configuration.
-//
-// The configuration variables are used by both the Git plumbing
-// and the porcelains. The variables are divided into sections, wherein
-// the fully qualified variable name of the variable itself is the last
-// dot-separated segment and the section name is everything before the last
-// dot. The variable names are case-insensitive, allow only alphanumeric
-// characters and `-`, and must start with an alphabetic character. Some
-// variables may appear multiple times; we say then that the variable is
-// multivalued.
-//
-// Syntax
-// ~~~~~~
-//
-// The syntax is fairly flexible and permissive; whitespaces are mostly
-// ignored. The '#' and ';' characters begin comments that run to the end
-// of the line; blank lines are ignored.
-//
-// The file consists of sections and variables. A section begins with
-// the name of the section in square brackets and continues until the next
-// section begins. Section names are case-insensitive. Only alphanumeric
-// characters, `-` and `.` are allowed in section names. Each variable
-// must belong to some section, which means that there must be a section
-// header before the first setting of a variable.
-//
-// Sections can be further divided into subsections. To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// --------
-// [section "subsection"]
-//
-// --------
-//
-// Subsection names are case sensitive and can contain any characters except
-// newline (doublequote `"` and backslash can be included by escaping them
-// as `\"` and `\\`, respectively). Section headers cannot span multiple
-// lines. Variables may belong directly to a section or to a given subsection.
-// You can have `[section]` if you have `[section "subsection"]`, but you
-// don't need to.
-//
-// There is also a deprecated `[section.subsection]` syntax. With this
-// syntax, the subsection name is converted to lower-case and is also
-// compared case sensitively. These subsection names follow the same
-// restrictions as section names.
-//
-// All the other lines (and the remainder of the line after the section
-// header) are recognized as setting variables, in the form
-// 'name = value' (or just 'name', which is a short-hand to say that
-// the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and `-`, and must start with an alphabetic character.
-//
-// A line that defines a value can be continued to the next line by
-// ending it with a `\`; the backslash and the end-of-line are
-// stripped. Leading whitespaces after 'name =', the remainder of the
-// line after the first comment character '#' or ';', and trailing
-// whitespaces of the line are discarded unless they are enclosed in
-// double quotes. Internal whitespaces within the value are retained
-// verbatim.
-//
-// Inside double quotes, double quote `"` and backslash `\` characters
-// must be escaped: use `\"` for `"` and `\\` for `\`.
-//
-// The following escape sequences (beside `\"` and `\\`) are recognized:
-// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
-// and `\b` for backspace (BS). Other char escape sequences (including octal
-// escape sequences) are invalid.
-//
-// Includes
-// ~~~~~~~~
-//
-// You can include one config file from another by setting the special
-// `include.path` variable to the name of the file to be included. The
-// variable takes a pathname as its value, and is subject to tilde
-// expansion.
-//
-// The included file is expanded immediately, as if its contents had been
-// found at the location of the include directive. If the value of the
-// `include.path` variable is a relative path, the path is considered to be
-// relative to the configuration file in which the include directive was
-// found. See below for examples.
-//
-//
-// Example
-// ~~~~~~~
-//
-// # Core variables
-// [core]
-// ; Don't trust file modes
-// filemode = false
-//
-// # Our diff algorithm
-// [diff]
-// external = /usr/local/bin/diff-wrapper
-// renames = true
-//
-// [branch "devel"]
-// remote = origin
-// merge = refs/heads/devel
-//
-// # Proxy settings
-// [core]
-// gitProxy="ssh" for "kernel.org"
-// gitProxy=default-proxy ; for the rest
-//
-// [include]
-// path = /path/to/foo.inc ; include by absolute path
-// path = foo ; expand "foo" relative to the current file
-// path = ~/foo ; expand "foo" in your `$HOME` directory
-//
-package config
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go
deleted file mode 100644
index 4eac8968ad..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package config
-
-import (
- "fmt"
- "io"
- "strings"
-)
-
-// An Encoder writes config files to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{w}
-}
-
-// Encode writes the config in git config format to the stream of the encoder.
-func (e *Encoder) Encode(cfg *Config) error {
- for _, s := range cfg.Sections {
- if err := e.encodeSection(s); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSection(s *Section) error {
- if len(s.Options) > 0 {
- if err := e.printf("[%s]\n", s.Name); err != nil {
- return err
- }
-
- if err := e.encodeOptions(s.Options); err != nil {
- return err
- }
- }
-
- for _, ss := range s.Subsections {
- if err := e.encodeSubsection(s.Name, ss); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
- //TODO: escape
- if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
- return err
- }
-
- return e.encodeOptions(s.Options)
-}
-
-func (e *Encoder) encodeOptions(opts Options) error {
- for _, o := range opts {
- pattern := "\t%s = %s\n"
- if strings.Contains(o.Value, "\\") {
- pattern = "\t%s = %q\n"
- }
-
- if err := e.printf(pattern, o.Key, o.Value); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) printf(msg string, args ...interface{}) error {
- _, err := fmt.Fprintf(e.w, msg, args...)
- return err
-}
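A decode-modify-encode round trip, as a sketch; the sample config text mirrors the example from doc.go above:

    package main

    import (
        "bytes"
        "fmt"
        "strings"

        "gopkg.in/src-d/go-git.v4/plumbing/format/config"
    )

    func main() {
        raw := "[core]\n\tfilemode = false\n[branch \"devel\"]\n\tremote = origin\n"

        cfg := config.New()
        if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
            panic(err)
        }

        cfg.SetOption("branch", "devel", "merge", "refs/heads/devel")

        var buf bytes.Buffer
        if err := config.NewEncoder(&buf).Encode(cfg); err != nil {
            panic(err)
        }
        fmt.Print(buf.String())
    }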
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go
deleted file mode 100644
index d4775e4f0e..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-)
-
-// Option defines a key/value entity in a config file.
-type Option struct {
- // Key preserving the original case.
- // Use IsKey to compare keys regardless of case.
- Key string
- // Original value as a string; it may not be normalized.
- Value string
-}
-
-type Options []*Option
-
-// IsKey returns true if the given key matches
-// this option's key in a case-insensitive comparison.
-func (o *Option) IsKey(key string) bool {
- return strings.ToLower(o.Key) == strings.ToLower(key)
-}
-
-func (opts Options) GoString() string {
- var strs []string
- for _, opt := range opts {
- strs = append(strs, fmt.Sprintf("%#v", opt))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// Get gets the value for the given key if set,
-// otherwise it returns the empty string.
-//
-// Note that there is no difference between a key that is unset and a
-// key that is set to the empty string.
-//
-// This matches git behaviour since git v1.8.1-rc1:
-// if there are multiple definitions of a key, the
-// last one wins.
-//
-// See: http://article.gmane.org/gmane.linux.kernel/1407184
-//
-// In order to get all possible values for the same key,
-// use GetAll.
-func (opts Options) Get(key string) string {
- for i := len(opts) - 1; i >= 0; i-- {
- o := opts[i]
- if o.IsKey(key) {
- return o.Value
- }
- }
- return ""
-}
-
-// GetAll returns all possible values for the same key.
-func (opts Options) GetAll(key string) []string {
- result := []string{}
- for _, o := range opts {
- if o.IsKey(key) {
- result = append(result, o.Value)
- }
- }
- return result
-}
-
-func (opts Options) withoutOption(key string) Options {
- result := Options{}
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- }
- }
- return result
-}
-
-func (opts Options) withAddedOption(key string, value string) Options {
- return append(opts, &Option{key, value})
-}
-
-func (opts Options) withSettedOption(key string, values ...string) Options {
- var result Options
- var added []string
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- continue
- }
-
- if contains(values, o.Value) {
- added = append(added, o.Value)
- result = append(result, o)
- continue
- }
- }
-
- for _, value := range values {
- if contains(added, value) {
- continue
- }
-
- result = result.withAddedOption(key, value)
- }
-
- return result
-}
-
-func contains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
-
- return false
-}
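A sketch of the last-one-wins rule described above; the include.path values echo the doc.go example:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/format/config"
    )

    func main() {
        cfg := config.New()

        // "path" is multivalued: AddOption appends instead of replacing.
        cfg.AddOption("include", config.NoSubsection, "path", "/path/to/foo.inc")
        cfg.AddOption("include", config.NoSubsection, "path", "~/foo")

        opts := cfg.Section("include").Options
        fmt.Println(opts.Get("path"))    // ~/foo (the last definition wins)
        fmt.Println(opts.GetAll("path")) // [/path/to/foo.inc ~/foo]
    }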
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go
deleted file mode 100644
index 4a17e3b21b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-)
-
-// Section is the representation of a section inside git configuration files.
-// Each Section contains Options that are used by both the Git plumbing
-// and the porcelains.
-// Sections can be further divided into subsections. To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// [section "subsection"]
-//
-// All the other lines (and the remainder of the line after the section header)
-// are recognized as option variables, in the form "name = value" (or just name,
-// which is a short-hand to say that the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and -, and must start with an alphabetic character:
-//
-// [section "subsection1"]
-// option1 = value1
-// option2
-// [section "subsection2"]
-// option3 = value2
-//
-type Section struct {
- Name string
- Options Options
- Subsections Subsections
-}
-
-type Subsection struct {
- Name string
- Options Options
-}
-
-type Sections []*Section
-
-func (s Sections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-type Subsections []*Subsection
-
-func (s Subsections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// IsName checks if the provided name is equal to the Section name, ignoring case.
-func (s *Section) IsName(name string) bool {
- return strings.ToLower(s.Name) == strings.ToLower(name)
-}
-
-// Option returns the value for the specified key. An empty string is
-// returned if the key does not exist.
-func (s *Section) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// AddOption adds a new Option to the Section. The updated Section is returned.
-func (s *Section) AddOption(key string, value string) *Section {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Section. If the option already exists, it is replaced.
-// The updated Section is returned.
-func (s *Section) SetOption(key string, value string) *Section {
- s.Options = s.Options.withSettedOption(key, value)
- return s
-}
-
-// RemoveOption removes the option with the specified key. The updated Section is returned.
-func (s *Section) RemoveOption(key string) *Section {
- s.Options = s.Options.withoutOption(key)
- return s
-}
-
-// Subsection returns a Subsection from the specified Section. If the
-// Subsection does not exist, a new one is created and added to the Section.
-func (s *Section) Subsection(name string) *Subsection {
- for i := len(s.Subsections) - 1; i >= 0; i-- {
- ss := s.Subsections[i]
- if ss.IsName(name) {
- return ss
- }
- }
-
- ss := &Subsection{Name: name}
- s.Subsections = append(s.Subsections, ss)
- return ss
-}
-
-// HasSubsection checks if the Section has a Subsection with the specified name.
-func (s *Section) HasSubsection(name string) bool {
- for _, ss := range s.Subsections {
- if ss.IsName(name) {
- return true
- }
- }
-
- return false
-}
-
-// IsName checks if the name of the subsection is exactly the specified name.
-func (s *Subsection) IsName(name string) bool {
- return s.Name == name
-}
-
-// Option returns the option with the specified key. If the option does not
-// exist, an empty string will be returned.
-func (s *Subsection) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
-func (s *Subsection) AddOption(key string, value string) *Subsection {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Subsection. If the option already exists, it is replaced.
-// The updated Subsection is returned.
-func (s *Subsection) SetOption(key string, value ...string) *Subsection {
- s.Options = s.Options.withSettedOption(key, value...)
- return s
-}
-
-// RemoveOption removes the option with the specified key. The updated Subsection is returned.
-func (s *Subsection) RemoveOption(key string) *Subsection {
- s.Options = s.Options.withoutOption(key)
- return s
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/patch.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/patch.go
deleted file mode 100644
index 7c6cf4aee3..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/patch.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package diff
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
-)
-
-// Operation defines the operation of a diff item.
-type Operation int
-
-const (
-	// Equal item represents an equal diff.
- Equal Operation = iota
- // Add item represents an insert diff.
- Add
- // Delete item represents a delete diff.
- Delete
-)
-
-// Patch represents a collection of steps to transform several files.
-type Patch interface {
- // FilePatches returns a slice of patches per file.
- FilePatches() []FilePatch
- // Message returns an optional message that can be at the top of the
- // Patch representation.
- Message() string
-}
-
-// FilePatch represents the necessary steps to transform one file to another.
-type FilePatch interface {
- // IsBinary returns true if this patch is representing a binary file.
- IsBinary() bool
-	// Files returns the from and to Files, with all the necessary metadata
-	// about them. If the patch creates a new file, "from" will be nil.
- // If the patch deletes a file, "to" will be nil.
- Files() (from, to File)
- // Chunks returns a slice of ordered changes to transform "from" File to
- // "to" File. If the file is a binary one, Chunks will be empty.
- Chunks() []Chunk
-}
-
-// File contains all the file metadata necessary to print some patch formats.
-type File interface {
- // Hash returns the File Hash.
- Hash() plumbing.Hash
- // Mode returns the FileMode.
- Mode() filemode.FileMode
- // Path returns the complete Path to the file, including the filename.
- Path() string
-}
-
-// Chunk represents a portion of the transformation of one file into another.
-type Chunk interface {
- // Content contains the portion of the file.
- Content() string
- // Type contains the Operation to do with this Chunk.
- Type() Operation
-}
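
A consumer of these interfaces typically switches on each chunk's Operation. A hedged sketch under that assumption (countLines is a hypothetical helper, not part of the package):

package stats

import (
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/format/diff"
)

// countLines tallies added and deleted lines in a FilePatch by switching on
// each chunk's Operation, mirroring how the unified encoder walks chunks.
func countLines(fp diff.FilePatch) (added, deleted int) {
	if fp.IsBinary() {
		return 0, 0 // binary patches carry no chunks
	}

	for _, c := range fp.Chunks() {
		content := c.Content()
		if content == "" {
			continue
		}

		n := len(strings.Split(strings.TrimSuffix(content, "\n"), "\n"))
		switch c.Type() {
		case diff.Add:
			added += n
		case diff.Delete:
			deleted += n
		}
	}

	return added, deleted
}
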
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go
deleted file mode 100644
index 169242dc5b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package diff
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-const (
- diffInit = "diff --git a/%s b/%s\n"
-
- chunkStart = "@@ -"
- chunkMiddle = " +"
- chunkEnd = " @@%s\n"
- chunkCount = "%d,%d"
-
- noFilePath = "/dev/null"
- aDir = "a/"
- bDir = "b/"
-
- fPath = "--- %s\n"
- tPath = "+++ %s\n"
- binary = "Binary files %s and %s differ\n"
-
- addLine = "+%s\n"
- deleteLine = "-%s\n"
- equalLine = " %s\n"
-
- oldMode = "old mode %o\n"
- newMode = "new mode %o\n"
- deletedFileMode = "deleted file mode %o\n"
- newFileMode = "new file mode %o\n"
-
- renameFrom = "from"
- renameTo = "to"
- renameFileMode = "rename %s %s\n"
-
- indexAndMode = "index %s..%s %o\n"
- indexNoMode = "index %s..%s\n"
-
- DefaultContextLines = 3
-)
-
-// UnifiedEncoder encodes a unified diff into the provided Writer.
-// There are some unsupported features:
-//     - Similarity index for renames
-//     - Short hash representation
-type UnifiedEncoder struct {
- io.Writer
-
- // ctxLines is the count of unchanged lines that will appear
- // surrounding a change.
- ctxLines int
-
- buf bytes.Buffer
-}
-
-func NewUnifiedEncoder(w io.Writer, ctxLines int) *UnifiedEncoder {
- return &UnifiedEncoder{ctxLines: ctxLines, Writer: w}
-}
-
-func (e *UnifiedEncoder) Encode(patch Patch) error {
- e.printMessage(patch.Message())
-
- if err := e.encodeFilePatch(patch.FilePatches()); err != nil {
- return err
- }
-
- _, err := e.buf.WriteTo(e)
-
- return err
-}
-
-func (e *UnifiedEncoder) encodeFilePatch(filePatches []FilePatch) error {
- for _, p := range filePatches {
- f, t := p.Files()
- if err := e.header(f, t, p.IsBinary()); err != nil {
- return err
- }
-
- g := newHunksGenerator(p.Chunks(), e.ctxLines)
- for _, c := range g.Generate() {
- c.WriteTo(&e.buf)
- }
- }
-
- return nil
-}
-
-func (e *UnifiedEncoder) printMessage(message string) {
- isEmpty := message == ""
- hasSuffix := strings.HasSuffix(message, "\n")
- if !isEmpty && !hasSuffix {
- message += "\n"
- }
-
- e.buf.WriteString(message)
-}
-
-func (e *UnifiedEncoder) header(from, to File, isBinary bool) error {
- switch {
- case from == nil && to == nil:
- return nil
- case from != nil && to != nil:
- hashEquals := from.Hash() == to.Hash()
-
- fmt.Fprintf(&e.buf, diffInit, from.Path(), to.Path())
-
- if from.Mode() != to.Mode() {
- fmt.Fprintf(&e.buf, oldMode+newMode, from.Mode(), to.Mode())
- }
-
- if from.Path() != to.Path() {
- fmt.Fprintf(&e.buf,
- renameFileMode+renameFileMode,
- renameFrom, from.Path(), renameTo, to.Path())
- }
-
- if from.Mode() != to.Mode() && !hashEquals {
- fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), to.Hash())
- } else if !hashEquals {
- fmt.Fprintf(&e.buf, indexAndMode, from.Hash(), to.Hash(), from.Mode())
- }
-
- if !hashEquals {
- e.pathLines(isBinary, aDir+from.Path(), bDir+to.Path())
- }
- case from == nil:
- fmt.Fprintf(&e.buf, diffInit, to.Path(), to.Path())
- fmt.Fprintf(&e.buf, newFileMode, to.Mode())
- fmt.Fprintf(&e.buf, indexNoMode, plumbing.ZeroHash, to.Hash())
- e.pathLines(isBinary, noFilePath, bDir+to.Path())
- case to == nil:
- fmt.Fprintf(&e.buf, diffInit, from.Path(), from.Path())
- fmt.Fprintf(&e.buf, deletedFileMode, from.Mode())
- fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), plumbing.ZeroHash)
- e.pathLines(isBinary, aDir+from.Path(), noFilePath)
- }
-
- return nil
-}
-
-func (e *UnifiedEncoder) pathLines(isBinary bool, fromPath, toPath string) {
- format := fPath + tPath
- if isBinary {
- format = binary
- }
-
- fmt.Fprintf(&e.buf, format, fromPath, toPath)
-}
-
-type hunksGenerator struct {
- fromLine, toLine int
- ctxLines int
- chunks []Chunk
- current *hunk
- hunks []*hunk
- beforeContext, afterContext []string
-}
-
-func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator {
- return &hunksGenerator{
- chunks: chunks,
- ctxLines: ctxLines,
- }
-}
-
-func (c *hunksGenerator) Generate() []*hunk {
- for i, chunk := range c.chunks {
- ls := splitLines(chunk.Content())
- lsLen := len(ls)
-
- switch chunk.Type() {
- case Equal:
- c.fromLine += lsLen
- c.toLine += lsLen
- c.processEqualsLines(ls, i)
- case Delete:
- if lsLen != 0 {
- c.fromLine++
- }
-
- c.processHunk(i, chunk.Type())
- c.fromLine += lsLen - 1
- c.current.AddOp(chunk.Type(), ls...)
- case Add:
- if lsLen != 0 {
- c.toLine++
- }
- c.processHunk(i, chunk.Type())
- c.toLine += lsLen - 1
- c.current.AddOp(chunk.Type(), ls...)
- }
-
- if i == len(c.chunks)-1 && c.current != nil {
- c.hunks = append(c.hunks, c.current)
- }
- }
-
- return c.hunks
-}
-
-func (c *hunksGenerator) processHunk(i int, op Operation) {
- if c.current != nil {
- return
- }
-
- var ctxPrefix string
- linesBefore := len(c.beforeContext)
- if linesBefore > c.ctxLines {
- ctxPrefix = " " + c.beforeContext[linesBefore-c.ctxLines-1]
- c.beforeContext = c.beforeContext[linesBefore-c.ctxLines:]
- linesBefore = c.ctxLines
- }
-
- c.current = &hunk{ctxPrefix: ctxPrefix}
- c.current.AddOp(Equal, c.beforeContext...)
-
- switch op {
- case Delete:
- c.current.fromLine, c.current.toLine =
- c.addLineNumbers(c.fromLine, c.toLine, linesBefore, i, Add)
- case Add:
- c.current.toLine, c.current.fromLine =
- c.addLineNumbers(c.toLine, c.fromLine, linesBefore, i, Delete)
- }
-
- c.beforeContext = nil
-}
-
-// addLineNumbers obtains the line numbers in a new chunk
-func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
- cla = la - linesBefore
- // we need to search for a reference for the next diff
- switch {
- case linesBefore != 0 && c.ctxLines != 0:
- if lb > c.ctxLines {
- clb = lb - c.ctxLines + 1
- } else {
- clb = 1
- }
- case c.ctxLines == 0:
- clb = lb
- case i != len(c.chunks)-1:
- next := c.chunks[i+1]
- if next.Type() == op || next.Type() == Equal {
-			// this diff will be included in this chunk
- clb = lb + 1
- }
- }
-
- return
-}
-
-func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
- if c.current == nil {
- c.beforeContext = append(c.beforeContext, ls...)
- return
- }
-
- c.afterContext = append(c.afterContext, ls...)
- if len(c.afterContext) <= c.ctxLines*2 && i != len(c.chunks)-1 {
- c.current.AddOp(Equal, c.afterContext...)
- c.afterContext = nil
- } else {
- ctxLines := c.ctxLines
- if ctxLines > len(c.afterContext) {
- ctxLines = len(c.afterContext)
- }
- c.current.AddOp(Equal, c.afterContext[:ctxLines]...)
- c.hunks = append(c.hunks, c.current)
-
- c.current = nil
- c.beforeContext = c.afterContext[ctxLines:]
- c.afterContext = nil
- }
-}
-
-func splitLines(s string) []string {
- out := strings.Split(s, "\n")
- if out[len(out)-1] == "" {
- out = out[:len(out)-1]
- }
-
- return out
-}
-
-type hunk struct {
- fromLine int
- toLine int
-
- fromCount int
- toCount int
-
- ctxPrefix string
- ops []*op
-}
-
-func (c *hunk) WriteTo(buf *bytes.Buffer) {
- buf.WriteString(chunkStart)
-
- if c.fromCount == 1 {
- fmt.Fprintf(buf, "%d", c.fromLine)
- } else {
- fmt.Fprintf(buf, chunkCount, c.fromLine, c.fromCount)
- }
-
- buf.WriteString(chunkMiddle)
-
- if c.toCount == 1 {
- fmt.Fprintf(buf, "%d", c.toLine)
- } else {
- fmt.Fprintf(buf, chunkCount, c.toLine, c.toCount)
- }
-
- fmt.Fprintf(buf, chunkEnd, c.ctxPrefix)
-
- for _, d := range c.ops {
- buf.WriteString(d.String())
- }
-}
-
-func (c *hunk) AddOp(t Operation, s ...string) {
- ls := len(s)
- switch t {
- case Add:
- c.toCount += ls
- case Delete:
- c.fromCount += ls
- case Equal:
- c.toCount += ls
- c.fromCount += ls
- }
-
- for _, l := range s {
- c.ops = append(c.ops, &op{l, t})
- }
-}
-
-type op struct {
- text string
- t Operation
-}
-
-func (o *op) String() string {
- var prefix string
- switch o.t {
- case Add:
- prefix = addLine
- case Delete:
- prefix = deleteLine
- case Equal:
- prefix = equalLine
- }
-
- return fmt.Sprintf(prefix, o.text)
-}
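
End to end, the encoder needs only the interfaces from patch.go above. A runnable sketch with hypothetical in-memory implementations (memFile, memChunk, memFilePatch and memPatch are illustrative, and the hashes are placeholders):

package main

import (
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
	"gopkg.in/src-d/go-git.v4/plumbing/format/diff"
)

// Hypothetical in-memory implementations of the diff interfaces.
type memFile struct {
	path string
	hash plumbing.Hash
}

func (f memFile) Hash() plumbing.Hash     { return f.hash }
func (f memFile) Mode() filemode.FileMode { return filemode.Regular }
func (f memFile) Path() string            { return f.path }

type memChunk struct {
	content string
	op      diff.Operation
}

func (c memChunk) Content() string      { return c.content }
func (c memChunk) Type() diff.Operation { return c.op }

type memFilePatch struct {
	from, to diff.File
	chunks   []diff.Chunk
}

func (p memFilePatch) IsBinary() bool                { return false }
func (p memFilePatch) Files() (diff.File, diff.File) { return p.from, p.to }
func (p memFilePatch) Chunks() []diff.Chunk          { return p.chunks }

type memPatch struct{ files []diff.FilePatch }

func (p memPatch) FilePatches() []diff.FilePatch { return p.files }
func (p memPatch) Message() string               { return "" }

func main() {
	patch := memPatch{files: []diff.FilePatch{memFilePatch{
		from: memFile{"hello.txt", plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")},
		to:   memFile{"hello.txt", plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")},
		chunks: []diff.Chunk{
			memChunk{"old line\n", diff.Delete},
			memChunk{"new line\n", diff.Add},
		},
	}}}

	// Writes the "diff --git", index and hunk lines to stdout.
	e := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines)
	if err := e.Encode(patch); err != nil {
		panic(err)
	}
}
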
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/dir.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/dir.go
deleted file mode 100644
index 1e88970efd..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/dir.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package gitignore
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "os/user"
- "strings"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-git.v4/plumbing/format/config"
- gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-const (
- commentPrefix = "#"
- coreSection = "core"
- eol = "\n"
- excludesfile = "excludesfile"
- gitDir = ".git"
- gitignoreFile = ".gitignore"
- gitconfigFile = ".gitconfig"
- systemFile = "/etc/gitconfig"
-)
-
-// readIgnoreFile reads a specific git ignore file.
-func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
- f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
- if err == nil {
- defer f.Close()
-
- if data, err := ioutil.ReadAll(f); err == nil {
- for _, s := range strings.Split(string(data), eol) {
- if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
- ps = append(ps, ParsePattern(s, path))
- }
- }
- }
- } else if !os.IsNotExist(err) {
- return nil, err
- }
-
- return
-}
-
-// ReadPatterns reads gitignore patterns recursively, traversing the directory
-// structure. The result is in ascending order of priority (last higher).
-func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
- ps, _ = readIgnoreFile(fs, path, gitignoreFile)
-
- var fis []os.FileInfo
- fis, err = fs.ReadDir(fs.Join(path...))
- if err != nil {
- return
- }
-
- for _, fi := range fis {
- if fi.IsDir() && fi.Name() != gitDir {
- var subps []Pattern
- subps, err = ReadPatterns(fs, append(path, fi.Name()))
- if err != nil {
- return
- }
-
- if len(subps) > 0 {
- ps = append(ps, subps...)
- }
- }
- }
-
- return
-}
-
-func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
- f, err := fs.Open(path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
-
- defer gioutil.CheckClose(f, &err)
-
- b, err := ioutil.ReadAll(f)
- if err != nil {
- return
- }
-
- d := config.NewDecoder(bytes.NewBuffer(b))
-
- raw := config.New()
- if err = d.Decode(raw); err != nil {
- return
- }
-
- s := raw.Section(coreSection)
- efo := s.Options.Get(excludesfile)
- if efo == "" {
- return nil, nil
- }
-
- ps, err = readIgnoreFile(fs, nil, efo)
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return
-}
-
-// LoadGlobalPatterns loads gitignore patterns from the gitignore file
-// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
-// exist the function will return nil. If the core.excludesfile property
-// is not declared, the function will return nil. If the file pointed to by
-// the core.excludesfile property does not exist, the function will return nil.
-//
-// The function assumes fs is rooted at the root filesystem.
-func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
- usr, err := user.Current()
- if err != nil {
- return
- }
-
- return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile))
-}
-
-// LoadSystemPatterns loads gitignore patterns from the gitignore file
-// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
-// not exist the function will return nil. If the core.excludesfile property
-// is not declared, the function will return nil. If the file pointed to by
-// the core.excludesfile property does not exist, the function will return nil.
-//
-// The function assumes fs is rooted at the root filesystem.
-func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
- return loadPatterns(fs, systemFile)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/doc.go
deleted file mode 100644
index eecd4baccb..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/doc.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Package gitignore implements matching file system paths to gitignore patterns that
-// can be automatically read from a git repository tree in the order of definition
-// priorities. It supports all pattern formats as specified in the original gitignore
-// documentation, copied below:
-//
-// Pattern format
-// ==============
-//
-// - A blank line matches no files, so it can serve as a separator for readability.
-//
-// - A line starting with # serves as a comment. Put a backslash ("\") in front of
-// the first hash for patterns that begin with a hash.
-//
-// - Trailing spaces are ignored unless they are quoted with backslash ("\").
-//
-// - An optional prefix "!" which negates the pattern; any matching file excluded
-// by a previous pattern will become included again. It is not possible to
-// re-include a file if a parent directory of that file is excluded.
-// Git doesn’t list excluded directories for performance reasons, so
-// any patterns on contained files have no effect, no matter where they are
-// defined. Put a backslash ("\") in front of the first "!" for patterns
-// that begin with a literal "!", for example, "\!important!.txt".
-//
-// - If the pattern ends with a slash, it is removed for the purpose of the
-// following description, but it would only find a match with a directory.
-// In other words, foo/ will match a directory foo and paths underneath it,
-// but will not match a regular file or a symbolic link foo (this is consistent
-// with the way how pathspec works in general in Git).
-//
-// - If the pattern does not contain a slash /, Git treats it as a shell glob
-// pattern and checks for a match against the pathname relative to the location
-// of the .gitignore file (relative to the toplevel of the work tree if not
-// from a .gitignore file).
-//
-// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
-// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
-// not match a / in the pathname. For example, "Documentation/*.html" matches
-// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
-// "tools/perf/Documentation/perf.html".
-//
-// - A leading slash matches the beginning of the pathname. For example,
-// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
-//
-// Two consecutive asterisks ("**") in patterns matched against full pathname
-// may have special meaning:
-//
-// - A leading "**" followed by a slash means match in all directories.
-// For example, "**/foo" matches file or directory "foo" anywhere, the same as
-// pattern "foo". "**/foo/bar" matches file or directory "bar"
-// anywhere that is directly under directory "foo".
-//
-// - A trailing "/**" matches everything inside. For example, "abc/**" matches
-// all files inside directory "abc", relative to the location of the
-// .gitignore file, with infinite depth.
-//
-// - A slash followed by two consecutive asterisks then a slash matches
-// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
-// "a/x/y/b" and so on.
-//
-// - Other consecutive asterisks are considered invalid.
-//
-// Copyright and license
-// =====================
-//
-// Copyright (c) Oleg Sklyar, Silvertern and source{d}
-//
-// The package code was donated to source{d} to include, modify and develop
-// further as a part of the `go-git` project, release it on the license of
-// the whole project or delete it from the project.
-package gitignore
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/matcher.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/matcher.go
deleted file mode 100644
index bd1e9e2d4c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/matcher.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package gitignore
-
-// Matcher defines a global multi-pattern matcher for gitignore patterns
-type Matcher interface {
-	// Match matches patterns in the order of priorities. As soon as an inclusion or
-	// exclusion is found, no further matching is performed.
- Match(path []string, isDir bool) bool
-}
-
-// NewMatcher constructs a new global matcher. Patterns must be given in the order of
-// increasing priority: the most generic settings files first, then the content of
-// the repo .gitignore, then the content of .gitignore files down the path of the
-// repo, and finally the content of command line arguments.
-func NewMatcher(ps []Pattern) Matcher {
- return &matcher{ps}
-}
-
-type matcher struct {
- patterns []Pattern
-}
-
-func (m *matcher) Match(path []string, isDir bool) bool {
- n := len(m.patterns)
- for i := n - 1; i >= 0; i-- {
- if match := m.patterns[i].Match(path, isDir); match > NoMatch {
- return match == Exclude
- }
- }
- return false
-}
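
Because the matcher scans patterns from last to first, later patterns win. A small sketch of that priority rule (ParsePattern is defined in pattern.go, shown next):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
)

func main() {
	// The negated pattern re-includes keep.log even though *.log excludes it.
	m := gitignore.NewMatcher([]gitignore.Pattern{
		gitignore.ParsePattern("*.log", nil),
		gitignore.ParsePattern("!keep.log", nil),
	})

	fmt.Println(m.Match([]string{"debug.log"}, false)) // true: excluded
	fmt.Println(m.Match([]string{"keep.log"}, false))  // false: re-included
}
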
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/pattern.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/pattern.go
deleted file mode 100644
index 098cb50212..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/pattern.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package gitignore
-
-import (
- "path/filepath"
- "strings"
-)
-
-// MatchResult defines the outcomes of a match: no match, exclusion or inclusion.
-type MatchResult int
-
-const (
- // NoMatch defines the no match outcome of a match check
- NoMatch MatchResult = iota
- // Exclude defines an exclusion of a file as a result of a match check
- Exclude
- // Include defines an explicit inclusion of a file as a result of a match check
- Include
-)
-
-const (
- inclusionPrefix = "!"
- zeroToManyDirs = "**"
- patternDirSep = "/"
-)
-
-// Pattern defines a single gitignore pattern.
-type Pattern interface {
- // Match matches the given path to the pattern.
- Match(path []string, isDir bool) MatchResult
-}
-
-type pattern struct {
- domain []string
- pattern []string
- inclusion bool
- dirOnly bool
- isGlob bool
-}
-
-// ParsePattern parses a gitignore pattern string into the Pattern structure.
-func ParsePattern(p string, domain []string) Pattern {
- res := pattern{domain: domain}
-
- if strings.HasPrefix(p, inclusionPrefix) {
- res.inclusion = true
- p = p[1:]
- }
-
- if !strings.HasSuffix(p, "\\ ") {
- p = strings.TrimRight(p, " ")
- }
-
- if strings.HasSuffix(p, patternDirSep) {
- res.dirOnly = true
- p = p[:len(p)-1]
- }
-
- if strings.Contains(p, patternDirSep) {
- res.isGlob = true
- }
-
- res.pattern = strings.Split(p, patternDirSep)
- return &res
-}
-
-func (p *pattern) Match(path []string, isDir bool) MatchResult {
- if len(path) <= len(p.domain) {
- return NoMatch
- }
- for i, e := range p.domain {
- if path[i] != e {
- return NoMatch
- }
- }
-
- path = path[len(p.domain):]
- if p.isGlob && !p.globMatch(path, isDir) {
- return NoMatch
- } else if !p.isGlob && !p.simpleNameMatch(path, isDir) {
- return NoMatch
- }
-
- if p.inclusion {
- return Include
- } else {
- return Exclude
- }
-}
-
-func (p *pattern) simpleNameMatch(path []string, isDir bool) bool {
- for i, name := range path {
- if match, err := filepath.Match(p.pattern[0], name); err != nil {
- return false
- } else if !match {
- continue
- }
- if p.dirOnly && !isDir && i == len(path)-1 {
- return false
- }
- return true
- }
- return false
-}
-
-func (p *pattern) globMatch(path []string, isDir bool) bool {
- matched := false
- canTraverse := false
- for i, pattern := range p.pattern {
- if pattern == "" {
- canTraverse = false
- continue
- }
- if pattern == zeroToManyDirs {
- if i == len(p.pattern)-1 {
- break
- }
- canTraverse = true
- continue
- }
- if strings.Contains(pattern, zeroToManyDirs) {
- return false
- }
- if len(path) == 0 {
- return false
- }
- if canTraverse {
- canTraverse = false
- for len(path) > 0 {
- e := path[0]
- path = path[1:]
- if match, err := filepath.Match(pattern, e); err != nil {
- return false
- } else if match {
- matched = true
- break
- } else if len(path) == 0 {
- // if nothing left then fail
- matched = false
- }
- }
- } else {
- if match, err := filepath.Match(pattern, path[0]); err != nil || !match {
- return false
- }
- matched = true
- path = path[1:]
- }
- }
- if matched && p.dirOnly && !isDir && len(path) == 0 {
- matched = false
- }
- return matched
-}
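
A short sketch of the pattern semantics implemented above, exercising the directory-only and "**" rules documented in doc.go (the pattern and paths are illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
)

func main() {
	// A trailing slash makes the pattern directory-only, and "**" crosses
	// any number of directories, including zero.
	p := gitignore.ParsePattern("vendor/**/testdata/", nil)

	fmt.Println(p.Match([]string{"vendor", "a", "b", "testdata"}, true) == gitignore.Exclude) // true
	fmt.Println(p.Match([]string{"vendor", "testdata"}, true) == gitignore.Exclude)           // true: "**" spans zero dirs
	fmt.Println(p.Match([]string{"vendor", "a", "testdata"}, false) == gitignore.NoMatch)     // true: not a directory
}
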
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go
deleted file mode 100644
index 9e9c1769ab..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package idxfile
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
-
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // ErrUnsupportedVersion is returned by Decode when the idx file version
- // is not supported.
-	ErrUnsupportedVersion = errors.New("Unsupported version")
- // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
- ErrMalformedIdxFile = errors.New("Malformed IDX file")
-)
-
-const (
- fanout = 256
- objectIDLength = 20
-)
-
-// Decoder reads and decodes idx files from an input stream.
-type Decoder struct {
- *bufio.Reader
-}
-
-// NewDecoder builds a new idx stream decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- return &Decoder{bufio.NewReader(r)}
-}
-
-// Decode reads from the stream and decodes the content into the MemoryIndex struct.
-func (d *Decoder) Decode(idx *MemoryIndex) error {
- if err := validateHeader(d); err != nil {
- return err
- }
-
- flow := []func(*MemoryIndex, io.Reader) error{
- readVersion,
- readFanout,
- readObjectNames,
- readCRC32,
- readOffsets,
- readChecksums,
- }
-
- for _, f := range flow {
- if err := f(idx, d); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) error {
- var h = make([]byte, 4)
- if _, err := io.ReadFull(r, h); err != nil {
- return err
- }
-
- if !bytes.Equal(h, idxHeader) {
- return ErrMalformedIdxFile
- }
-
- return nil
-}
-
-func readVersion(idx *MemoryIndex, r io.Reader) error {
- v, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- if v > VersionSupported {
- return ErrUnsupportedVersion
- }
-
- idx.Version = v
- return nil
-}
-
-func readFanout(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- n, err := binary.ReadUint32(r)
- if err != nil {
- return err
- }
-
- idx.Fanout[k] = n
- idx.FanoutMapping[k] = noMapping
- }
-
- return nil
-}
-
-func readObjectNames(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- var buckets uint32
- if k == 0 {
- buckets = idx.Fanout[k]
- } else {
- buckets = idx.Fanout[k] - idx.Fanout[k-1]
- }
-
- if buckets == 0 {
- continue
- }
-
- idx.FanoutMapping[k] = len(idx.Names)
-
- nameLen := int(buckets * objectIDLength)
- bin := make([]byte, nameLen)
- if _, err := io.ReadFull(r, bin); err != nil {
- return err
- }
-
- idx.Names = append(idx.Names, bin)
- idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4))
- idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4))
- }
-
- return nil
-}
-
-func readCRC32(idx *MemoryIndex, r io.Reader) error {
- for k := 0; k < fanout; k++ {
- if pos := idx.FanoutMapping[k]; pos != noMapping {
- if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func readOffsets(idx *MemoryIndex, r io.Reader) error {
- var o64cnt int
- for k := 0; k < fanout; k++ {
- if pos := idx.FanoutMapping[k]; pos != noMapping {
- if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {
- return err
- }
-
- for p := 0; p < len(idx.Offset32[pos]); p += 4 {
- if idx.Offset32[pos][p]&(byte(1)<<7) > 0 {
- o64cnt++
- }
- }
- }
- }
-
- if o64cnt > 0 {
- idx.Offset64 = make([]byte, o64cnt*8)
- if _, err := io.ReadFull(r, idx.Offset64); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readChecksums(idx *MemoryIndex, r io.Reader) error {
- if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
- return err
- }
-
- if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go
deleted file mode 100644
index 1e628ab4a5..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Package idxfile implements encoding and decoding of packfile idx files.
-//
-// == Original (version 1) pack-*.idx files have the following format:
-//
-// - The header consists of 256 4-byte network byte order
-// integers. N-th entry of this table records the number of
-// objects in the corresponding pack, the first byte of whose
-// object name is less than or equal to N. This is called the
-// 'first-level fan-out' table.
-//
-// - The header is followed by sorted 24-byte entries, one entry
-// per object in the pack. Each entry is:
-//
-// 4-byte network byte order integer, recording where the
-// object is stored in the packfile as the offset from the
-// beginning.
-//
-// 20-byte object name.
-//
-// - The file is concluded with a trailer:
-//
-// A copy of the 20-byte SHA1 checksum at the end of
-// corresponding packfile.
-//
-// 20-byte SHA1-checksum of all of the above.
-//
-// Pack Idx file:
-//
-// -- +--------------------------------+
-// fanout | fanout[0] = 2 (for example) |-.
-// table +--------------------------------+ |
-// | fanout[1] | |
-// +--------------------------------+ |
-// | fanout[2] | |
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
-// | fanout[255] = total objects |---.
-// -- +--------------------------------+ | |
-// main | offset | | |
-// index | object name 00XXXXXXXXXXXXXXXX | | |
-// tab +--------------------------------+ | |
-// | offset | | |
-// | object name 00XXXXXXXXXXXXXXXX | | |
-// +--------------------------------+<+ |
-// .-| offset | |
-// | | object name 01XXXXXXXXXXXXXXXX | |
-// | +--------------------------------+ |
-// | | offset | |
-// | | object name 01XXXXXXXXXXXXXXXX | |
-// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
-// | | offset | |
-// | | object name FFXXXXXXXXXXXXXXXX | |
-// --| +--------------------------------+<--+
-// trailer | | packfile checksum |
-// | +--------------------------------+
-// | | idxfile checksum |
-// | +--------------------------------+
-// .---------.
-// |
-// Pack file entry: <+
-//
-// packed object header:
-// 1-byte size extension bit (MSB)
-// type (next 3 bit)
-// size0 (lower 4-bit)
-// n-byte sizeN (as long as MSB is set, each 7-bit)
-// size0..sizeN form 4+7+7+..+7 bit integer, size0
-// is the least significant part, and sizeN is the
-// most significant part.
-// packed object data:
-// If it is not DELTA, then deflated bytes (the size above
-// is the size before compression).
-// If it is REF_DELTA, then
-// 20-byte base object name SHA1 (the size above is the
-// size of the delta data that follows).
-// delta data, deflated.
-// If it is OFS_DELTA, then
-// n-byte offset (see below) interpreted as a negative
-// offset from the type-byte of the header of the
-// ofs-delta entry (the size above is the size of
-// the delta data that follows).
-// delta data, deflated.
-//
-// offset encoding:
-// n bytes with MSB set in all but the last one.
-// The offset is then the number constructed by
-// concatenating the lower 7 bit of each byte, and
-// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
-// to the result.
-//
-// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
-// have some other reorganizations. They have the format:
-//
-// - A 4-byte magic number '\377tOc' which is an unreasonable
-// fanout[0] value.
-//
-// - A 4-byte version number (= 2)
-//
-// - A 256-entry fan-out table just like v1.
-//
-// - A table of sorted 20-byte SHA1 object names. These are
-// packed together without offset values to reduce the cache
-// footprint of the binary search for a specific object name.
-//
-// - A table of 4-byte CRC32 values of the packed object data.
-// This is new in v2 so compressed data can be copied directly
-// from pack to pack during repacking without undetected
-// data corruption.
-//
-// - A table of 4-byte offset values (in network byte order).
-// These are usually 31-bit pack file offsets, but large
-// offsets are encoded as an index into the next table with
-// the msbit set.
-//
-// - A table of 8-byte offset entries (empty for pack files less
-// than 2 GiB). Pack files are organized with heavily used
-// objects toward the front, so most object references should
-// not need to refer to this table.
-//
-// - The same trailer as a v1 pack file:
-//
-// A copy of the 20-byte SHA1 checksum at the end of
-// corresponding packfile.
-//
-// 20-byte SHA1-checksum of all of the above.
-//
-// Source:
-// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
-package idxfile
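
The variable-width OFS_DELTA offset encoding described above is easy to get wrong. A hypothetical decoder for it, not part of this package, assuming b holds the raw offset bytes from a pack entry:

package ofs

// decodeOfsDeltaOffset decodes the n-byte offset encoding described above:
// 7 payload bits per byte, MSB set on every byte but the last, plus the
// 2^7 + 2^14 + ... + 2^(7*(n-1)) correction for the continuation bytes.
func decodeOfsDeltaOffset(b []byte) int64 {
	off := int64(b[0] & 0x7f)
	for _, c := range b[1:] {
		off = ((off + 1) << 7) | int64(c&0x7f)
	}
	return off
}

// For example, decodeOfsDeltaOffset([]byte{0x91, 0x2e}) == 2350,
// i.e. (0x11+1)<<7 | 0x2e.
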
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go
deleted file mode 100644
index e479511026..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package idxfile
-
-import (
- "crypto/sha1"
- "hash"
- "io"
-
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-// Encoder writes MemoryIndex structs to an output stream.
-type Encoder struct {
- io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new stream encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := sha1.New()
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode encodes a MemoryIndex to the encoder writer.
-func (e *Encoder) Encode(idx *MemoryIndex) (int, error) {
- flow := []func(*MemoryIndex) (int, error){
- e.encodeHeader,
- e.encodeFanout,
- e.encodeHashes,
- e.encodeCRC32,
- e.encodeOffsets,
- e.encodeChecksums,
- }
-
- sz := 0
- for _, f := range flow {
- i, err := f(idx)
- sz += i
-
- if err != nil {
- return sz, err
- }
- }
-
- return sz, nil
-}
-
-func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) {
- c, err := e.Write(idxHeader)
- if err != nil {
- return c, err
- }
-
- return c + 4, binary.WriteUint32(e, idx.Version)
-}
-
-func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) {
- for _, c := range idx.Fanout {
- if err := binary.WriteUint32(e, c); err != nil {
- return 0, err
- }
- }
-
- return fanout * 4, nil
-}
-
-func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.Names[pos])
- if err != nil {
- return size, err
- }
- size += n
- }
- return size, nil
-}
-
-func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.CRC32[pos])
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- return size, nil
-}
-
-func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) {
- var size int
- for k := 0; k < fanout; k++ {
- pos := idx.FanoutMapping[k]
- if pos == noMapping {
- continue
- }
-
- n, err := e.Write(idx.Offset32[pos])
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- if len(idx.Offset64) > 0 {
- n, err := e.Write(idx.Offset64)
- if err != nil {
- return size, err
- }
-
- size += n
- }
-
- return size, nil
-}
-
-func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
- if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
- return 0, err
- }
-
- copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
- if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
- return 0, err
- }
-
- return 40, nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go
deleted file mode 100644
index 14b58603f7..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "io"
- "sort"
-
- encbin "encoding/binary"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-const (
- // VersionSupported is the only idx version supported.
- VersionSupported = 2
-
- noMapping = -1
-)
-
-var (
- idxHeader = []byte{255, 't', 'O', 'c'}
-)
-
-// Index represents an index of a packfile.
-type Index interface {
- // Contains checks whether the given hash is in the index.
- Contains(h plumbing.Hash) (bool, error)
- // FindOffset finds the offset in the packfile for the object with
- // the given hash.
- FindOffset(h plumbing.Hash) (int64, error)
- // FindCRC32 finds the CRC32 of the object with the given hash.
- FindCRC32(h plumbing.Hash) (uint32, error)
- // FindHash finds the hash for the object with the given offset.
- FindHash(o int64) (plumbing.Hash, error)
- // Count returns the number of entries in the index.
- Count() (int64, error)
- // Entries returns an iterator to retrieve all index entries.
- Entries() (EntryIter, error)
- // EntriesByOffset returns an iterator to retrieve all index entries ordered
- // by offset.
- EntriesByOffset() (EntryIter, error)
-}
-
-// MemoryIndex is the in-memory representation of an idx file.
-type MemoryIndex struct {
- Version uint32
- Fanout [256]uint32
- // FanoutMapping maps the position in the fanout table to the position
- // in the Names, Offset32 and CRC32 slices. This improves the memory
- // usage by not needing an array with unnecessary empty slots.
- FanoutMapping [256]int
- Names [][]byte
- Offset32 [][]byte
- CRC32 [][]byte
- Offset64 []byte
- PackfileChecksum [20]byte
- IdxChecksum [20]byte
-
- offsetHash map[int64]plumbing.Hash
- offsetHashIsFull bool
-}
-
-var _ Index = (*MemoryIndex)(nil)
-
-// NewMemoryIndex returns an instance of a new MemoryIndex.
-func NewMemoryIndex() *MemoryIndex {
- return &MemoryIndex{}
-}
-
-func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) {
- k := idx.FanoutMapping[h[0]]
- if k == noMapping {
- return 0, false
- }
-
- if len(idx.Names) <= k {
- return 0, false
- }
-
- data := idx.Names[k]
- high := uint64(len(idx.Offset32[k])) >> 2
- if high == 0 {
- return 0, false
- }
-
- low := uint64(0)
- for {
- mid := (low + high) >> 1
- offset := mid * objectIDLength
-
- cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength])
- if cmp < 0 {
- high = mid
- } else if cmp == 0 {
- return int(mid), true
- } else {
- low = mid + 1
- }
-
- if low >= high {
- break
- }
- }
-
- return 0, false
-}
-
-// Contains implements the Index interface.
-func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) {
- _, ok := idx.findHashIndex(h)
- return ok, nil
-}
-
-// FindOffset implements the Index interface.
-func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
- if len(idx.FanoutMapping) <= int(h[0]) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- k := idx.FanoutMapping[h[0]]
- i, ok := idx.findHashIndex(h)
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- offset := idx.getOffset(k, i)
-
- if !idx.offsetHashIsFull {
- // Save the offset for reverse lookup
- if idx.offsetHash == nil {
- idx.offsetHash = make(map[int64]plumbing.Hash)
- }
- idx.offsetHash[int64(offset)] = h
- }
-
- return int64(offset), nil
-}
-
-const isO64Mask = uint64(1) << 31
-
-func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
- offset := secondLevel << 2
- ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
-
- if (uint64(ofs) & isO64Mask) != 0 {
- offset := 8 * (uint64(ofs) & ^isO64Mask)
- n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
- return n
- }
-
- return uint64(ofs)
-}
-
-// FindCRC32 implements the Index interface.
-func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
- k := idx.FanoutMapping[h[0]]
- i, ok := idx.findHashIndex(h)
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return idx.getCRC32(k, i), nil
-}
-
-func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
- offset := secondLevel << 2
- return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
-}
-
-// FindHash implements the Index interface.
-func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
- var hash plumbing.Hash
- var ok bool
-
- if idx.offsetHash != nil {
- if hash, ok = idx.offsetHash[o]; ok {
- return hash, nil
- }
- }
-
- // Lazily generate the reverse offset/hash map if required.
- if !idx.offsetHashIsFull || idx.offsetHash == nil {
- if err := idx.genOffsetHash(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash, ok = idx.offsetHash[o]
- }
-
- if !ok {
- return plumbing.ZeroHash, plumbing.ErrObjectNotFound
- }
-
- return hash, nil
-}
-
-// genOffsetHash generates the offset/hash mapping for reverse search.
-func (idx *MemoryIndex) genOffsetHash() error {
- count, err := idx.Count()
- if err != nil {
- return err
- }
-
- idx.offsetHash = make(map[int64]plumbing.Hash, count)
- idx.offsetHashIsFull = true
-
- var hash plumbing.Hash
- i := uint32(0)
- for firstLevel, fanoutValue := range idx.Fanout {
- mappedFirstLevel := idx.FanoutMapping[firstLevel]
- for secondLevel := uint32(0); i < fanoutValue; i++ {
- copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
- offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
- idx.offsetHash[offset] = hash
- secondLevel++
- }
- }
-
- return nil
-}
-
-// Count implements the Index interface.
-func (idx *MemoryIndex) Count() (int64, error) {
- return int64(idx.Fanout[fanout-1]), nil
-}
-
-// Entries implements the Index interface.
-func (idx *MemoryIndex) Entries() (EntryIter, error) {
- return &idxfileEntryIter{idx, 0, 0, 0}, nil
-}
-
-// EntriesByOffset implements the Index interface.
-func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
- count, err := idx.Count()
- if err != nil {
- return nil, err
- }
-
- iter := &idxfileEntryOffsetIter{
- entries: make(entriesByOffset, count),
- }
-
- entries, err := idx.Entries()
- if err != nil {
- return nil, err
- }
-
- for pos := 0; int64(pos) < count; pos++ {
- entry, err := entries.Next()
- if err != nil {
- return nil, err
- }
-
- iter.entries[pos] = entry
- }
-
- sort.Sort(iter.entries)
-
- return iter, nil
-}
-
-// EntryIter is an iterator that will return the entries in a packfile index.
-type EntryIter interface {
- // Next returns the next entry in the packfile index.
- Next() (*Entry, error)
- // Close closes the iterator.
- Close() error
-}
-
-type idxfileEntryIter struct {
- idx *MemoryIndex
- total int
- firstLevel, secondLevel int
-}
-
-func (i *idxfileEntryIter) Next() (*Entry, error) {
- for {
- if i.firstLevel >= fanout {
- return nil, io.EOF
- }
-
- if i.total >= int(i.idx.Fanout[i.firstLevel]) {
- i.firstLevel++
- i.secondLevel = 0
- continue
- }
-
- mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
- entry := new(Entry)
- copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
- entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
- entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
-
- i.secondLevel++
- i.total++
-
- return entry, nil
- }
-}
-
-func (i *idxfileEntryIter) Close() error {
- i.firstLevel = fanout
- return nil
-}
-
-// Entry is the in-memory representation of an object entry in the idx file.
-type Entry struct {
- Hash plumbing.Hash
- CRC32 uint32
- Offset uint64
-}
-
-type idxfileEntryOffsetIter struct {
- entries entriesByOffset
- pos int
-}
-
-func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
- if i.pos >= len(i.entries) {
- return nil, io.EOF
- }
-
- entry := i.entries[i.pos]
- i.pos++
-
- return entry, nil
-}
-
-func (i *idxfileEntryOffsetIter) Close() error {
- i.pos = len(i.entries) + 1
- return nil
-}
-
-type entriesByOffset []*Entry
-
-func (o entriesByOffset) Len() int {
- return len(o)
-}
-
-func (o entriesByOffset) Less(i int, j int) bool {
- return o[i].Offset < o[j].Offset
-}
-
-func (o entriesByOffset) Swap(i int, j int) {
- o[i], o[j] = o[j], o[i]
-}
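
A sketch of iterating a decoded index with the EntryIter API above (placeholder pack path as before):

package main

import (
	"fmt"
	"io"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	f, err := os.Open(".git/objects/pack/pack-example.idx")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	// EntriesByOffset yields entries sorted by pack offset instead of hash.
	iter, err := idx.EntriesByOffset()
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	for {
		e, err := iter.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s offset=%d crc=%08x\n", e.Hash, e.Offset, e.CRC32)
	}
}
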
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/writer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/writer.go
deleted file mode 100644
index fcc78c56d0..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/writer.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package idxfile
-
-import (
- "bytes"
- "fmt"
- "math"
- "sort"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-// objects implements sort.Interface and uses hash as sorting key.
-type objects []Entry
-
-// Writer implements a packfile Observer interface and is used to generate
-// indexes.
-type Writer struct {
- m sync.Mutex
-
- count uint32
- checksum plumbing.Hash
- objects objects
- offset64 uint32
- finished bool
- index *MemoryIndex
- added map[plumbing.Hash]struct{}
-}
-
-// Index returns a previously created MemoryIndex or creates a new one if
-// needed.
-func (w *Writer) Index() (*MemoryIndex, error) {
- w.m.Lock()
- defer w.m.Unlock()
-
- if w.index == nil {
- return w.createIndex()
- }
-
- return w.index, nil
-}
-
-// Add appends new object data.
-func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
- w.m.Lock()
- defer w.m.Unlock()
-
- if w.added == nil {
- w.added = make(map[plumbing.Hash]struct{})
- }
-
- if _, ok := w.added[h]; !ok {
- w.added[h] = struct{}{}
- w.objects = append(w.objects, Entry{h, crc, pos})
- }
-
-}
-
-func (w *Writer) Finished() bool {
- return w.finished
-}
-
-// OnHeader implements packfile.Observer interface.
-func (w *Writer) OnHeader(count uint32) error {
- w.count = count
- w.objects = make(objects, 0, count)
- return nil
-}
-
-// OnInflatedObjectHeader implements packfile.Observer interface.
-func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
- return nil
-}
-
-// OnInflatedObjectContent implements packfile.Observer interface.
-func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
- w.Add(h, uint64(pos), crc)
- return nil
-}
-
-// OnFooter implements packfile.Observer interface.
-func (w *Writer) OnFooter(h plumbing.Hash) error {
- w.checksum = h
- w.finished = true
- _, err := w.createIndex()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// createIndex returns a MemoryIndex filled with the information gathered by
-// the observer callbacks.
-func (w *Writer) createIndex() (*MemoryIndex, error) {
- if !w.finished {
- return nil, fmt.Errorf("the index still hasn't finished building")
- }
-
- idx := new(MemoryIndex)
- w.index = idx
-
- sort.Sort(w.objects)
-
- // unmap all fans by default
- for i := range idx.FanoutMapping {
- idx.FanoutMapping[i] = noMapping
- }
-
- buf := new(bytes.Buffer)
-
- last := -1
- bucket := -1
- for i, o := range w.objects {
- fan := o.Hash[0]
-
- // fill the gaps between fans
- for j := last + 1; j < int(fan); j++ {
- idx.Fanout[j] = uint32(i)
- }
-
- // update the number of objects for this position
- idx.Fanout[fan] = uint32(i + 1)
-
- // we move from one bucket to another, update counters and allocate
- // memory
- if last != int(fan) {
- bucket++
- idx.FanoutMapping[fan] = bucket
- last = int(fan)
-
- idx.Names = append(idx.Names, make([]byte, 0))
- idx.Offset32 = append(idx.Offset32, make([]byte, 0))
- idx.CRC32 = append(idx.CRC32, make([]byte, 0))
- }
-
- idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...)
-
- offset := o.Offset
- if offset > math.MaxInt32 {
- offset = w.addOffset64(offset)
- }
-
- buf.Truncate(0)
- binary.WriteUint32(buf, uint32(offset))
- idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
-
- buf.Truncate(0)
- binary.WriteUint32(buf, o.CRC32)
- idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
- }
-
- for j := last + 1; j < 256; j++ {
- idx.Fanout[j] = uint32(len(w.objects))
- }
-
- idx.Version = VersionSupported
- idx.PackfileChecksum = w.checksum
-
- return idx, nil
-}
-
-func (w *Writer) addOffset64(pos uint64) uint64 {
- buf := new(bytes.Buffer)
- binary.WriteUint64(buf, pos)
- w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
-
- index := uint64(w.offset64 | (1 << 31))
- w.offset64++
-
- return index
-}
-
-func (o objects) Len() int {
- return len(o)
-}
-
-func (o objects) Less(i int, j int) bool {
- cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:])
- return cmp < 0
-}
-
-func (o objects) Swap(i int, j int) {
- o[i], o[j] = o[j], o[i]
-}
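
In normal use a packfile parser feeds the Writer through the Observer callbacks while scanning a pack. A hedged sketch that drives the same callbacks by hand (the hashes and offsets are placeholders):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	w := new(idxfile.Writer)

	// Header, one content callback per object, then the footer with the
	// pack checksum; OnFooter builds the index.
	w.OnHeader(2)
	w.OnInflatedObjectContent(plumbing.NewHash("0123456789012345678901234567890123456789"), 12, 0x1111, nil)
	w.OnInflatedObjectContent(plumbing.NewHash("89abcdef89abcdef89abcdef89abcdef89abcdef"), 345, 0x2222, nil)
	if err := w.OnFooter(plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")); err != nil {
		panic(err)
	}

	idx, err := w.Index()
	if err != nil {
		panic(err)
	}

	count, _ := idx.Count()
	fmt.Printf("built an in-memory index with %d entries\n", count)
}
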
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go
deleted file mode 100644
index 98f92fda64..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go
+++ /dev/null
@@ -1,477 +0,0 @@
-package index
-
-import (
- "bufio"
- "bytes"
- "crypto/sha1"
- "errors"
- "hash"
- "io"
- "io/ioutil"
- "strconv"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
- // DecodeVersionSupported is the range of supported index versions
- DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
-
- // ErrMalformedSignature is returned by Decode when the index header file is
- // malformed
- ErrMalformedSignature = errors.New("malformed index signature file")
-	// ErrInvalidChecksum is returned by Decode if the SHA1 hash does not match
-	// the read content
- ErrInvalidChecksum = errors.New("invalid checksum")
-
- errUnknownExtension = errors.New("unknown extension")
-)
-
-const (
- entryHeaderLength = 62
- entryExtended = 0x4000
- entryValid = 0x8000
- nameMask = 0xfff
- intentToAddMask = 1 << 13
- skipWorkTreeMask = 1 << 14
-)
-
-// A Decoder reads and decodes index files from an input stream.
-type Decoder struct {
- r io.Reader
- hash hash.Hash
- lastEntry *Entry
-
- extReader *bufio.Reader
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
- h := sha1.New()
- return &Decoder{
- r: io.TeeReader(r, h),
- hash: h,
- extReader: bufio.NewReader(nil),
- }
-}
-
-// Decode reads the whole index object from its input and stores it in the
-// value pointed to by idx.
-func (d *Decoder) Decode(idx *Index) error {
- var err error
- idx.Version, err = validateHeader(d.r)
- if err != nil {
- return err
- }
-
- entryCount, err := binary.ReadUint32(d.r)
- if err != nil {
- return err
- }
-
- if err := d.readEntries(idx, int(entryCount)); err != nil {
- return err
- }
-
- return d.readExtensions(idx)
-}
-
-func (d *Decoder) readEntries(idx *Index, count int) error {
- for i := 0; i < count; i++ {
- e, err := d.readEntry(idx)
- if err != nil {
- return err
- }
-
- d.lastEntry = e
- idx.Entries = append(idx.Entries, e)
- }
-
- return nil
-}
-
-func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
- e := &Entry{}
-
- var msec, mnsec, sec, nsec uint32
- var flags uint16
-
- flow := []interface{}{
- &sec, &nsec,
- &msec, &mnsec,
- &e.Dev,
- &e.Inode,
- &e.Mode,
- &e.UID,
- &e.GID,
- &e.Size,
- &e.Hash,
- &flags,
- }
-
- if err := binary.Read(d.r, flow...); err != nil {
- return nil, err
- }
-
- read := entryHeaderLength
-
- if sec != 0 || nsec != 0 {
- e.CreatedAt = time.Unix(int64(sec), int64(nsec))
- }
-
- if msec != 0 || mnsec != 0 {
- e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
- }
-
- e.Stage = Stage(flags>>12) & 0x3
-
- if flags&entryExtended != 0 {
- extended, err := binary.ReadUint16(d.r)
- if err != nil {
- return nil, err
- }
-
- read += 2
- e.IntentToAdd = extended&intentToAddMask != 0
- e.SkipWorktree = extended&skipWorkTreeMask != 0
- }
-
- if err := d.readEntryName(idx, e, flags); err != nil {
- return nil, err
- }
-
- return e, d.padEntry(idx, e, read)
-}
-
-func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
- var name string
- var err error
-
- switch idx.Version {
- case 2, 3:
- len := flags & nameMask
- name, err = d.doReadEntryName(len)
- case 4:
- name, err = d.doReadEntryNameV4()
- default:
- return ErrUnsupportedVersion
- }
-
- if err != nil {
- return err
- }
-
- e.Name = name
- return nil
-}
-
-func (d *Decoder) doReadEntryNameV4() (string, error) {
- l, err := binary.ReadVariableWidthInt(d.r)
- if err != nil {
- return "", err
- }
-
- var base string
- if d.lastEntry != nil {
- base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
- }
-
- name, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return "", err
- }
-
- return base + string(name), nil
-}
-
-func (d *Decoder) doReadEntryName(len uint16) (string, error) {
- name := make([]byte, len)
- _, err := io.ReadFull(d.r, name[:])
-
- return string(name), err
-}
-
-// Index entries are padded out to the next 8 byte alignment
-// for historical reasons related to how C Git read the files.
-func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
- if idx.Version == 4 {
- return nil
- }
-
- entrySize := read + len(e.Name)
- padLen := 8 - entrySize%8
- _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
- return err
-}
-
-func (d *Decoder) readExtensions(idx *Index) error {
-	// TODO: support 'Split index' and 'Untracked cache' extensions, taking into
-	// account that they are not supported by jgit or libgit
-
- var expected []byte
- var err error
-
- var header [4]byte
- for {
- expected = d.hash.Sum(nil)
-
- var n int
- if n, err = io.ReadFull(d.r, header[:]); err != nil {
- if n == 0 {
- err = io.EOF
- }
-
- break
- }
-
- err = d.readExtension(idx, header[:])
- if err != nil {
- break
- }
- }
-
- if err != errUnknownExtension {
- return err
- }
-
- return d.readChecksum(expected, header)
-}
-
-func (d *Decoder) readExtension(idx *Index, header []byte) error {
- switch {
- case bytes.Equal(header, treeExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.Cache = &Tree{}
- d := &treeExtensionDecoder{r}
- if err := d.Decode(idx.Cache); err != nil {
- return err
- }
- case bytes.Equal(header, resolveUndoExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.ResolveUndo = &ResolveUndo{}
- d := &resolveUndoDecoder{r}
- if err := d.Decode(idx.ResolveUndo); err != nil {
- return err
- }
- case bytes.Equal(header, endOfIndexEntryExtSignature):
- r, err := d.getExtensionReader()
- if err != nil {
- return err
- }
-
- idx.EndOfIndexEntry = &EndOfIndexEntry{}
- d := &endOfIndexEntryDecoder{r}
- if err := d.Decode(idx.EndOfIndexEntry); err != nil {
- return err
- }
- default:
- return errUnknownExtension
- }
-
- return nil
-}
-
-func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
- len, err := binary.ReadUint32(d.r)
- if err != nil {
- return nil, err
- }
-
- d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
- return d.extReader, nil
-}
-
-func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
- var h plumbing.Hash
- copy(h[:4], alreadyRead[:])
-
- if _, err := io.ReadFull(d.r, h[4:]); err != nil {
- return err
- }
-
- if !bytes.Equal(h[:], expected) {
- return ErrInvalidChecksum
- }
-
- return nil
-}
-
-func validateHeader(r io.Reader) (version uint32, err error) {
- var s = make([]byte, 4)
- if _, err := io.ReadFull(r, s); err != nil {
- return 0, err
- }
-
- if !bytes.Equal(s, indexSignature) {
- return 0, ErrMalformedSignature
- }
-
- version, err = binary.ReadUint32(r)
- if err != nil {
- return 0, err
- }
-
- if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
- return 0, ErrUnsupportedVersion
- }
-
- return
-}
-
-type treeExtensionDecoder struct {
- r *bufio.Reader
-}
-
-func (d *treeExtensionDecoder) Decode(t *Tree) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if e == nil {
- continue
- }
-
- t.Entries = append(t.Entries, *e)
- }
-}
-
-func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
- e := &TreeEntry{}
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- count, err := binary.ReadUntil(d.r, ' ')
- if err != nil {
- return nil, err
- }
-
- i, err := strconv.Atoi(string(count))
- if err != nil {
- return nil, err
- }
-
- // An entry can be in an invalidated state and is represented by having a
- // negative number in the entry_count field.
- if i == -1 {
- return nil, nil
- }
-
- e.Entries = i
- trees, err := binary.ReadUntil(d.r, '\n')
- if err != nil {
- return nil, err
- }
-
- i, err = strconv.Atoi(string(trees))
- if err != nil {
- return nil, err
- }
-
- e.Trees = i
- _, err = io.ReadFull(d.r, e.Hash[:])
-
- return e, nil
-}
-
-type resolveUndoDecoder struct {
- r *bufio.Reader
-}
-
-func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
- for {
- e, err := d.readEntry()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- ru.Entries = append(ru.Entries, *e)
- }
-}
-
-func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
- e := &ResolveUndoEntry{
- Stages: make(map[Stage]plumbing.Hash),
- }
-
- path, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return nil, err
- }
-
- e.Path = string(path)
-
- for i := 0; i < 3; i++ {
- if err := d.readStage(e, Stage(i+1)); err != nil {
- return nil, err
- }
- }
-
- for s := range e.Stages {
- var hash plumbing.Hash
- if _, err := io.ReadFull(d.r, hash[:]); err != nil {
- return nil, err
- }
-
- e.Stages[s] = hash
- }
-
- return e, nil
-}
-
-func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
- ascii, err := binary.ReadUntil(d.r, '\x00')
- if err != nil {
- return err
- }
-
- stage, err := strconv.ParseInt(string(ascii), 8, 64)
- if err != nil {
- return err
- }
-
- if stage != 0 {
- e.Stages[s] = plumbing.ZeroHash
- }
-
- return nil
-}
-
-type endOfIndexEntryDecoder struct {
- r *bufio.Reader
-}
-
-func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
- var err error
- e.Offset, err = binary.ReadUint32(d.r)
- if err != nil {
- return err
- }
-
- _, err = io.ReadFull(d.r, e.Hash[:])
- return err
-}
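
A minimal sketch of decoding a repository's .git/index with this decoder (the Index and Entry types live elsewhere in this package, in files not shown in this hunk):

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
	f, err := os.Open(".git/index") // any repository's index file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var idx index.Index
	if err := index.NewDecoder(f).Decode(&idx); err != nil {
		panic(err)
	}

	fmt.Printf("index version %d, %d entries\n", idx.Version, len(idx.Entries))
	for _, e := range idx.Entries {
		fmt.Println(e.Mode, e.Name)
	}
}
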
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go
deleted file mode 100644
index 39ae6ad5f9..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Package index implements encoding and decoding of index format files.
-//
-// Git index format
-// ================
-//
-// == The Git index file has the following format
-//
-// All binary numbers are in network byte order. Version 2 is described
-// here unless stated otherwise.
-//
-// - A 12-byte header consisting of
-//
-// 4-byte signature:
-// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
-//
-// 4-byte version number:
-// The current supported versions are 2, 3 and 4.
-//
-// 32-bit number of index entries.
-//
-// - A number of sorted index entries (see below).
-//
-// - Extensions
-//
-// Extensions are identified by signature. Optional extensions can
-// be ignored if Git does not understand them.
-//
-// Git currently supports cached tree and resolve undo extensions.
-//
-// 4-byte extension signature. If the first byte is 'A'..'Z' the
-// extension is optional and can be ignored.
-//
-// 32-bit size of the extension
-//
-// Extension data
-//
-// - 160-bit SHA-1 over the content of the index file before this
-// checksum.
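-//
-// As an illustration (not part of the original format text), a version-2
-// index holding 42 entries begins with these twelve bytes, in hex:
-//
-//     44 49 52 43  00 00 00 02  00 00 00 2a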
-//
-// == Index entry
-//
-// Index entries are sorted in ascending order on the name field,
-// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
-// localization, no special casing of directory separator '/'). Entries
-// with the same name are sorted by their stage field.
-//
-// 32-bit ctime seconds, the last time a file's metadata changed
-// this is stat(2) data
-//
-// 32-bit ctime nanosecond fractions
-// this is stat(2) data
-//
-// 32-bit mtime seconds, the last time a file's data changed
-// this is stat(2) data
-//
-// 32-bit mtime nanosecond fractions
-// this is stat(2) data
-//
-// 32-bit dev
-// this is stat(2) data
-//
-// 32-bit ino
-// this is stat(2) data
-//
-// 32-bit mode, split into (high to low bits)
-//
-// 4-bit object type
-// valid values in binary are 1000 (regular file), 1010 (symbolic link)
-// and 1110 (gitlink)
-//
-// 3-bit unused
-//
-// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
-// Symbolic links and gitlinks have value 0 in this field.
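-//
-// For example, a regular file with permission 0644 is stored with mode
-// bits 1000 000 110100100 in binary, i.e. 0100644 in octal.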
-//
-// 32-bit uid
-// this is stat(2) data
-//
-// 32-bit gid
-// this is stat(2) data
-//
-// 32-bit file size
-// This is the on-disk size from stat(2), truncated to 32-bit.
-//
-// 160-bit SHA-1 for the represented object
-//
-// A 16-bit 'flags' field split into (high to low bits)
-//
-// 1-bit assume-valid flag
-//
-// 1-bit extended flag (must be zero in version 2)
-//
-// 2-bit stage (during merge)
-//
-// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
-// is stored in this field.
-//
-// (Version 3 or later) A 16-bit field, only applicable if the
-// "extended flag" above is 1, split into (high to low bits).
-//
-// 1-bit reserved for future
-//
-// 1-bit skip-worktree flag (used by sparse checkout)
-//
-// 1-bit intent-to-add flag (used by "git add -N")
-//
-// 13-bit unused, must be zero
-//
-// Entry path name (variable length) relative to top level directory
-// (without leading slash). '/' is used as path separator. The special
-// path components ".", ".." and ".git" (without quotes) are disallowed.
-// Trailing slash is also disallowed.
-//
-// The exact encoding is undefined, but the '.' and '/' characters
-// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
-// byte (iow, this is a UNIX pathname).
-//
-// (Version 4) In version 4, the entry path name is prefix-compressed
-// relative to the path name for the previous entry (the very first
-// entry is encoded as if the path name for the previous entry is an
-// empty string). At the beginning of an entry, an integer N in the
-// variable width encoding (the same encoding as the offset is encoded
-// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
-// by a NUL-terminated string S. Removing N bytes from the end of the
-// path name for the previous entry, and replacing it with the string S
-// yields the path name for this entry.
-//
-// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
-// while keeping the name NUL-terminated.
-//
-// (Version 4) In version 4, the padding after the pathname does not
-// exist.
-//
-// Interpretation of index entries in split index mode is completely
-// different. See below for details.
-//
-// == Extensions
-//
-// === Cached tree
-//
-// Cached tree extension contains pre-computed hashes for trees that can
-// be derived from the index. It helps speed up tree object generation
-// from index for a new commit.
-//
-// When a path is updated in index, the path must be invalidated and
-// removed from tree cache.
-//
-// The signature for this extension is { 'T', 'R', 'E', 'E' }.
-//
-// A series of entries fill the entire extension; each of which
-// consists of:
-//
-// - NUL-terminated path component (relative to its parent directory);
-//
-// - ASCII decimal number of entries in the index that is covered by the
-// tree this entry represents (entry_count);
-//
-// - A space (ASCII 32);
-//
-// - ASCII decimal number that represents the number of subtrees this
-// tree has;
-//
-// - A newline (ASCII 10); and
-//
-// - 160-bit object name for the object that would result from writing
-// this span of index as a tree.
-//
-// An entry can be in an invalidated state and is represented by having
-// a negative number in the entry_count field. In this case, there is no
-// object name and the next entry starts immediately after the newline.
-// When writing an invalid entry, -1 should always be used as entry_count.
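-//
-// As an illustration, an entry for a subdirectory "src" covering 5 index
-// entries with 2 subtrees is encoded as the bytes "src\0" "5 2\n" followed
-// by the 20-byte object name of the resulting tree.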
-//
-// The entries are written out in the top-down, depth-first order. The
-// first entry represents the root level of the repository, followed by the
-// first subtree--let's call this A--of the root level (with its name
-// relative to the root level), followed by the first subtree of A (with
-// its name relative to A), ...
-//
-// === Resolve undo
-//
-// A conflict is represented in the index as a set of higher stage entries.
-// When a conflict is resolved (e.g. with "git add path"), these higher
-// stage entries will be removed and a stage-0 entry with proper resolution
-// is added.
-//
-// When these higher stage entries are removed, they are saved in the
-// resolve undo extension, so that conflicts can be recreated (e.g. with
-// "git checkout -m"), in case users want to redo a conflict resolution
-// from scratch.
-//
-// The signature for this extension is { 'R', 'E', 'U', 'C' }.
-//
-// A series of entries fill the entire extension; each of which
-// consists of:
-//
-// - NUL-terminated pathname the entry describes (relative to the root of
-// the repository, i.e. full pathname);
-//
-// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
-// stage 1 to 3 (a missing stage is represented by "0" in this field);
-// and
-//
-// - At most three 160-bit object names of the entry in stages from 1 to 3
-// (nothing is written for a missing stage).
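-//
-// As an illustration, a conflict on "file.txt" present in all three stages
-// with mode 100644 is encoded as "file.txt\0" followed by "100644\0" three
-// times and then three 20-byte object names, one per stage.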
-//
-// === Split index
-//
-// In split index mode, the majority of index entries could be stored
-// in a separate file. This extension records the changes to be made on
-// top of that to produce the final index.
-//
-// The signature for this extension is { 'l', 'i', 'n', 'k' }.
-//
-// The extension consists of:
-//
-// - 160-bit SHA-1 of the shared index file. The shared index file path
-// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
-// index does not require a shared index file.
-//
-// - An ewah-encoded delete bitmap, each bit represents an entry in the
-// shared index. If a bit is set, its corresponding entry in the
-// shared index will be removed from the final index. Note, because
-// a delete operation changes index entry positions, but we do need
-// original positions in replace phase, it's best to just mark
-// entries for removal, then do a mass deletion after replacement.
-//
-// - An ewah-encoded replace bitmap, each bit represents an entry in
-// the shared index. If a bit is set, its corresponding entry in the
-// shared index will be replaced with an entry in this index
-// file. All replaced entries are stored in sorted order in this
-// index. The first "1" bit in the replace bitmap corresponds to the
-// first index entry, the second "1" bit to the second entry and so
-// on. Replaced entries may have empty path names to save space.
-//
-// The remaining index entries after replaced ones will be added to the
-// final index. These added entries are also sorted by entry name then
-// stage.
-//
-// == Untracked cache
-//
-// Untracked cache saves the untracked file list and necessary data to
-// verify the cache. The signature for this extension is { 'U', 'N',
-// 'T', 'R' }.
-//
-// The extension starts with
-//
-// - A sequence of NUL-terminated strings, preceded by the size of the
-// sequence in variable width encoding. Each string describes the
-// environment where the cache can be used.
-//
-// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
-// ctime field until "file size".
-//
-// - Stat data of plumbing.excludesfile
-//
-// - 32-bit dir_flags (see struct dir_struct)
-//
-// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
-// does not exist.
-//
-// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does
-// not exist.
-//
-// - NUL-terminated string of per-dir exclude file name. This usually
-// is ".gitignore".
-//
-// - The number of following directory blocks, variable width
-// encoding. If this number is zero, the extension ends here with a
-// following NUL.
-//
-// - A number of directory blocks in depth-first-search order, each
-// consists of
-//
-// - The number of untracked entries, variable width encoding.
-//
-// - The number of sub-directory blocks, variable width encoding.
-//
-// - The directory name terminated by NUL.
-//
-// - A number of untracked file/dir names terminated by NUL.
-//
-// The remaining data of each directory block is grouped by type:
-//
-// - An ewah bitmap, the n-th bit marks whether the n-th directory has
-// valid untracked cache entries.
-//
-// - An ewah bitmap, the n-th bit records "check-only" bit of
-// read_directory_recursive() for the n-th directory.
-//
-// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
-// is valid for the n-th directory and exists in the next data.
-//
-// - An array of stat data. The n-th data corresponds with the n-th
-// "one" bit in the previous ewah bitmap.
-//
-// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
-// in the previous ewah bitmap.
-//
-// - One NUL.
-//
-// == File System Monitor cache
-//
-// The file system monitor cache tracks files for which the core.fsmonitor
-// hook has told us about changes. The signature for this extension is
-// { 'F', 'S', 'M', 'N' }.
-//
-// The extension starts with
-//
-// - 32-bit version number: the current supported version is 1.
-//
-// - 64-bit time: the extension data reflects all changes through the given
-// time which is stored as the nanoseconds elapsed since midnight,
-// January 1, 1970.
-//
-// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
-//
-// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
-// is not CE_FSMONITOR_VALID.
-//
-// == End of Index Entry
-//
-// The End of Index Entry (EOIE) is used to locate the end of the variable
-// length index entries and the beginning of the extensions. Code can take
-// advantage of this to quickly locate the index extensions without having
-// to parse through all of the index entries.
-//
-// Because it must be able to be loaded before the variable length cache
-// entries and other index extensions, this extension must be written last.
-// The signature for this extension is { 'E', 'O', 'I', 'E' }.
-//
-// The extension consists of:
-//
-// - 32-bit offset to the end of the index entries
-//
-// - 160-bit SHA-1 over the extension types and their sizes (but not
-// their contents). E.g. if we have "TREE" extension that is N-bytes
-// long, "REUC" extension that is M-bytes long, followed by "EOIE",
-// then the hash would be:
-//
-// SHA-1("TREE" + <binary representation of N> +
-// "REUC" + <binary representation of M>)
-//
-// == Index Entry Offset Table
-//
-// The Index Entry Offset Table (IEOT) is used to help address the CPU
-// cost of loading the index by enabling multi-threading the process of
-// converting cache entries from the on-disk format to the in-memory format.
-// The signature for this extension is { 'I', 'E', 'O', 'T' }.
-//
-// The extension consists of:
-//
-// - 32-bit version (currently 1)
-//
-// - A number of index offset entries each consisting of:
-//
-// - 32-bit offset from the beginning of the file to the first cache entry
-// in this block of entries.
-//
-//     - 32-bit count of cache entries in this block
-package index
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go
deleted file mode 100644
index 7111314c93..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package index
-
-import (
- "bytes"
- "crypto/sha1"
- "errors"
- "hash"
- "io"
- "sort"
- "time"
-
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-var (
-	// EncodeVersionSupported is the index version supported by the Encoder
- EncodeVersionSupported uint32 = 2
-
-	// ErrInvalidTimestamp is returned by Encode if the Index contains an
-	// Entry with negative timestamp values
- ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
-)
-
-// An Encoder writes an Index to an output stream.
-type Encoder struct {
- w io.Writer
- hash hash.Hash
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- h := sha1.New()
- mw := io.MultiWriter(w, h)
- return &Encoder{mw, h}
-}
-
-// Encode writes the Index to the stream of the encoder.
-func (e *Encoder) Encode(idx *Index) error {
- // TODO: support versions v3 and v4
- // TODO: support extensions
- if idx.Version != EncodeVersionSupported {
- return ErrUnsupportedVersion
- }
-
- if err := e.encodeHeader(idx); err != nil {
- return err
- }
-
- if err := e.encodeEntries(idx); err != nil {
- return err
- }
-
- return e.encodeFooter()
-}
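-
-// A minimal usage sketch (illustrative; assumes an *Index named idx and an
-// io.Writer named w, with error handling elided):
-//
-//	e := NewEncoder(w)
-//	if err := e.Encode(idx); err != nil {
-//		// handle error
-//	}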
-
-func (e *Encoder) encodeHeader(idx *Index) error {
- return binary.Write(e.w,
- indexSignature,
- idx.Version,
- uint32(len(idx.Entries)),
- )
-}
-
-func (e *Encoder) encodeEntries(idx *Index) error {
- sort.Sort(byName(idx.Entries))
-
- for _, entry := range idx.Entries {
- if err := e.encodeEntry(entry); err != nil {
- return err
- }
-
- wrote := entryHeaderLength + len(entry.Name)
- if err := e.padEntry(wrote); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeEntry(entry *Entry) error {
- if entry.IntentToAdd || entry.SkipWorktree {
- return ErrUnsupportedVersion
- }
-
- sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
- if err != nil {
- return err
- }
-
- msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
- if err != nil {
- return err
- }
-
- flags := uint16(entry.Stage&0x3) << 12
- if l := len(entry.Name); l < nameMask {
- flags |= uint16(l)
- } else {
- flags |= nameMask
- }
-
- flow := []interface{}{
- sec, nsec,
- msec, mnsec,
- entry.Dev,
- entry.Inode,
- entry.Mode,
- entry.UID,
- entry.GID,
- entry.Size,
- entry.Hash[:],
- flags,
- }
-
- if err := binary.Write(e.w, flow...); err != nil {
- return err
- }
-
- return binary.Write(e.w, []byte(entry.Name))
-}
-
-func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
- if t.IsZero() {
- return 0, 0, nil
- }
-
- if t.Unix() < 0 || t.UnixNano() < 0 {
- return 0, 0, ErrInvalidTimestamp
- }
-
- return uint32(t.Unix()), uint32(t.Nanosecond()), nil
-}
-
-func (e *Encoder) padEntry(wrote int) error {
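-	// Entries are padded with 1-8 NUL bytes to a multiple of eight while
-	// keeping the name NUL-terminated; when wrote is already a multiple of
-	// eight, a full 8-byte run of NULs is written.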
- padLen := 8 - wrote%8
-
- _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
- return err
-}
-
-func (e *Encoder) encodeFooter() error {
- return binary.Write(e.w, e.hash.Sum(nil))
-}
-
-type byName []*Entry
-
-func (l byName) Len() int { return len(l) }
-func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name }
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go
deleted file mode 100644
index 6653c91d2b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package index
-
-import (
- "bytes"
- "errors"
- "fmt"
- "path/filepath"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
-)
-
-var (
- // ErrUnsupportedVersion is returned by Decode when the index file version
- // is not supported.
- ErrUnsupportedVersion = errors.New("unsupported version")
- // ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
- ErrEntryNotFound = errors.New("entry not found")
-
- indexSignature = []byte{'D', 'I', 'R', 'C'}
- treeExtSignature = []byte{'T', 'R', 'E', 'E'}
- resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
- endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
-)
-
-// Stage represents the merge stage of an index entry during a merge
-type Stage int
-
-const (
- // Merged is the default stage, fully merged
- Merged Stage = 1
- // AncestorMode is the base revision
- AncestorMode Stage = 1
- // OurMode is the first tree revision, ours
- OurMode Stage = 2
- // TheirMode is the second tree revision, theirs
- TheirMode Stage = 3
-)
-
-// Index contains the information about which objects are currently checked
-// out in the worktree, along with metadata about the working files. Changes
-// in the worktree are detected using this Index. The Index is also used
-// during merges.
-type Index struct {
- // Version is index version
- Version uint32
-	// Entries is the collection of entries represented by this Index. The order of
- // this collection is not guaranteed
- Entries []*Entry
- // Cache represents the 'Cached tree' extension
- Cache *Tree
- // ResolveUndo represents the 'Resolve undo' extension
- ResolveUndo *ResolveUndo
- // EndOfIndexEntry represents the 'End of Index Entry' extension
- EndOfIndexEntry *EndOfIndexEntry
-}
-
-// Add creates a new Entry and returns it. The caller should first check that
-// another entry with the same path does not exist.
-func (i *Index) Add(path string) *Entry {
- e := &Entry{
- Name: filepath.ToSlash(path),
- }
-
- i.Entries = append(i.Entries, e)
- return e
-}
-
-// Entry returns the entry that matches the given path, if any.
-func (i *Index) Entry(path string) (*Entry, error) {
- path = filepath.ToSlash(path)
- for _, e := range i.Entries {
- if e.Name == path {
- return e, nil
- }
- }
-
- return nil, ErrEntryNotFound
-}
-
-// Remove removes the entry that matches the given path and returns the
-// deleted entry.
-func (i *Index) Remove(path string) (*Entry, error) {
- path = filepath.ToSlash(path)
- for index, e := range i.Entries {
- if e.Name == path {
- i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
- return e, nil
- }
- }
-
- return nil, ErrEntryNotFound
-}
-
-// Glob returns all entries matching pattern, or nil if there is no matching
-// entry. The syntax of patterns is the same as in filepath.Glob.
-func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
- pattern = filepath.ToSlash(pattern)
- for _, e := range i.Entries {
- m, err := match(pattern, e.Name)
- if err != nil {
- return nil, err
- }
-
- if m {
- matches = append(matches, e)
- }
- }
-
- return
-}
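-
-// Note that the matcher used by Glob applies the pattern to the full entry
-// path, so '*' may also match across '/' separators; for example
-// (illustrative), Glob("*.go") can match both "a.go" and "cmd/a.go".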
-
-// String is equivalent to `git ls-files --stage --debug`
-func (i *Index) String() string {
- buf := bytes.NewBuffer(nil)
- for _, e := range i.Entries {
- buf.WriteString(e.String())
- }
-
- return buf.String()
-}
-
-// Entry represents a single file (or stage of a file) in the cache. An entry
-// represents exactly one stage of a file. If a file path is unmerged then
-// multiple Entry instances may appear for the same path name.
-type Entry struct {
- // Hash is the SHA1 of the represented file
- Hash plumbing.Hash
- // Name is the Entry path name relative to top level directory
- Name string
- // CreatedAt time when the tracked path was created
- CreatedAt time.Time
- // ModifiedAt time when the tracked path was changed
- ModifiedAt time.Time
- // Dev and Inode of the tracked path
- Dev, Inode uint32
- // Mode of the path
- Mode filemode.FileMode
- // UID and GID, userid and group id of the owner
- UID, GID uint32
- // Size is the length in bytes for regular files
- Size uint32
-	// Stage defines, during a merge, which merge stage this entry represents
- // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
- Stage Stage
- // SkipWorktree used in sparse checkouts
- // https://git-scm.com/docs/git-read-tree#_sparse_checkout
- SkipWorktree bool
-	// IntentToAdd records only the fact that the path will be added later
- // https://git-scm.com/docs/git-add ("git add -N")
- IntentToAdd bool
-}
-
-func (e Entry) String() string {
- buf := bytes.NewBuffer(nil)
-
- fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name)
- fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond())
- fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond())
- fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode)
- fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID)
- fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0)
-
- return buf.String()
-}
-
-// Tree contains pre-computed hashes for trees that can be derived from the
-// index. It helps speed up tree object generation from index for a new commit.
-type Tree struct {
- Entries []TreeEntry
-}
-
-// TreeEntry entry of a cached Tree
-type TreeEntry struct {
- // Path component (relative to its parent directory)
- Path string
- // Entries is the number of entries in the index that is covered by the tree
- // this entry represents.
- Entries int
- // Trees is the number that represents the number of subtrees this tree has
- Trees int
- // Hash object name for the object that would result from writing this span
- // of index as a tree.
- Hash plumbing.Hash
-}
-
-// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"),
-// these higher stage entries are removed and a stage-0 entry with proper
-// resolution is added. When these higher stage entries are removed, they are
-// saved in the resolve undo extension.
-type ResolveUndo struct {
- Entries []ResolveUndoEntry
-}
-
-// ResolveUndoEntry contains the information about a conflict once it is
-// resolved
-type ResolveUndoEntry struct {
- Path string
- Stages map[Stage]plumbing.Hash
-}
-
-// EndOfIndexEntry is the End of Index Entry (EOIE) extension, used to locate
-// the end of the variable length index entries and the beginning of the
-// extensions. Code can take advantage of this to quickly locate the index
-// extensions without having to parse through all of the index entries.
-//
-// Because it must be able to be loaded before the variable length cache
-// entries and other index extensions, this extension must be written last.
-type EndOfIndexEntry struct {
- // Offset to the end of the index entries
- Offset uint32
- // Hash is a SHA-1 over the extension types and their sizes (but not
- // their contents).
- Hash plumbing.Hash
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/match.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/match.go
deleted file mode 100644
index 2891d7d34c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/match.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package index
-
-import (
- "path/filepath"
- "runtime"
- "unicode/utf8"
-)
-
-// match is filepath.Match with support for matching full paths, not only
-// filenames
-// code from:
-// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224
-func match(pattern, name string) (matched bool, err error) {
-Pattern:
- for len(pattern) > 0 {
- var star bool
- var chunk string
- star, chunk, pattern = scanChunk(pattern)
-
- // Look for match at current position.
- t, ok, err := matchChunk(chunk, name)
- // if we're the last chunk, make sure we've exhausted the name
- // otherwise we'll give a false result even if we could still match
- // using the star
- if ok && (len(t) == 0 || len(pattern) > 0) {
- name = t
- continue
- }
- if err != nil {
- return false, err
- }
- if star {
- // Look for match skipping i+1 bytes.
-			// Unlike filepath.Match, '/' may be skipped as well,
-			// so patterns can match across directory separators.
- for i := 0; i < len(name); i++ {
- t, ok, err := matchChunk(chunk, name[i+1:])
- if ok {
- // if we're the last chunk, make sure we exhausted the name
- if len(pattern) == 0 && len(t) > 0 {
- continue
- }
- name = t
- continue Pattern
- }
- if err != nil {
- return false, err
- }
- }
- }
- return false, nil
- }
- return len(name) == 0, nil
-}
-
-// scanChunk gets the next segment of pattern, which is a non-star string
-// possibly preceded by a star.
-func scanChunk(pattern string) (star bool, chunk, rest string) {
- for len(pattern) > 0 && pattern[0] == '*' {
- pattern = pattern[1:]
- star = true
- }
- inrange := false
- var i int
-Scan:
- for i = 0; i < len(pattern); i++ {
- switch pattern[i] {
- case '\\':
- if runtime.GOOS != "windows" {
- // error check handled in matchChunk: bad pattern.
- if i+1 < len(pattern) {
- i++
- }
- }
- case '[':
- inrange = true
- case ']':
- inrange = false
- case '*':
- if !inrange {
- break Scan
- }
- }
- }
- return star, pattern[0:i], pattern[i:]
-}
-
-// matchChunk checks whether chunk matches the beginning of s.
-// If so, it returns the remainder of s (after the match).
-// Chunk is all single-character operators: literals, char classes, and ?.
-func matchChunk(chunk, s string) (rest string, ok bool, err error) {
- for len(chunk) > 0 {
- if len(s) == 0 {
- return
- }
- switch chunk[0] {
- case '[':
- // character class
- r, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- chunk = chunk[1:]
- // We can't end right after '[', we're expecting at least
- // a closing bracket and possibly a caret.
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- // possibly negated
- negated := chunk[0] == '^'
- if negated {
- chunk = chunk[1:]
- }
- // parse all ranges
- match := false
- nrange := 0
- for {
- if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
- chunk = chunk[1:]
- break
- }
- var lo, hi rune
- if lo, chunk, err = getEsc(chunk); err != nil {
- return
- }
- hi = lo
- if chunk[0] == '-' {
- if hi, chunk, err = getEsc(chunk[1:]); err != nil {
- return
- }
- }
- if lo <= r && r <= hi {
- match = true
- }
- nrange++
- }
- if match == negated {
- return
- }
-
- case '?':
- _, n := utf8.DecodeRuneInString(s)
- s = s[n:]
- chunk = chunk[1:]
-
- case '\\':
- if runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- }
- fallthrough
-
- default:
- if chunk[0] != s[0] {
- return
- }
- s = s[1:]
- chunk = chunk[1:]
- }
- }
- return s, true, nil
-}
-
-// getEsc gets a possibly-escaped character from chunk, for a character class.
-func getEsc(chunk string) (r rune, nchunk string, err error) {
- if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
- err = filepath.ErrBadPattern
- return
- }
- if chunk[0] == '\\' && runtime.GOOS != "windows" {
- chunk = chunk[1:]
- if len(chunk) == 0 {
- err = filepath.ErrBadPattern
- return
- }
- }
- r, n := utf8.DecodeRuneInString(chunk)
- if r == utf8.RuneError && n == 1 {
- err = filepath.ErrBadPattern
- }
- nchunk = chunk[n:]
- if len(nchunk) == 0 {
- err = filepath.ErrBadPattern
- }
- return
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go
deleted file mode 100644
index a7145160ae..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package objfile implements encoding and decoding of object files.
-package objfile
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go
deleted file mode 100644
index c4467e4817..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package objfile
-
-import (
- "compress/zlib"
- "errors"
- "io"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
-)
-
-var (
- ErrClosed = errors.New("objfile: already closed")
- ErrHeader = errors.New("objfile: invalid header")
- ErrNegativeSize = errors.New("objfile: negative object size")
-)
-
-// Reader reads and decodes compressed objfile data from a provided io.Reader.
-// Reader implements io.ReadCloser. Close should be called when finished with
-// the Reader. Close will not close the underlying io.Reader.
-type Reader struct {
- multi io.Reader
- zlib io.ReadCloser
- hasher plumbing.Hasher
-}
-
-// NewReader returns a new Reader reading from r.
-func NewReader(r io.Reader) (*Reader, error) {
- zlib, err := zlib.NewReader(r)
- if err != nil {
- return nil, packfile.ErrZLib.AddDetails(err.Error())
- }
-
- return &Reader{
- zlib: zlib,
- }, nil
-}
-
-// Header reads the type and size of the object, and prepares the reader to
-// read the object contents
-func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
- var raw []byte
- raw, err = r.readUntil(' ')
- if err != nil {
- return
- }
-
- t, err = plumbing.ParseObjectType(string(raw))
- if err != nil {
- return
- }
-
- raw, err = r.readUntil(0)
- if err != nil {
- return
- }
-
- size, err = strconv.ParseInt(string(raw), 10, 64)
- if err != nil {
- err = ErrHeader
- return
- }
-
- defer r.prepareForRead(t, size)
- return
-}
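-
-// A typical read sequence (illustrative; assumes src is an io.Reader over an
-// object file, with error handling elided):
-//
-//	r, _ := NewReader(src)
-//	t, size, _ := r.Header()
-//	data := make([]byte, size)
-//	io.ReadFull(r, data)
-//	sum := r.Hash()
-//	r.Close()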
-
-// readUntil reads one byte at a time from r until it encounters delim or an
-// error.
-func (r *Reader) readUntil(delim byte) ([]byte, error) {
- var buf [1]byte
- value := make([]byte, 0, 16)
- for {
- if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
- if err == io.EOF {
- return nil, ErrHeader
- }
- return nil, err
- }
-
- if buf[0] == delim {
- return value, nil
- }
-
- value = append(value, buf[0])
- }
-}
-
-func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
- r.hasher = plumbing.NewHasher(t, size)
- r.multi = io.TeeReader(r.zlib, r.hasher)
-}
-
-// Read reads len(p) bytes into p from the object data stream. It returns
-// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
-// if Read returns n < len(p), it may use all of p as scratch space during the
-// call.
-//
-// If Read encounters the end of the data stream it will return err == io.EOF,
-// either in the current call if n > 0 or in a subsequent call.
-func (r *Reader) Read(p []byte) (n int, err error) {
- return r.multi.Read(p)
-}
-
-// Hash returns the hash of the object data stream that has been read so far.
-func (r *Reader) Hash() plumbing.Hash {
- return r.hasher.Sum()
-}
-
-// Close releases any resources consumed by the Reader. Calling Close does not
-// close the wrapped io.Reader originally passed to NewReader.
-func (r *Reader) Close() error {
- return r.zlib.Close()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go
deleted file mode 100644
index 5555243401..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package objfile
-
-import (
- "compress/zlib"
- "errors"
- "io"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-var (
- ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
-)
-
-// Writer writes and encodes data in compressed objfile format to a provided
-// io.Writer. Close should be called when finished with the Writer. Close will
-// not close the underlying io.Writer.
-type Writer struct {
- raw io.Writer
- zlib io.WriteCloser
- hasher plumbing.Hasher
- multi io.Writer
-
- closed bool
- pending int64 // number of unwritten bytes
-}
-
-// NewWriter returns a new Writer writing to w.
-//
-// The returned Writer implements io.WriteCloser. Close should be called when
-// finished with the Writer. Close will not close the underlying io.Writer.
-func NewWriter(w io.Writer) *Writer {
- return &Writer{
- raw: w,
- zlib: zlib.NewWriter(w),
- }
-}
-
-// WriteHeader writes the type and the size and prepares to accept the object's
-// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
-// negative size is provided, ErrNegativeSize is returned.
-func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error {
- if !t.Valid() {
- return plumbing.ErrInvalidType
- }
- if size < 0 {
- return ErrNegativeSize
- }
-
- b := t.Bytes()
- b = append(b, ' ')
- b = append(b, []byte(strconv.FormatInt(size, 10))...)
- b = append(b, 0)
-
- defer w.prepareForWrite(t, size)
- _, err := w.zlib.Write(b)
-
- return err
-}
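-
-// A typical write sequence (illustrative; assumes dst is an io.Writer and
-// data holds the object contents, with error handling elided):
-//
-//	w := NewWriter(dst)
-//	w.WriteHeader(plumbing.BlobObject, int64(len(data)))
-//	w.Write(data)
-//	id := w.Hash()
-//	w.Close()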
-
-func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) {
- w.pending = size
-
- w.hasher = plumbing.NewHasher(t, size)
- w.multi = io.MultiWriter(w.zlib, w.hasher)
-}
-
-// Write writes the object's contents. Write returns the error ErrOverflow if
-// more than size bytes are written after WriteHeader.
-func (w *Writer) Write(p []byte) (n int, err error) {
- if w.closed {
- return 0, ErrClosed
- }
-
- overwrite := false
- if int64(len(p)) > w.pending {
- p = p[0:w.pending]
- overwrite = true
- }
-
- n, err = w.multi.Write(p)
- w.pending -= int64(n)
- if err == nil && overwrite {
- err = ErrOverflow
- return
- }
-
- return
-}
-
-// Hash returns the hash of the object data stream that has been written so far.
-// It can be called before or after Close.
-func (w *Writer) Hash() plumbing.Hash {
- return w.hasher.Sum() // Not yet closed, return hash of data written so far
-}
-
-// Close releases any resources consumed by the Writer.
-//
-// Calling Close does not close the wrapped io.Writer originally passed to
-// NewWriter.
-func (w *Writer) Close() error {
- if err := w.zlib.Close(); err != nil {
- return err
- }
-
- w.closed = true
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go
deleted file mode 100644
index f82c1abe55..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "compress/zlib"
- "io"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-var signature = []byte{'P', 'A', 'C', 'K'}
-
-const (
- // VersionSupported is the packfile version supported by this package
- VersionSupported uint32 = 2
-
-	firstLengthBits = uint8(4) // the first byte of an object header has 4 bits to store the length
-	lengthBits      = uint8(7) // subsequent bytes have 7 bits to store the length
- maskFirstLength = 15 // 0000 1111
- maskContinue = 0x80 // 1000 0000
- maskLength = uint8(127) // 0111 1111
- maskType = uint8(112) // 0111 0000
-)
-
-// UpdateObjectStorage updates the storer with the objects in the given
-// packfile.
-func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
- if pw, ok := s.(storer.PackfileWriter); ok {
- return WritePackfileToObjectStorage(pw, packfile)
- }
-
- p, err := NewParserWithStorage(NewScanner(packfile), s)
- if err != nil {
- return err
- }
-
- _, err = p.Parse()
- return err
-}
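-
-// An illustrative call (assumes s is a storer.Storer and f is an io.Reader
-// over a packfile, with error handling elided):
-//
-//	err := UpdateObjectStorage(s, f)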
-
-// WritePackfileToObjectStorage writes all the packfile objects into the given
-// object storage.
-func WritePackfileToObjectStorage(
- sw storer.PackfileWriter,
- packfile io.Reader,
-) (err error) {
- w, err := sw.PackfileWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- var n int64
- n, err = io.Copy(w, packfile)
- if err == nil && n == 0 {
- return ErrEmptyPackfile
- }
-
- return err
-}
-
-var bufPool = sync.Pool{
- New: func() interface{} {
- return bytes.NewBuffer(nil)
- },
-}
-
-var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
-
-var zlibReaderPool = sync.Pool{
- New: func() interface{} {
- r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
- return r
- },
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go
deleted file mode 100644
index 07a61120e5..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package packfile
-
-const blksz = 16
-const maxChainLength = 64
-
-// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current
-// design.
-type deltaIndex struct {
- table []int
- entries []int
- mask int
-}
-
-func (idx *deltaIndex) init(buf []byte) {
- scanner := newDeltaIndexScanner(buf, len(buf))
- idx.mask = scanner.mask
- idx.table = scanner.table
- idx.entries = make([]int, countEntries(scanner)+1)
- idx.copyEntries(scanner)
-}
-
-// findMatch returns the offset in src where the block starting at tgtOffset
-// is found, and the length of the match. A length of 0 means there was no
-// match. A length of -1 means src is shorter than blksz; any other positive
-// length is the length of the match in bytes.
-func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) {
- if len(tgt) < tgtOffset+s {
- return 0, len(tgt) - tgtOffset
- }
-
- if len(src) < blksz {
- return 0, -1
- }
-
- if len(tgt) >= tgtOffset+s && len(src) >= blksz {
- h := hashBlock(tgt, tgtOffset)
- tIdx := h & idx.mask
- eIdx := idx.table[tIdx]
- if eIdx != 0 {
- srcOffset = idx.entries[eIdx]
- } else {
- return
- }
-
- l = matchLength(src, tgt, tgtOffset, srcOffset)
- }
-
- return
-}
-
-func matchLength(src, tgt []byte, otgt, osrc int) (l int) {
- lensrc := len(src)
- lentgt := len(tgt)
- for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] {
- l++
- osrc++
- otgt++
- }
- return
-}
-
-func countEntries(scan *deltaIndexScanner) (cnt int) {
- // Figure out exactly how many entries we need. As we do the
- // enumeration truncate any delta chains longer than what we
- // are willing to scan during encode. This keeps the encode
- // logic linear in the size of the input rather than quadratic.
- for i := 0; i < len(scan.table); i++ {
- h := scan.table[i]
- if h == 0 {
- continue
- }
-
- size := 0
- for {
- size++
- if size == maxChainLength {
- scan.next[h] = 0
- break
- }
- h = scan.next[h]
-
- if h == 0 {
- break
- }
- }
- cnt += size
- }
-
- return
-}
-
-func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) {
- // Rebuild the entries list from the scanner, positioning all
- // blocks in the same hash chain next to each other. We can
- // then later discard the next list, along with the scanner.
- //
- next := 1
- for i := 0; i < len(idx.table); i++ {
- h := idx.table[i]
- if h == 0 {
- continue
- }
-
- idx.table[i] = next
- for {
- idx.entries[next] = scanner.entries[h]
- next++
- h = scanner.next[h]
-
- if h == 0 {
- break
- }
- }
- }
-}
-
-type deltaIndexScanner struct {
- table []int
- entries []int
- next []int
- mask int
- count int
-}
-
-func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner {
- size -= size % blksz
- worstCaseBlockCnt := size / blksz
- if worstCaseBlockCnt < 1 {
- return new(deltaIndexScanner)
- }
-
- tableSize := tableSize(worstCaseBlockCnt)
- scanner := &deltaIndexScanner{
- table: make([]int, tableSize),
- mask: tableSize - 1,
- entries: make([]int, worstCaseBlockCnt+1),
- next: make([]int, worstCaseBlockCnt+1),
- }
-
- scanner.scan(buf, size)
- return scanner
-}
-
-// scan is a slightly modified version of JGit's DeltaIndexScanner. We store
-// only the offset in each entry, instead of the offset and the key, so we
-// avoid the extra work to retrieve the offset later, as we don't use the key.
-// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java
-func (s *deltaIndexScanner) scan(buf []byte, end int) {
- lastHash := 0
- ptr := end - blksz
-
- for {
- key := hashBlock(buf, ptr)
- tIdx := key & s.mask
- head := s.table[tIdx]
- if head != 0 && lastHash == key {
- s.entries[head] = ptr
- } else {
- s.count++
- eIdx := s.count
- s.entries[eIdx] = ptr
- s.next[eIdx] = head
- s.table[tIdx] = eIdx
- }
-
- lastHash = key
- ptr -= blksz
-
-		if ptr < 0 {
- break
- }
- }
-}
-
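-// tableSize returns the smallest power of two greater than or equal to
-// worstCaseBlockCnt, so that table indices can be derived with a bit mask.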
-func tableSize(worstCaseBlockCnt int) int {
- shift := 32 - leadingZeros(uint32(worstCaseBlockCnt))
- sz := 1 << uint(shift-1)
- if sz < worstCaseBlockCnt {
- sz <<= 1
- }
- return sz
-}
-
-// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future
-func leadingZeros(x uint32) (n int) {
- if x >= 1<<16 {
- x >>= 16
- n = 16
- }
- if x >= 1<<8 {
- x >>= 8
- n += 8
- }
- n += int(len8tab[x])
- return 32 - n
-}
-
-var len8tab = [256]uint8{
- 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
-}
-
-func hashBlock(raw []byte, ptr int) int {
- // The first 4 steps collapse out into a 4 byte big-endian decode,
- // with a larger right shift as we combined shift lefts together.
- //
- hash := ((uint32(raw[ptr]) & 0xff) << 24) |
- ((uint32(raw[ptr+1]) & 0xff) << 16) |
- ((uint32(raw[ptr+2]) & 0xff) << 8) |
- (uint32(raw[ptr+3]) & 0xff)
- hash ^= T[hash>>31]
-
- hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23]
-
- hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23]
-
- hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23]
- hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23]
-
- return int(hash)
-}
-
-var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577,
- 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99,
- 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45,
- 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c,
- 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895,
- 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd,
- 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f,
- 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181,
- 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e,
- 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770,
- 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d,
- 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5,
- 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c,
- 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084,
- 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558,
- 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6,
- 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788,
- 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66,
- 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba,
- 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c,
- 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105,
- 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d,
- 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990,
- 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e,
- 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61,
- 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f,
- 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f,
- 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17,
- 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e,
- 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7,
- 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b,
- 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5,
- 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4,
- 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a,
- 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96,
- 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df,
- 0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46,
- 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e,
- 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62,
- 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c,
- 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93,
- 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d,
- 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680,
- 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8,
- 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071,
- 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657,
- 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b,
- 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965,
- 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b,
- 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5,
- 0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69,
- 0xe4fe0d44, 0x4d736b1e, 0x99b5d833,
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go
deleted file mode 100644
index 6710085538..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package packfile
-
-import (
- "sort"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-const (
-	// maxDepth limits how many steps of deltas-on-deltas we can chain.
-	// 50 is the default value used in JGit.
- maxDepth = int64(50)
-)
-
-// applyDelta is the set of object types to which we should apply deltas
-var applyDelta = map[plumbing.ObjectType]bool{
- plumbing.BlobObject: true,
- plumbing.TreeObject: true,
-}
-
-type deltaSelector struct {
- storer storer.EncodedObjectStorer
-}
-
-func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector {
- return &deltaSelector{s}
-}
-
-// ObjectsToPack creates a list of ObjectToPack from the hashes
-// provided, creating deltas when suitable, using a specific
-// internal logic. `packWindow` specifies the size of the sliding
-// window used to compare objects for delta compression; 0 turns off
-// delta compression entirely.
-func (dw *deltaSelector) ObjectsToPack(
- hashes []plumbing.Hash,
- packWindow uint,
-) ([]*ObjectToPack, error) {
- otp, err := dw.objectsToPack(hashes, packWindow)
- if err != nil {
- return nil, err
- }
-
- if packWindow == 0 {
- return otp, nil
- }
-
- dw.sort(otp)
-
- var objectGroups [][]*ObjectToPack
- var prev *ObjectToPack
- i := -1
- for _, obj := range otp {
- if prev == nil || prev.Type() != obj.Type() {
- objectGroups = append(objectGroups, []*ObjectToPack{obj})
- i++
- prev = obj
- } else {
- objectGroups[i] = append(objectGroups[i], obj)
- }
- }
-
- var wg sync.WaitGroup
- var once sync.Once
- for _, objs := range objectGroups {
- objs := objs
- wg.Add(1)
- go func() {
- if walkErr := dw.walk(objs, packWindow); walkErr != nil {
- once.Do(func() {
- err = walkErr
- })
- }
- wg.Done()
- }()
- }
- wg.Wait()
-
- if err != nil {
- return nil, err
- }
-
- return otp, nil
-}
-
-func (dw *deltaSelector) objectsToPack(
- hashes []plumbing.Hash,
- packWindow uint,
-) ([]*ObjectToPack, error) {
- var objectsToPack []*ObjectToPack
- for _, h := range hashes {
- var o plumbing.EncodedObject
- var err error
- if packWindow == 0 {
- o, err = dw.encodedObject(h)
- } else {
- o, err = dw.encodedDeltaObject(h)
- }
- if err != nil {
- return nil, err
- }
-
- otp := newObjectToPack(o)
- if _, ok := o.(plumbing.DeltaObject); ok {
- otp.CleanOriginal()
- }
-
- objectsToPack = append(objectsToPack, otp)
- }
-
- if packWindow == 0 {
- return objectsToPack, nil
- }
-
- if err := dw.fixAndBreakChains(objectsToPack); err != nil {
- return nil, err
- }
-
- return objectsToPack, nil
-}
-
-func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
- edos, ok := dw.storer.(storer.DeltaObjectStorer)
- if !ok {
- return dw.encodedObject(h)
- }
-
- return edos.DeltaObject(plumbing.AnyObject, h)
-}
-
-func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
- return dw.storer.EncodedObject(plumbing.AnyObject, h)
-}
-
-func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error {
- m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack))
- for _, otp := range objectsToPack {
- m[otp.Hash()] = otp
- }
-
- for _, otp := range objectsToPack {
- if err := dw.fixAndBreakChainsOne(m, otp); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error {
- if !otp.Object.Type().IsDelta() {
- return nil
- }
-
-	// Initial ObjectToPack instances might have a delta assigned to Object
-	// but no actual base yet. Once Base is assigned to a delta, it means we
-	// have already fixed it.
- if otp.Base != nil {
- return nil
- }
-
- do, ok := otp.Object.(plumbing.DeltaObject)
- if !ok {
- // if this is not a DeltaObject, then we cannot retrieve its base,
- // so we have to break the delta chain here.
- return dw.undeltify(otp)
- }
-
- base, ok := objectsToPack[do.BaseHash()]
- if !ok {
- // The base of the delta is not in our list of objects to pack, so
- // we break the chain.
- return dw.undeltify(otp)
- }
-
- if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
- return err
- }
-
- otp.SetDelta(base, otp.Object)
- return nil
-}
-
-func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
- if otp.Original != nil {
- return nil
- }
-
- if !otp.Object.Type().IsDelta() {
- return nil
- }
-
- obj, err := dw.encodedObject(otp.Hash())
- if err != nil {
- return err
- }
-
- otp.SetOriginal(obj)
-
- return nil
-}
-
-// undeltify undeltifies an *ObjectToPack by retrieving the original object from
-// the storer and resetting it.
-func (dw *deltaSelector) undeltify(otp *ObjectToPack) error {
- if err := dw.restoreOriginal(otp); err != nil {
- return err
- }
-
- otp.Object = otp.Original
- otp.Depth = 0
- return nil
-}
-
-func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) {
- sort.Sort(byTypeAndSize(objectsToPack))
-}
-
-func (dw *deltaSelector) walk(
- objectsToPack []*ObjectToPack,
- packWindow uint,
-) error {
- indexMap := make(map[plumbing.Hash]*deltaIndex)
- for i := 0; i < len(objectsToPack); i++ {
- // Clean up the index map and reconstructed delta objects for anything
- // outside our pack window, to save memory.
- if i > int(packWindow) {
- obj := objectsToPack[i-int(packWindow)]
-
- delete(indexMap, obj.Hash())
-
- if obj.IsDelta() {
- obj.SaveOriginalMetadata()
- obj.CleanOriginal()
- }
- }
-
- target := objectsToPack[i]
-
- // If we already have a delta, we don't try to find a new one for this
- // object. This happens when a delta is set to be reused from an existing
- // packfile.
- if target.IsDelta() {
- continue
- }
-
- // We only want to create deltas from specific types.
- if !applyDelta[target.Type()] {
- continue
- }
-
- for j := i - 1; j >= 0 && i-j < int(packWindow); j-- {
- base := objectsToPack[j]
- // Objects must use only the same type as their delta base.
- // Since objectsToPack is sorted by type and size, once we find
- // a different type, we know we won't find more of them.
- if base.Type() != target.Type() {
- break
- }
-
- if err := dw.tryToDeltify(indexMap, base, target); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error {
- // Original object might not be present if we're reusing a delta, so we
- // ensure it is restored.
- if err := dw.restoreOriginal(target); err != nil {
- return err
- }
-
- if err := dw.restoreOriginal(base); err != nil {
- return err
- }
-
- // If the sizes are radically different, this is a bad pairing.
- if target.Size() < base.Size()>>4 {
- return nil
- }
-
- msz := dw.deltaSizeLimit(
- target.Object.Size(),
- base.Depth,
- target.Depth,
- target.IsDelta(),
- )
-
- // Nearly impossible to fit useful delta.
- if msz <= 8 {
- return nil
- }
-
- // If we have to insert a lot to make this work, find another.
- if base.Size()-target.Size() > msz {
- return nil
- }
-
- if _, ok := indexMap[base.Hash()]; !ok {
- indexMap[base.Hash()] = new(deltaIndex)
- }
-
- // Now we can generate the delta using originals
- delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original)
- if err != nil {
- return err
- }
-
- // if delta better than target
- if delta.Size() < msz {
- target.SetDelta(base, delta)
- }
-
- return nil
-}
-
-func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int,
- targetDepth int, targetDelta bool) int64 {
- if !targetDelta {
- // Any delta should be no more than 50% of the original size
- // (for text files deflate of whole form should shrink 50%).
- n := targetSize >> 1
-
- // Evenly distribute delta size limits over allowed depth.
- // If src is non-delta (depth = 0), delta <= 50% of original.
- // If src is almost at limit (9/10), delta <= 10% of original.
- return n * (maxDepth - int64(baseDepth)) / maxDepth
- }
-
- // With a delta base chosen any new delta must be "better".
- // Retain the distribution described above.
- d := int64(targetDepth)
- n := targetSize
-
- // If target depth is bigger than maxDepth, this delta is not suitable to be used.
- if d >= maxDepth {
- return 0
- }
-
- // If src is whole (depth=0) and base is near limit (depth=9/10)
- // any delta using src can be 10x larger and still be better.
- //
- // If src is near limit (depth=9/10) and base is whole (depth=0)
- // a new delta dependent on src must be 1/10th the size.
- return n * (maxDepth - int64(baseDepth)) / (maxDepth - d)
-}
-
-type byTypeAndSize []*ObjectToPack
-
-func (a byTypeAndSize) Len() int { return len(a) }
-
-func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (a byTypeAndSize) Less(i, j int) bool {
- if a[i].Type() < a[j].Type() {
- return false
- }
-
- if a[i].Type() > a[j].Type() {
- return true
- }
-
- return a[i].Size() > a[j].Size()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go
deleted file mode 100644
index 43f87a0b1c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package packfile
-
-import (
- "bytes"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
-// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
-// for more info
-
-const (
- // Standard chunk size used to generate fingerprints
- s = 16
-
- // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
- // Max size of a copy operation (64KB)
- maxCopySize = 64 * 1024
-)
-
-// GetDelta returns an EncodedObject of type OFSDeltaObject. The base and
-// target objects will be loaded into memory in order to create the delta
-// object. To regenerate the target you will need both the obtained delta
-// object and the base one. An error is returned if the base or target
-// object cannot be read.
-func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
- return getDelta(new(deltaIndex), base, target)
-}
-
-func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
- br, err := base.Reader()
- if err != nil {
- return nil, err
- }
- defer br.Close()
- tr, err := target.Reader()
- if err != nil {
- return nil, err
- }
- defer tr.Close()
-
- bb := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(bb)
- bb.Reset()
-
- _, err = bb.ReadFrom(br)
- if err != nil {
- return nil, err
- }
-
- tb := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(tb)
- tb.Reset()
-
- _, err = tb.ReadFrom(tr)
- if err != nil {
- return nil, err
- }
-
- db := diffDelta(index, bb.Bytes(), tb.Bytes())
- delta := &plumbing.MemoryObject{}
- _, err = delta.Write(db)
- if err != nil {
- return nil, err
- }
-
- delta.SetSize(int64(len(db)))
- delta.SetType(plumbing.OFSDeltaObject)
-
- return delta, nil
-}
-
-// DiffDelta returns the delta that transforms src into tgt.
-func DiffDelta(src, tgt []byte) []byte {
- return diffDelta(new(deltaIndex), src, tgt)
-}
-
-func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
- buf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(buf)
- buf.Reset()
- buf.Write(deltaEncodeSize(len(src)))
- buf.Write(deltaEncodeSize(len(tgt)))
-
- if len(index.entries) == 0 {
- index.init(src)
- }
-
- ibuf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(ibuf)
- ibuf.Reset()
- for i := 0; i < len(tgt); i++ {
- offset, l := index.findMatch(src, tgt, i)
-
- if l == 0 {
- // couldn't find a match, just write the current byte and continue
- ibuf.WriteByte(tgt[i])
- } else if l < 0 {
- // src is less than blksz, copy the rest of the target to avoid
- // calls to findMatch
- for ; i < len(tgt); i++ {
- ibuf.WriteByte(tgt[i])
- }
- } else if l < s {
- // remaining target is less than blksz, copy what's left of it
- // and avoid calls to findMatch
- for j := i; j < i+l; j++ {
- ibuf.WriteByte(tgt[j])
- }
- i += l - 1
- } else {
- encodeInsertOperation(ibuf, buf)
-
- rl := l
- aOffset := offset
- for rl > 0 {
- if rl < maxCopySize {
- buf.Write(encodeCopyOperation(aOffset, rl))
- break
- }
-
- buf.Write(encodeCopyOperation(aOffset, maxCopySize))
- rl -= maxCopySize
- aOffset += maxCopySize
- }
-
- i += l - 1
- }
- }
-
- encodeInsertOperation(ibuf, buf)
-
- // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
- return append([]byte{}, buf.Bytes()...)
-}
-
-func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
- if ibuf.Len() == 0 {
- return
- }
-
- b := ibuf.Bytes()
- s := ibuf.Len()
- o := 0
- for {
- if s <= 127 {
- break
- }
- buf.WriteByte(byte(127))
- buf.Write(b[o : o+127])
- s -= 127
- o += 127
- }
- buf.WriteByte(byte(s))
- buf.Write(b[o : o+s])
-
- ibuf.Reset()
-}
-
-func deltaEncodeSize(size int) []byte {
- var ret []byte
- c := size & 0x7f
- size >>= 7
- for {
- if size == 0 {
- break
- }
-
- ret = append(ret, byte(c|0x80))
- c = size & 0x7f
- size >>= 7
- }
- ret = append(ret, byte(c))
-
- return ret
-}
-
-func encodeCopyOperation(offset, length int) []byte {
- code := 0x80
- var opcodes []byte
-
- var i uint
- for i = 0; i < 4; i++ {
- f := 0xff << (i * 8)
- if offset&f != 0 {
- opcodes = append(opcodes, byte(offset&f>>(i*8)))
- code |= 0x01 << i
- }
- }
-
- for i = 0; i < 3; i++ {
- f := 0xff << (i * 8)
- if length&f != 0 {
- opcodes = append(opcodes, byte(length&f>>(i*8)))
- code |= 0x10 << i
- }
- }
-
- return append([]byte{byte(code)}, opcodes...)
-}
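
Together with PatchDelta (removed further below in this change), the exported surface of this file forms a round trip: DiffDelta encodes a target against a source, and PatchDelta reconstructs it. A small usage sketch against the vendored v4 import path:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog")
	tgt := []byte("the quick brown fox jumps over the lazy cat")

	// Encode tgt as a delta against src...
	delta := packfile.DiffDelta(src, tgt)

	// ...and reconstruct it again.
	out, err := packfile.PatchDelta(src, delta)
	if err != nil {
		panic(err)
	}

	fmt.Println(bytes.Equal(out, tgt)) // true
}

Round-tripping like this is also a cheap sanity check when experimenting with the encoding.
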
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go
deleted file mode 100644
index 2882a7f378..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Package packfile implements encoding and decoding of the packfile format.
-//
-// == pack-*.pack files have the following format:
-//
-// - A header appears at the beginning and consists of the following:
-//
-// 4-byte signature:
-// The signature is: {'P', 'A', 'C', 'K'}
-//
-// 4-byte version number (network byte order):
-// GIT currently accepts version number 2 or 3 but
-// generates version 2 only.
-//
-// 4-byte number of objects contained in the pack (network byte order)
-//
-// Observation: we cannot have more than 4G versions ;-) and
-// more than 4G objects in a pack.
-//
-// - The header is followed by number of object entries, each of
-// which looks like this:
-//
-// (undeltified representation)
-// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
-// compressed data
-//
-// (deltified representation)
-// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
-// 20-byte base object name
-// compressed delta data
-//
-// Observation: length of each object is encoded in a variable
-// length format and is not constrained to 32-bit or anything.
-//
-// - The trailer records 20-byte SHA1 checksum of all of the above.
-//
-//
-// Source:
-// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
-package packfile
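
The fixed 12-byte header described above is simple to parse by hand. A self-contained sketch; readPackHeader is a hypothetical helper written for illustration, not part of the package:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readPackHeader parses the fixed 12-byte header described above:
// the "PACK" signature, then version and object count, both big-endian.
func readPackHeader(r io.Reader) (version, objects uint32, err error) {
	var hdr [12]byte
	if _, err = io.ReadFull(r, hdr[:]); err != nil {
		return
	}
	if !bytes.Equal(hdr[:4], []byte("PACK")) {
		return 0, 0, fmt.Errorf("bad signature %q", hdr[:4])
	}
	version = binary.BigEndian.Uint32(hdr[4:8])
	objects = binary.BigEndian.Uint32(hdr[8:12])
	return
}

func main() {
	// A synthetic header: "PACK", version 2, 3 objects.
	hdr := []byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 3}
	v, n, err := readPackHeader(bytes.NewReader(hdr))
	fmt.Println(v, n, err) // 2 3 <nil>
}
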
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go
deleted file mode 100644
index b07791875d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package packfile
-
-import (
- "compress/zlib"
- "crypto/sha1"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/binary"
-)
-
-// Encoder gets the data from the storage and writes it into the writer in PACK
-// format.
-type Encoder struct {
- selector *deltaSelector
- w *offsetWriter
- zw *zlib.Writer
- hasher plumbing.Hasher
-
- useRefDeltas bool
-}
-
-// NewEncoder creates a new packfile encoder using a specific Writer and
-// EncodedObjectStorer. By default deltas used to generate the packfile will be
-// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
-func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
- h := plumbing.Hasher{
- Hash: sha1.New(),
- }
- mw := io.MultiWriter(w, h)
- ow := newOffsetWriter(mw)
- zw := zlib.NewWriter(mw)
- return &Encoder{
- selector: newDeltaSelector(s),
- w: ow,
- zw: zw,
- hasher: h,
- useRefDeltas: useRefDeltas,
- }
-}
-
-// Encode creates a packfile containing all the objects referenced in
-// hashes and writes it to the writer in the Encoder. `packWindow`
-// specifies the size of the sliding window used to compare objects
-// for delta compression; 0 turns off delta compression entirely.
-func (e *Encoder) Encode(
- hashes []plumbing.Hash,
- packWindow uint,
-) (plumbing.Hash, error) {
- objects, err := e.selector.ObjectsToPack(hashes, packWindow)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return e.encode(objects)
-}
-
-func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) {
- if err := e.head(len(objects)); err != nil {
- return plumbing.ZeroHash, err
- }
-
- for _, o := range objects {
- if err := e.entry(o); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- return e.footer()
-}
-
-func (e *Encoder) head(numEntries int) error {
- return binary.Write(
- e.w,
- signature,
- int32(VersionSupported),
- int32(numEntries),
- )
-}
-
-func (e *Encoder) entry(o *ObjectToPack) error {
- if o.WantWrite() {
- // A cycle exists in this delta chain. This should only occur if a
- // selected object representation disappeared during writing
- // (for example due to a concurrent repack) and a different base
- // was chosen, forcing a cycle. Select something other than a
- // delta, and write this object.
- e.selector.restoreOriginal(o)
- o.BackToOriginal()
- }
-
- if o.IsWritten() {
- return nil
- }
-
- o.MarkWantWrite()
-
- if err := e.writeBaseIfDelta(o); err != nil {
- return err
- }
-
- // We need to check whether we already wrote that object due to a cyclic delta chain
- if o.IsWritten() {
- return nil
- }
-
- o.Offset = e.w.Offset()
-
- if o.IsDelta() {
- if err := e.writeDeltaHeader(o); err != nil {
- return err
- }
- } else {
- if err := e.entryHead(o.Type(), o.Size()); err != nil {
- return err
- }
- }
-
- e.zw.Reset(e.w)
- or, err := o.Object.Reader()
- if err != nil {
- return err
- }
-
- _, err = io.Copy(e.zw, or)
- if err != nil {
- return err
- }
-
- return e.zw.Close()
-}
-
-func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
- if o.IsDelta() && !o.Base.IsWritten() {
- // We must write base first
- return e.entry(o.Base)
- }
-
- return nil
-}
-
-func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error {
- // Write offset deltas by default
- t := plumbing.OFSDeltaObject
- if e.useRefDeltas {
- t = plumbing.REFDeltaObject
- }
-
- if err := e.entryHead(t, o.Object.Size()); err != nil {
- return err
- }
-
- if e.useRefDeltas {
- return e.writeRefDeltaHeader(o.Base.Hash())
- } else {
- return e.writeOfsDeltaHeader(o)
- }
-}
-
-func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error {
- return binary.Write(e.w, base)
-}
-
-func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error {
- // for OFS_DELTA, offset of the base is interpreted as negative offset
- // relative to the type-byte of the header of the ofs-delta entry.
- relativeOffset := o.Offset - o.Base.Offset
- if relativeOffset <= 0 {
- return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
- }
-
- return binary.WriteVariableWidthInt(e.w, relativeOffset)
-}
-
-func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error {
- t := int64(typeNum)
- header := []byte{}
- c := (t << firstLengthBits) | (size & maskFirstLength)
- size >>= firstLengthBits
- for {
- if size == 0 {
- break
- }
- header = append(header, byte(c|maskContinue))
- c = size & int64(maskLength)
- size >>= lengthBits
- }
-
- header = append(header, byte(c))
- _, err := e.w.Write(header)
-
- return err
-}
-
-func (e *Encoder) footer() (plumbing.Hash, error) {
- h := e.hasher.Sum()
- return h, binary.Write(e.w, h)
-}
-
-type offsetWriter struct {
- w io.Writer
- offset int64
-}
-
-func newOffsetWriter(w io.Writer) *offsetWriter {
- return &offsetWriter{w: w}
-}
-
-func (ow *offsetWriter) Write(p []byte) (n int, err error) {
- n, err = ow.w.Write(p)
- ow.offset += int64(n)
- return n, err
-}
-
-func (ow *offsetWriter) Offset() int64 {
- return ow.offset
-}
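
A sketch of driving the encoder end to end. It assumes go-git v4's in-memory storage (storage/memory) as the EncodedObjectStorer; the blob content and window size are arbitrary:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	st := memory.NewStorage()

	// Store one blob so there is something to pack.
	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write([]byte("hello packfile\n"))
	h, err := st.SetEncodedObject(obj)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	enc := packfile.NewEncoder(&buf, st, false) // offset deltas

	// A window of 10 enables delta compression; 0 would disable it.
	checksum, err := enc.Encode([]plumbing.Hash{h}, 10)
	fmt.Println(checksum, err, buf.Len())
}
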
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go
deleted file mode 100644
index c0b9163313..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package packfile
-
-import "fmt"
-
-// Error specifies errors returned during packfile parsing.
-type Error struct {
- reason, details string
-}
-
-// NewError returns a new error.
-func NewError(reason string) *Error {
- return &Error{reason: reason}
-}
-
-// Error returns a text representation of the error.
-func (e *Error) Error() string {
- if e.details == "" {
- return e.reason
- }
-
- return fmt.Sprintf("%s: %s", e.reason, e.details)
-}
-
-// AddDetails adds details to an error, with additional text.
-func (e *Error) AddDetails(format string, args ...interface{}) *Error {
- return &Error{
- reason: e.reason,
- details: fmt.Sprintf(format, args...),
- }
-}
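
Usage is two-level: a package declares a base Error once and attaches context at each call site. Since AddDetails returns a copy, the base value stays reusable; a small sketch:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

var errTruncated = packfile.NewError("truncated packfile")

func main() {
	// AddDetails returns a new *Error; errTruncated itself is unchanged.
	err := errTruncated.AddDetails("object %d of %d", 3, 10)
	fmt.Println(err) // truncated packfile: object 3 of 10
}
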
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go
deleted file mode 100644
index a268bce7ed..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package packfile
-
-import (
- "io"
-
- billy "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
-)
-
-// FSObject is an object from the packfile on the filesystem.
-type FSObject struct {
- hash plumbing.Hash
- h *ObjectHeader
- offset int64
- size int64
- typ plumbing.ObjectType
- index idxfile.Index
- fs billy.Filesystem
- path string
- cache cache.Object
-}
-
-// NewFSObject creates a new filesystem object.
-func NewFSObject(
- hash plumbing.Hash,
- finalType plumbing.ObjectType,
- offset int64,
- contentSize int64,
- index idxfile.Index,
- fs billy.Filesystem,
- path string,
- cache cache.Object,
-) *FSObject {
- return &FSObject{
- hash: hash,
- offset: offset,
- size: contentSize,
- typ: finalType,
- index: index,
- fs: fs,
- path: path,
- cache: cache,
- }
-}
-
-// Reader implements the plumbing.EncodedObject interface.
-func (o *FSObject) Reader() (io.ReadCloser, error) {
- obj, ok := o.cache.Get(o.hash)
- if ok && obj != o {
- reader, err := obj.Reader()
- if err != nil {
- return nil, err
- }
-
- return reader, nil
- }
-
- f, err := o.fs.Open(o.path)
- if err != nil {
- return nil, err
- }
-
- p := NewPackfileWithCache(o.index, nil, f, o.cache)
- r, err := p.getObjectContent(o.offset)
- if err != nil {
- _ = f.Close()
- return nil, err
- }
-
- if err := f.Close(); err != nil {
- return nil, err
- }
-
- return r, nil
-}
-
-// SetSize implements the plumbing.EncodedObject interface. This method
-// is a noop.
-func (o *FSObject) SetSize(int64) {}
-
-// SetType implements the plumbing.EncodedObject interface. This method is
-// a noop.
-func (o *FSObject) SetType(plumbing.ObjectType) {}
-
-// Hash implements the plumbing.EncodedObject interface.
-func (o *FSObject) Hash() plumbing.Hash { return o.hash }
-
-// Size implements the plumbing.EncodedObject interface.
-func (o *FSObject) Size() int64 { return o.size }
-
-// Type implements the plumbing.EncodedObject interface.
-func (o *FSObject) Type() plumbing.ObjectType {
- return o.typ
-}
-
-// Writer implements the plumbing.EncodedObject interface. This method always
-// returns a nil writer.
-func (o *FSObject) Writer() (io.WriteCloser, error) {
- return nil, nil
-}
-
-type objectReader struct {
- io.ReadCloser
- f billy.File
-}
-
-func (r *objectReader) Close() error {
- if err := r.ReadCloser.Close(); err != nil {
- _ = r.f.Close()
- return err
- }
-
- return r.f.Close()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go
deleted file mode 100644
index dfea5715f0..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package packfile
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// ObjectToPack is a representation of an object that is going to be written
-// into a packfile.
-type ObjectToPack struct {
- // The main object to pack, it could be any object, including deltas
- Object plumbing.EncodedObject
- // Base is the object that a delta is based on (which could itself be
- // another delta). If the main object is not a delta, Base will be nil.
- Base *ObjectToPack
- // Original is the object that we can generate by applying the delta to
- // Base, or the same object as Object in the case of a non-delta
- // object.
- Original plumbing.EncodedObject
- // Depth is the number of deltas that must be resolved to obtain Original
- // (delta based on delta based on ...).
- Depth int
-
- // Offset in the pack once the object has been written, or 0 if it
- // has not been written yet.
- Offset int64
-
- // Information from the original object
- resolvedOriginal bool
- originalType plumbing.ObjectType
- originalSize int64
- originalHash plumbing.Hash
-}
-
-// newObjectToPack creates a correct ObjectToPack based on a non-delta object
-func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack {
- return &ObjectToPack{
- Object: o,
- Original: o,
- }
-}
-
-// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on
-// its base (which could be another delta), the delta target (here called the
-// original), and the delta object itself.
-func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack {
- return &ObjectToPack{
- Object: delta,
- Base: base,
- Original: original,
- Depth: base.Depth + 1,
- }
-}
-
-// BackToOriginal converts this ObjectToPack back to a non-deltified object if it was a delta.
-func (o *ObjectToPack) BackToOriginal() {
- if o.IsDelta() && o.Original != nil {
- o.Object = o.Original
- o.Base = nil
- o.Depth = 0
- }
-}
-
-// IsWritten reports whether this ObjectToPack has
-// already been written into the packfile.
-func (o *ObjectToPack) IsWritten() bool {
- return o.Offset > 1
-}
-
-// MarkWantWrite marks this ObjectToPack as WantWrite
-// to avoid delta chain loops
-func (o *ObjectToPack) MarkWantWrite() {
- o.Offset = 1
-}
-
-// WantWrite reports whether this ObjectToPack was previously marked as WantWrite.
-func (o *ObjectToPack) WantWrite() bool {
- return o.Offset == 1
-}
-
-// SetOriginal sets Original and saves its size, type and hash. If obj is
-// nil, Original is set to nil but the previously resolved values are kept.
-func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) {
- o.Original = obj
- o.SaveOriginalMetadata()
-}
-
-// SaveOriginalMetadata saves the size, type and hash of the Original object.
-func (o *ObjectToPack) SaveOriginalMetadata() {
- if o.Original != nil {
- o.originalSize = o.Original.Size()
- o.originalType = o.Original.Type()
- o.originalHash = o.Original.Hash()
- o.resolvedOriginal = true
- }
-}
-
-// CleanOriginal sets Original to nil
-func (o *ObjectToPack) CleanOriginal() {
- o.Original = nil
-}
-
-func (o *ObjectToPack) Type() plumbing.ObjectType {
- if o.Original != nil {
- return o.Original.Type()
- }
-
- if o.resolvedOriginal {
- return o.originalType
- }
-
- if o.Base != nil {
- return o.Base.Type()
- }
-
- if o.Object != nil {
- return o.Object.Type()
- }
-
- panic("cannot get type")
-}
-
-func (o *ObjectToPack) Hash() plumbing.Hash {
- if o.Original != nil {
- return o.Original.Hash()
- }
-
- if o.resolvedOriginal {
- return o.originalHash
- }
-
- do, ok := o.Object.(plumbing.DeltaObject)
- if ok {
- return do.ActualHash()
- }
-
- panic("cannot get hash")
-}
-
-func (o *ObjectToPack) Size() int64 {
- if o.Original != nil {
- return o.Original.Size()
- }
-
- if o.resolvedOriginal {
- return o.originalSize
- }
-
- do, ok := o.Object.(plumbing.DeltaObject)
- if ok {
- return do.ActualSize()
- }
-
- panic("cannot get ObjectToPack size")
-}
-
-func (o *ObjectToPack) IsDelta() bool {
- return o.Base != nil
-}
-
-func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) {
- o.Object = delta
- o.Base = base
- o.Depth = base.Depth + 1
-}
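
Note how the Offset field doubles as a small state machine: 0 means not yet written, 1 is the MarkWantWrite sentinel used to break delta-chain cycles, and anything larger is a real pack offset. This is safe because the 12-byte pack header guarantees no real entry can start at offset 0 or 1. A standalone sketch of that encoding (the names are illustrative):

package main

import "fmt"

// offsetState mirrors ObjectToPack's use of Offset as a state flag:
// 0 = not written, 1 = marked for writing (cycle guard), >1 = written
// at that pack offset.
type offsetState int64

func (o offsetState) isWritten() bool { return o > 1 }
func (o offsetState) wantWrite() bool { return o == 1 }

func main() {
	var s offsetState // zero value: nothing has happened yet
	fmt.Println(s.isWritten(), s.wantWrite()) // false false

	s = 1 // MarkWantWrite
	fmt.Println(s.isWritten(), s.wantWrite()) // false true

	s = 4096 // real offset recorded after the entry is written
	fmt.Println(s.isWritten(), s.wantWrite()) // true false
}
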
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go
deleted file mode 100644
index 21a15de0cc..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go
+++ /dev/null
@@ -1,562 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "io"
- "os"
-
- billy "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-var (
- // ErrInvalidObject is returned by Decode when an invalid object is
- // found in the packfile.
- ErrInvalidObject = NewError("invalid git object")
- // ErrZLib is returned by Decode when there was an error unzipping
- // the packfile contents.
- ErrZLib = NewError("zlib reading error")
-)
-
-// When reading small objects from a packfile it is beneficial to do so at
-// once to exploit the buffered I/O. In many cases the objects are so small
-// that they were already loaded to memory when the object header was
-// loaded from the packfile. Wrapping in FSObject would cause this buffered
-// data to be thrown away and then re-read later, with the additional
-// seeking causing reloads from disk. Objects smaller than this threshold
-// are now always read into memory and stored in cache instead of being
-// wrapped in FSObject.
-const smallObjectThreshold = 16 * 1024
-
-// Packfile allows retrieving information from inside a packfile.
-type Packfile struct {
- idxfile.Index
- fs billy.Filesystem
- file billy.File
- s *Scanner
- deltaBaseCache cache.Object
- offsetToType map[int64]plumbing.ObjectType
-}
-
-// NewPackfileWithCache creates a new Packfile with the given object cache.
-// If the filesystem is provided, the packfile will return FSObjects, otherwise
-// it will return MemoryObjects.
-func NewPackfileWithCache(
- index idxfile.Index,
- fs billy.Filesystem,
- file billy.File,
- cache cache.Object,
-) *Packfile {
- s := NewScanner(file)
- return &Packfile{
- index,
- fs,
- file,
- s,
- cache,
- make(map[int64]plumbing.ObjectType),
- }
-}
-
-// NewPackfile returns a packfile representation for the given packfile file
-// and packfile idx.
-// If the filesystem is provided, the packfile will return FSObjects, otherwise
-// it will return MemoryObjects.
-func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
- return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
-}
-
-// Get retrieves the encoded object in the packfile with the given hash.
-func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
- offset, err := p.FindOffset(h)
- if err != nil {
- return nil, err
- }
-
- return p.objectAtOffset(offset, h)
-}
-
-// GetByOffset retrieves the encoded object from the packfile at the given
-// offset.
-func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
- hash, err := p.FindHash(o)
- if err != nil {
- return nil, err
- }
-
- return p.objectAtOffset(o, hash)
-}
-
-// GetSizeByOffset retrieves the size of the encoded object from the
-// packfile with the given offset.
-func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
- if _, err := p.s.SeekFromStart(o); err != nil {
- if err == io.EOF || isInvalid(err) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return 0, err
- }
-
- h, err := p.nextObjectHeader()
- if err != nil {
- return 0, err
- }
- return p.getObjectSize(h)
-}
-
-func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
- h, err := p.s.SeekObjectHeader(offset)
- p.s.pendingObject = nil
- return h, err
-}
-
-func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
- h, err := p.s.NextObjectHeader()
- p.s.pendingObject = nil
- return h, err
-}
-
-func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
- delta := buf.Bytes()
- _, delta = decodeLEB128(delta) // skip src size
- sz, _ := decodeLEB128(delta)
- return int64(sz)
-}
-
-func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- return h.Length, nil
- case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
- buf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(buf)
- buf.Reset()
-
- if _, _, err := p.s.NextObject(buf); err != nil {
- return 0, err
- }
-
- return p.getDeltaObjectSize(buf), nil
- default:
- return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-}
-
-func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- return h.Type, nil
- case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
- var offset int64
- if h.Type == plumbing.REFDeltaObject {
- offset, err = p.FindOffset(h.Reference)
- if err != nil {
- return
- }
- } else {
- offset = h.OffsetReference
- }
-
- if baseType, ok := p.offsetToType[offset]; ok {
- typ = baseType
- } else {
- h, err = p.objectHeaderAtOffset(offset)
- if err != nil {
- return
- }
-
- typ, err = p.getObjectType(h)
- if err != nil {
- return
- }
- }
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- p.offsetToType[h.Offset] = typ
-
- return
-}
-
-func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
- if obj, ok := p.cacheGet(hash); ok {
- return obj, nil
- }
-
- h, err := p.objectHeaderAtOffset(offset)
- if err != nil {
- if err == io.EOF || isInvalid(err) {
- return nil, plumbing.ErrObjectNotFound
- }
- return nil, err
- }
-
- return p.getNextObject(h, hash)
-}
-
-func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
- var err error
-
- // If we have no filesystem, we will return a MemoryObject instead
- // of an FSObject.
- if p.fs == nil {
- return p.getNextMemoryObject(h)
- }
-
- // If the object is small enough, read it completely into memory now, since
- // it is already read from disk into a buffer anyway. For delta objects we want
- // to perform the optimization too, but we have to be careful about applying
- // small deltas on big objects.
- var size int64
- if h.Length <= smallObjectThreshold {
- if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
- return p.getNextMemoryObject(h)
- }
-
- // For delta objects we read the delta data and apply the small object
- // optimization only if the expanded version of the object still meets
- // the small object threshold condition.
- buf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(buf)
- buf.Reset()
- if _, _, err := p.s.NextObject(buf); err != nil {
- return nil, err
- }
-
- size = p.getDeltaObjectSize(buf)
- if size <= smallObjectThreshold {
- var obj = new(plumbing.MemoryObject)
- obj.SetSize(size)
- if h.Type == plumbing.REFDeltaObject {
- err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
- } else {
- err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
- }
- return obj, err
- }
- } else {
- size, err = p.getObjectSize(h)
- if err != nil {
- return nil, err
- }
- }
-
- typ, err := p.getObjectType(h)
- if err != nil {
- return nil, err
- }
-
- p.offsetToType[h.Offset] = typ
-
- return NewFSObject(
- hash,
- typ,
- h.Offset,
- size,
- p.Index,
- p.fs,
- p.file.Name(),
- p.deltaBaseCache,
- ), nil
-}
-
-func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
- h, err := p.objectHeaderAtOffset(offset)
- if err != nil {
- return nil, err
- }
-
- // getObjectContent is called from FSObject, so we have to explicitly
- // get a memory object here to avoid a recursive cycle.
- obj, err := p.getNextMemoryObject(h)
- if err != nil {
- return nil, err
- }
-
- return obj.Reader()
-}
-
-func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
- var obj = new(plumbing.MemoryObject)
- obj.SetSize(h.Length)
- obj.SetType(h.Type)
-
- var err error
- switch h.Type {
- case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
- err = p.fillRegularObjectContent(obj)
- case plumbing.REFDeltaObject:
- err = p.fillREFDeltaObjectContent(obj, h.Reference)
- case plumbing.OFSDeltaObject:
- err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
- default:
- err = ErrInvalidObject.AddDetails("type %q", h.Type)
- }
-
- if err != nil {
- return nil, err
- }
-
- p.offsetToType[h.Offset] = obj.Type()
-
- return obj, nil
-}
-
-func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
- w, err := obj.Writer()
- if err != nil {
- return err
- }
-
- _, _, err = p.s.NextObject(w)
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
- buf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(buf)
- buf.Reset()
- _, _, err := p.s.NextObject(buf)
- if err != nil {
- return err
- }
-
- return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
-}
-
-func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
- var err error
-
- base, ok := p.cacheGet(ref)
- if !ok {
- base, err = p.Get(ref)
- if err != nil {
- return err
- }
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
- buf := bufPool.Get().(*bytes.Buffer)
- defer bufPool.Put(buf)
- buf.Reset()
- _, _, err := p.s.NextObject(buf)
- if err != nil {
- return err
- }
-
- return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
-}
-
-func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
- hash, err := p.FindHash(offset)
- if err != nil {
- return err
- }
-
- base, err := p.objectAtOffset(offset, hash)
- if err != nil {
- return err
- }
-
- obj.SetType(base.Type())
- err = ApplyDelta(obj, base, buf.Bytes())
- p.cachePut(obj)
-
- return err
-}
-
-func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
- if p.deltaBaseCache == nil {
- return nil, false
- }
-
- return p.deltaBaseCache.Get(h)
-}
-
-func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
- if p.deltaBaseCache == nil {
- return
- }
-
- p.deltaBaseCache.Put(obj)
-}
-
-// GetAll returns an iterator with all encoded objects in the packfile.
-// The returned iterator is not thread-safe; it should be used in the same
-// thread as the Packfile instance.
-func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
- return p.GetByType(plumbing.AnyObject)
-}
-
-// GetByType returns all the objects of the given type.
-func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- switch typ {
- case plumbing.AnyObject,
- plumbing.BlobObject,
- plumbing.TreeObject,
- plumbing.CommitObject,
- plumbing.TagObject:
- entries, err := p.EntriesByOffset()
- if err != nil {
- return nil, err
- }
-
- return &objectIter{
- // Easiest way to provide an object decoder is just to pass a Packfile
- // instance. To not mess with the seeks, it's a new instance with a
- // different scanner but the same cache and offset to hash map for
- // reusing as much cache as possible.
- p: p,
- iter: entries,
- typ: typ,
- }, nil
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
-
-// ID returns the ID of the packfile, which is the checksum at the end of it.
-func (p *Packfile) ID() (plumbing.Hash, error) {
- prev, err := p.file.Seek(-20, io.SeekEnd)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var hash plumbing.Hash
- if _, err := io.ReadFull(p.file, hash[:]); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hash, nil
-}
-
-// Scanner returns the packfile's Scanner
-func (p *Packfile) Scanner() *Scanner {
- return p.s
-}
-
-// Close the packfile and its resources.
-func (p *Packfile) Close() error {
- closer, ok := p.file.(io.Closer)
- if !ok {
- return nil
- }
-
- return closer.Close()
-}
-
-type objectIter struct {
- p *Packfile
- typ plumbing.ObjectType
- iter idxfile.EntryIter
-}
-
-func (i *objectIter) Next() (plumbing.EncodedObject, error) {
- for {
- e, err := i.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if i.typ != plumbing.AnyObject {
- if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
- if typ != i.typ {
- continue
- }
- } else if obj, ok := i.p.cacheGet(e.Hash); ok {
- if obj.Type() != i.typ {
- i.p.offsetToType[int64(e.Offset)] = obj.Type()
- continue
- }
- return obj, nil
- } else {
- h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
- if err != nil {
- return nil, err
- }
-
- if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
- typ, err := i.p.getObjectType(h)
- if err != nil {
- return nil, err
- }
- if typ != i.typ {
- i.p.offsetToType[int64(e.Offset)] = typ
- continue
- }
- // getObjectType will seek in the file so we cannot use getNextObject safely
- return i.p.objectAtOffset(int64(e.Offset), e.Hash)
- } else {
- if h.Type != i.typ {
- i.p.offsetToType[int64(e.Offset)] = h.Type
- continue
- }
- return i.p.getNextObject(h, e.Hash)
- }
- }
- }
-
- obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
- if err != nil {
- return nil, err
- }
-
- return obj, nil
- }
-}
-
-func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
- for {
- o, err := i.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
-
- if err := f(o); err != nil {
- return err
- }
- }
-}
-
-func (i *objectIter) Close() {
- i.iter.Close()
-}
-
-// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
-// error inside. It also checks for the Windows error, which is different from
-// os.ErrInvalid.
-func isInvalid(err error) bool {
- pe, ok := err.(*os.PathError)
- if !ok {
- return false
- }
-
- errstr := pe.Err.Error()
- return errstr == errInvalidUnix || errstr == errInvalidWindows
-}
-
-// errInvalidWindows is the Windows equivalent to os.ErrInvalid
-const errInvalidWindows = "The parameter is incorrect."
-
-var errInvalidUnix = os.ErrInvalid.Error()
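
Reading an existing pack/idx pair through this API looks roughly as follows. The sketch assumes go-git v4's idxfile decoder (idxfile.NewMemoryIndex and idxfile.NewDecoder) and billy's osfs; the pack file names are placeholders:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
	fs := osfs.New(".git/objects/pack")

	// Decode the idx so offsets and hashes can be looked up.
	idxFile, err := fs.Open("pack-NAME.idx") // placeholder name
	if err != nil {
		panic(err)
	}
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		panic(err)
	}
	idxFile.Close()

	packFile, err := fs.Open("pack-NAME.pack") // placeholder name
	if err != nil {
		panic(err)
	}

	// Providing the filesystem lets large objects come back as FSObjects
	// instead of being loaded fully into memory.
	p := packfile.NewPackfile(idx, fs, packFile)
	defer p.Close()

	iter, err := p.GetByType(plumbing.CommitObject)
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	_ = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash(), o.Size())
		return nil
	})
}
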
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go
deleted file mode 100644
index 71cbba9838..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package packfile
-
-import (
- "bytes"
- "errors"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-var (
- // ErrReferenceDeltaNotFound is returned when the reference delta is not
- // found.
- ErrReferenceDeltaNotFound = errors.New("reference delta not found")
-
- // ErrNotSeekableSource is returned when the source for the parser is not
- // seekable and a storage was not provided, so it can't be parsed.
- ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")
-
- // ErrDeltaNotCached is returned when the delta could not be found in cache.
- ErrDeltaNotCached = errors.New("delta could not be found in cache")
-)
-
-// Observer interface is implemented by index encoders.
-type Observer interface {
- // OnHeader is called when a new packfile is opened.
- OnHeader(count uint32) error
- // OnInflatedObjectHeader is called for each object header read.
- OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
- // OnInflatedObjectContent is called for each decoded object.
- OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
- // OnFooter is called when decoding is done.
- OnFooter(h plumbing.Hash) error
-}
-
-// Parser decodes a packfile and calls any observers associated with it. It is
-// used to generate indexes.
-type Parser struct {
- storage storer.EncodedObjectStorer
- scanner *Scanner
- count uint32
- oi []*objectInfo
- oiByHash map[plumbing.Hash]*objectInfo
- oiByOffset map[int64]*objectInfo
- hashOffset map[plumbing.Hash]int64
- checksum plumbing.Hash
-
- cache *cache.BufferLRU
- // delta content by offset, only used if source is not seekable
- deltas map[int64][]byte
-
- ob []Observer
-}
-
-// NewParser creates a new Parser. The Scanner source must be seekable.
-// If it's not, NewParserWithStorage should be used instead.
-func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
- return NewParserWithStorage(scanner, nil, ob...)
-}
-
-// NewParserWithStorage creates a new Parser. The scanner source must either
-// be seekable or a storage must be provided.
-func NewParserWithStorage(
- scanner *Scanner,
- storage storer.EncodedObjectStorer,
- ob ...Observer,
-) (*Parser, error) {
- if !scanner.IsSeekable && storage == nil {
- return nil, ErrNotSeekableSource
- }
-
- var deltas map[int64][]byte
- if !scanner.IsSeekable {
- deltas = make(map[int64][]byte)
- }
-
- return &Parser{
- storage: storage,
- scanner: scanner,
- ob: ob,
- count: 0,
- cache: cache.NewBufferLRUDefault(),
- deltas: deltas,
- }, nil
-}
-
-func (p *Parser) forEachObserver(f func(o Observer) error) error {
- for _, o := range p.ob {
- if err := f(o); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *Parser) onHeader(count uint32) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnHeader(count)
- })
-}
-
-func (p *Parser) onInflatedObjectHeader(
- t plumbing.ObjectType,
- objSize int64,
- pos int64,
-) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnInflatedObjectHeader(t, objSize, pos)
- })
-}
-
-func (p *Parser) onInflatedObjectContent(
- h plumbing.Hash,
- pos int64,
- crc uint32,
- content []byte,
-) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnInflatedObjectContent(h, pos, crc, content)
- })
-}
-
-func (p *Parser) onFooter(h plumbing.Hash) error {
- return p.forEachObserver(func(o Observer) error {
- return o.OnFooter(h)
- })
-}
-
-// Parse starts the decoding phase of the packfile.
-func (p *Parser) Parse() (plumbing.Hash, error) {
- if err := p.init(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := p.indexObjects(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- var err error
- p.checksum, err = p.scanner.Checksum()
- if err != nil && err != io.EOF {
- return plumbing.ZeroHash, err
- }
-
- if err := p.resolveDeltas(); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := p.onFooter(p.checksum); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return p.checksum, nil
-}
-
-func (p *Parser) init() error {
- _, c, err := p.scanner.Header()
- if err != nil {
- return err
- }
-
- if err := p.onHeader(c); err != nil {
- return err
- }
-
- p.count = c
- p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
- p.oiByOffset = make(map[int64]*objectInfo, p.count)
- p.oi = make([]*objectInfo, p.count)
-
- return nil
-}
-
-func (p *Parser) indexObjects() error {
- buf := new(bytes.Buffer)
-
- for i := uint32(0); i < p.count; i++ {
- buf.Reset()
-
- oh, err := p.scanner.NextObjectHeader()
- if err != nil {
- return err
- }
-
- delta := false
- var ota *objectInfo
- switch t := oh.Type; t {
- case plumbing.OFSDeltaObject:
- delta = true
-
- parent, ok := p.oiByOffset[oh.OffsetReference]
- if !ok {
- return plumbing.ErrObjectNotFound
- }
-
- ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
- parent.Children = append(parent.Children, ota)
- case plumbing.REFDeltaObject:
- delta = true
- parent, ok := p.oiByHash[oh.Reference]
- if !ok {
- // can't find referenced object in this pack file
- // this must be a "thin" pack.
- parent = &objectInfo{ //Placeholder parent
- SHA1: oh.Reference,
- ExternalRef: true, // mark as an external reference that must be resolved
- Type: plumbing.AnyObject,
- DiskType: plumbing.AnyObject,
- }
- p.oiByHash[oh.Reference] = parent
- }
- ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
- parent.Children = append(parent.Children, ota)
-
- default:
- ota = newBaseObject(oh.Offset, oh.Length, t)
- }
-
- _, crc, err := p.scanner.NextObject(buf)
- if err != nil {
- return err
- }
-
- ota.Crc32 = crc
- ota.Length = oh.Length
-
- data := buf.Bytes()
- if !delta {
- sha1, err := getSHA1(ota.Type, data)
- if err != nil {
- return err
- }
-
- ota.SHA1 = sha1
- p.oiByHash[ota.SHA1] = ota
- }
-
- if p.storage != nil && !delta {
- obj := new(plumbing.MemoryObject)
- obj.SetSize(oh.Length)
- obj.SetType(oh.Type)
- if _, err := obj.Write(data); err != nil {
- return err
- }
-
- if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- if delta && !p.scanner.IsSeekable {
- p.deltas[oh.Offset] = make([]byte, len(data))
- copy(p.deltas[oh.Offset], data)
- }
-
- p.oiByOffset[oh.Offset] = ota
- p.oi[i] = ota
- }
-
- return nil
-}
-
-func (p *Parser) resolveDeltas() error {
- for _, obj := range p.oi {
- content, err := p.get(obj)
- if err != nil {
- return err
- }
-
- if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
- return err
- }
-
- if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
- return err
- }
-
- if !obj.IsDelta() && len(obj.Children) > 0 {
- for _, child := range obj.Children {
- if _, err := p.resolveObject(child, content); err != nil {
- return err
- }
- }
-
- // Remove the delta from the cache.
- if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
- delete(p.deltas, obj.Offset)
- }
- }
- }
-
- return nil
-}
-
-func (p *Parser) get(o *objectInfo) (b []byte, err error) {
- var ok bool
- if !o.ExternalRef { // skip cache check for placeholder parents
- b, ok = p.cache.Get(o.Offset)
- }
-
- // If it's not in the cache and is not a delta, we can try to find it in
- // the storage, if there is one. External refs must enter here.
- if !ok && p.storage != nil && !o.Type.IsDelta() {
- e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
- if err != nil {
- return nil, err
- }
- o.Type = e.Type()
-
- r, err := e.Reader()
- if err != nil {
- return nil, err
- }
-
- b = make([]byte, e.Size())
- if _, err = r.Read(b); err != nil {
- return nil, err
- }
- }
-
- if b != nil {
- return b, nil
- }
-
- if o.ExternalRef {
- // we were not able to resolve a ref in a thin pack
- return nil, ErrReferenceDeltaNotFound
- }
-
- var data []byte
- if o.DiskType.IsDelta() {
- base, err := p.get(o.Parent)
- if err != nil {
- return nil, err
- }
-
- data, err = p.resolveObject(o, base)
- if err != nil {
- return nil, err
- }
- } else {
- data, err = p.readData(o)
- if err != nil {
- return nil, err
- }
- }
-
- if len(o.Children) > 0 {
- p.cache.Put(o.Offset, data)
- }
-
- return data, nil
-}
-
-func (p *Parser) resolveObject(
- o *objectInfo,
- base []byte,
-) ([]byte, error) {
- if !o.DiskType.IsDelta() {
- return nil, nil
- }
-
- data, err := p.readData(o)
- if err != nil {
- return nil, err
- }
-
- data, err = applyPatchBase(o, data, base)
- if err != nil {
- return nil, err
- }
-
- if p.storage != nil {
- obj := new(plumbing.MemoryObject)
- obj.SetSize(o.Size())
- obj.SetType(o.Type)
- if _, err := obj.Write(data); err != nil {
- return nil, err
- }
-
- if _, err := p.storage.SetEncodedObject(obj); err != nil {
- return nil, err
- }
- }
-
- return data, nil
-}
-
-func (p *Parser) readData(o *objectInfo) ([]byte, error) {
- if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
- data, ok := p.deltas[o.Offset]
- if !ok {
- return nil, ErrDeltaNotCached
- }
-
- return data, nil
- }
-
- if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
- return nil, err
- }
-
- buf := new(bytes.Buffer)
- if _, _, err := p.scanner.NextObject(buf); err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
- patched, err := PatchDelta(base, data)
- if err != nil {
- return nil, err
- }
-
- if ota.SHA1 == plumbing.ZeroHash {
- ota.Type = ota.Parent.Type
- sha1, err := getSHA1(ota.Type, patched)
- if err != nil {
- return nil, err
- }
-
- ota.SHA1 = sha1
- ota.Length = int64(len(patched))
- }
-
- return patched, nil
-}
-
-func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
- hasher := plumbing.NewHasher(t, int64(len(data)))
- if _, err := hasher.Write(data); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hasher.Sum(), nil
-}
-
-type objectInfo struct {
- Offset int64
- Length int64
- Type plumbing.ObjectType
- DiskType plumbing.ObjectType
- ExternalRef bool // indicates this is an external reference in a thin pack file
-
- Crc32 uint32
-
- Parent *objectInfo
- Children []*objectInfo
- SHA1 plumbing.Hash
-}
-
-func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
- return newDeltaObject(offset, length, t, nil)
-}
-
-func newDeltaObject(
- offset, length int64,
- t plumbing.ObjectType,
- parent *objectInfo,
-) *objectInfo {
- obj := &objectInfo{
- Offset: offset,
- Length: length,
- Type: t,
- DiskType: t,
- Crc32: 0,
- Parent: parent,
- }
-
- return obj
-}
-
-func (o *objectInfo) IsDelta() bool {
- return o.Type.IsDelta()
-}
-
-func (o *objectInfo) Size() int64 {
- return o.Length
-}
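
A sketch of wiring an Observer into the Parser. countObserver is a hypothetical implementation that only tallies objects, and the pack path is a placeholder; since os.File is seekable, no storage is required:

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// countObserver implements packfile.Observer and just tallies objects.
type countObserver struct {
	objects uint32
}

func (c *countObserver) OnHeader(count uint32) error {
	fmt.Println("expecting", count, "objects")
	return nil
}

func (c *countObserver) OnInflatedObjectHeader(t plumbing.ObjectType, objSize, pos int64) error {
	return nil
}

func (c *countObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error {
	c.objects++
	return nil
}

func (c *countObserver) OnFooter(h plumbing.Hash) error {
	fmt.Println("checksum:", h, "objects seen:", c.objects)
	return nil
}

func main() {
	f, err := os.Open("some.pack") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// os.File is seekable, so no storage is required.
	obs := &countObserver{}
	parser, err := packfile.NewParser(packfile.NewScanner(f), obs)
	if err != nil {
		panic(err)
	}

	checksum, err := parser.Parse()
	fmt.Println(checksum, err)
}
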
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go
deleted file mode 100644
index a972f1c424..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package packfile
-
-import (
- "errors"
- "io/ioutil"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
-// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
-// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
-// for details about the delta format.
-
-const deltaSizeMin = 4
-
-// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
-func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
- r, err := base.Reader()
- if err != nil {
- return err
- }
-
- w, err := target.Writer()
- if err != nil {
- return err
- }
-
- src, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- dst, err := PatchDelta(src, delta)
- if err != nil {
- return err
- }
-
- target.SetSize(int64(len(dst)))
-
- _, err = w.Write(dst)
- return err
-}
-
-var (
- ErrInvalidDelta = errors.New("invalid delta")
- ErrDeltaCmd = errors.New("wrong delta command")
-)
-
-// PatchDelta returns the result of applying the modification deltas in delta to src.
-// An error will be returned if delta is corrupted (ErrInvalidDelta) or an action
-// command is neither copy-from-source nor copy-from-delta (ErrDeltaCmd).
-func PatchDelta(src, delta []byte) ([]byte, error) {
- if len(delta) < deltaSizeMin {
- return nil, ErrInvalidDelta
- }
-
- srcSz, delta := decodeLEB128(delta)
- if srcSz != uint(len(src)) {
- return nil, ErrInvalidDelta
- }
-
- targetSz, delta := decodeLEB128(delta)
- remainingTargetSz := targetSz
-
- var cmd byte
- dest := make([]byte, 0, targetSz)
- for {
- if len(delta) == 0 {
- return nil, ErrInvalidDelta
- }
-
- cmd = delta[0]
- delta = delta[1:]
- if isCopyFromSrc(cmd) {
- var offset, sz uint
- var err error
- offset, delta, err = decodeOffset(cmd, delta)
- if err != nil {
- return nil, err
- }
-
- sz, delta, err = decodeSize(cmd, delta)
- if err != nil {
- return nil, err
- }
-
- if invalidSize(sz, targetSz) ||
- invalidOffsetSize(offset, sz, srcSz) {
- break
- }
- dest = append(dest, src[offset:offset+sz]...)
- remainingTargetSz -= sz
- } else if isCopyFromDelta(cmd) {
- sz := uint(cmd) // cmd is the size itself
- if invalidSize(sz, targetSz) {
- return nil, ErrInvalidDelta
- }
-
- if uint(len(delta)) < sz {
- return nil, ErrInvalidDelta
- }
-
- dest = append(dest, delta[0:sz]...)
- remainingTargetSz -= sz
- delta = delta[sz:]
- } else {
- return nil, ErrDeltaCmd
- }
-
- if remainingTargetSz <= 0 {
- break
- }
- }
-
- return dest, nil
-}
-
-// Decodes a number encoded as an unsigned LEB128 at the start of some
-// binary data and returns the decoded number and the rest of the
-// stream.
-//
-// This must be called twice on the delta data buffer, first to get the
-// expected source buffer size, and again to get the target buffer size.
-func decodeLEB128(input []byte) (uint, []byte) {
- var num, sz uint
- var b byte
- for {
- b = input[sz]
- num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
- sz++
-
- if uint(b)&continuation == 0 || sz == uint(len(input)) {
- break
- }
- }
-
- return num, input[sz:]
-}
-
-const (
- payload = 0x7f // 0111 1111
- continuation = 0x80 // 1000 0000
-)
-
-func isCopyFromSrc(cmd byte) bool {
- return (cmd & 0x80) != 0
-}
-
-func isCopyFromDelta(cmd byte) bool {
- return (cmd&0x80) == 0 && cmd != 0
-}
-
-func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
- var offset uint
- if (cmd & 0x01) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x02) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x04) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << 16
- delta = delta[1:]
- }
- if (cmd & 0x08) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- offset |= uint(delta[0]) << 24
- delta = delta[1:]
- }
-
- return offset, delta, nil
-}
-
-func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
- var sz uint
- if (cmd & 0x10) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz = uint(delta[0])
- delta = delta[1:]
- }
- if (cmd & 0x20) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz |= uint(delta[0]) << 8
- delta = delta[1:]
- }
- if (cmd & 0x40) != 0 {
- if len(delta) == 0 {
- return 0, nil, ErrInvalidDelta
- }
- sz |= uint(delta[0]) << 16
- delta = delta[1:]
- }
- if sz == 0 {
- sz = 0x10000
- }
-
- return sz, delta, nil
-}
-
-func invalidSize(sz, targetSz uint) bool {
- return sz > targetSz
-}
-
-func invalidOffsetSize(offset, sz, srcSz uint) bool {
- return sumOverflows(offset, sz) ||
- offset+sz > srcSz
-}
-
-func sumOverflows(a, b uint) bool {
- return a+b < a
-}
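
The two sizes at the front of every delta use the same unsigned LEB128 encoding seen throughout the pack format: 7 payload bits per byte, MSB set on every byte except the last. A standalone copy of the decoder above, with one worked value:

package main

import "fmt"

const (
	payload      = 0x7f
	continuation = 0x80
)

// decodeLEB128 is a standalone copy of the helper above: little-endian
// base-128, 7 payload bits per byte, MSB set on all but the last byte.
func decodeLEB128(input []byte) (uint, []byte) {
	var num, sz uint
	for {
		b := input[sz]
		num |= (uint(b) & payload) << (sz * 7)
		sz++
		if uint(b)&continuation == 0 || sz == uint(len(input)) {
			break
		}
	}
	return num, input[sz:]
}

func main() {
	// 300 = 0b10_0101100 -> 0xAC (0x2C with the continuation bit), then 0x02.
	n, rest := decodeLEB128([]byte{0xAC, 0x02, 0xFF})
	fmt.Println(n, rest) // 300 [255]
}
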
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go
deleted file mode 100644
index 7b44192a9c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package packfile
-
-import (
- "bufio"
- "bytes"
- "compress/zlib"
- "fmt"
- "hash"
- "hash/crc32"
- "io"
- stdioutil "io/ioutil"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/binary"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-var (
- // ErrEmptyPackfile is returned by Header when no data is found in the packfile.
- ErrEmptyPackfile = NewError("empty packfile")
- // ErrBadSignature is returned by Header when the signature in the packfile is incorrect.
- ErrBadSignature = NewError("malformed pack file signature")
- // ErrUnsupportedVersion is returned by Header when the packfile version is
- // different from VersionSupported.
- ErrUnsupportedVersion = NewError("unsupported packfile version")
- // ErrSeekNotSupported is returned if seek is not supported.
- ErrSeekNotSupported = NewError("not seek support")
-)
-
-// ObjectHeader contains the information related to an object; this information
-// is collected from the bytes that precede the object's content.
-type ObjectHeader struct {
- Type plumbing.ObjectType
- Offset int64
- Length int64
- Reference plumbing.Hash
- OffsetReference int64
-}
-
-type Scanner struct {
- r *scannerReader
- crc hash.Hash32
-
- // pendingObject is used to detect whether an object has been read, or is
- // still waiting to be read.
- pendingObject *ObjectHeader
- version, objects uint32
-
- // IsSeekable says whether this scanner can Seek or not; for a Scanner to
- // be seekable, an r implementing io.Seeker is required.
- IsSeekable bool
-}
-
-// NewScanner returns a new Scanner based on a reader; if the given reader
-// implements io.ReadSeeker, the Scanner will also be seekable.
-func NewScanner(r io.Reader) *Scanner {
- _, ok := r.(io.ReadSeeker)
-
- crc := crc32.NewIEEE()
- return &Scanner{
- r: newScannerReader(r, crc),
- crc: crc,
- IsSeekable: ok,
- }
-}
-
-func (s *Scanner) Reset(r io.Reader) {
- _, ok := r.(io.ReadSeeker)
-
- s.r.Reset(r)
- s.crc.Reset()
- s.IsSeekable = ok
- s.pendingObject = nil
- s.version = 0
- s.objects = 0
-}
-
-// Header reads the whole packfile header (signature, version and object count).
-// It returns the version and the object count and performs checks on the
-// validity of the signature and the version fields.
-func (s *Scanner) Header() (version, objects uint32, err error) {
- if s.version != 0 {
- return s.version, s.objects, nil
- }
-
- sig, err := s.readSignature()
- if err != nil {
- if err == io.EOF {
- err = ErrEmptyPackfile
- }
-
- return
- }
-
- if !s.isValidSignature(sig) {
- err = ErrBadSignature
- return
- }
-
- version, err = s.readVersion()
- s.version = version
- if err != nil {
- return
- }
-
- if !s.isSupportedVersion(version) {
- err = ErrUnsupportedVersion.AddDetails("%d", version)
- return
- }
-
- objects, err = s.readCount()
- s.objects = objects
- return
-}
-
-// readSignature reads and returns the signature field of the packfile.
-func (s *Scanner) readSignature() ([]byte, error) {
- var sig = make([]byte, 4)
- if _, err := io.ReadFull(s.r, sig); err != nil {
- return []byte{}, err
- }
-
- return sig, nil
-}
-
-// isValidSignature reports whether sig is a valid packfile signature.
-func (s *Scanner) isValidSignature(sig []byte) bool {
- return bytes.Equal(sig, signature)
-}
-
-// readVersion reads and returns the version field of a packfile.
-func (s *Scanner) readVersion() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// isSupportedVersion returns whether version v is supported by the parser.
-// The current supported version is VersionSupported, defined above.
-func (s *Scanner) isSupportedVersion(v uint32) bool {
- return v == VersionSupported
-}
-
-// readCount reads and returns the count of objects field of a packfile.
-func (s *Scanner) readCount() (uint32, error) {
- return binary.ReadUint32(s.r)
-}
-
-// SeekObjectHeader seeks to the specified offset and returns the ObjectHeader
-// of the next object in the reader.
-func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
- // if seeking we assume that you are not interested in the header
- if s.version == 0 {
- s.version = VersionSupported
- }
-
- if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
- return nil, err
- }
-
- h, err := s.nextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- h.Offset = offset
- return h, nil
-}
-
-// NextObjectHeader returns the ObjectHeader for the next object in the reader
-func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
- if err := s.doPending(); err != nil {
- return nil, err
- }
-
- offset, err := s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
-
- h, err := s.nextObjectHeader()
- if err != nil {
- return nil, err
- }
-
- h.Offset = offset
- return h, nil
-}
-
-// nextObjectHeader returns the ObjectHeader for the next object in the reader
-// without the Offset field
-func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
- s.r.Flush()
- s.crc.Reset()
-
- h := &ObjectHeader{}
- s.pendingObject = h
-
- var err error
- h.Offset, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
-
- h.Type, h.Length, err = s.readObjectTypeAndLength()
- if err != nil {
- return nil, err
- }
-
- switch h.Type {
- case plumbing.OFSDeltaObject:
- no, err := binary.ReadVariableWidthInt(s.r)
- if err != nil {
- return nil, err
- }
-
- h.OffsetReference = h.Offset - no
- case plumbing.REFDeltaObject:
- var err error
- h.Reference, err = binary.ReadHash(s.r)
- if err != nil {
- return nil, err
- }
- }
-
- return h, nil
-}
-
-func (s *Scanner) doPending() error {
- if s.version == 0 {
- var err error
- s.version, s.objects, err = s.Header()
- if err != nil {
- return err
- }
- }
-
- return s.discardObjectIfNeeded()
-}
-
-func (s *Scanner) discardObjectIfNeeded() error {
- if s.pendingObject == nil {
- return nil
- }
-
- h := s.pendingObject
- n, _, err := s.NextObject(stdioutil.Discard)
- if err != nil {
- return err
- }
-
- if n != h.Length {
- return fmt.Errorf(
- "error discarding object, discarded %d, expected %d",
- n, h.Length,
- )
- }
-
- return nil
-}
-
-// readObjectTypeAndLength reads and returns the object type and the
-// length field from an object entry in a packfile.
-func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
- t, c, err := s.readType()
- if err != nil {
- return t, 0, err
- }
-
- l, err := s.readLength(c)
-
- return t, l, err
-}
-
-func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
- var c byte
- var err error
- if c, err = s.r.ReadByte(); err != nil {
- return plumbing.ObjectType(0), 0, err
- }
-
- typ := parseType(c)
-
- return typ, c, nil
-}
-
-func parseType(b byte) plumbing.ObjectType {
- return plumbing.ObjectType((b & maskType) >> firstLengthBits)
-}
-
-// The length is encoded in the last 4 bits of the first byte and in
-// the last 7 bits of subsequent bytes. The last byte has a 0 MSB.
-func (s *Scanner) readLength(first byte) (int64, error) {
- length := int64(first & maskFirstLength)
-
- c := first
- shift := firstLengthBits
- var err error
- for c&maskContinue > 0 {
- if c, err = s.r.ReadByte(); err != nil {
- return 0, err
- }
-
- length += int64(c&maskLength) << shift
- shift += lengthBits
- }
-
- return length, nil
-}
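
The size encoding above is easiest to see with a worked example. The sketch below is minimal and self-contained; it assumes the package constants carry the usual packfile values (maskFirstLength = 0x0f, maskContinue = 0x80, maskLength = 0x7f, firstLengthBits = 4, lengthBits = 7), which is what the masks in readLength imply:

    package main

    import "fmt"

    // decodeSize mirrors readLength: 4 size bits come from the first byte,
    // then 7 bits per continuation byte, least significant group first.
    func decodeSize(bs []byte) int64 {
        size := int64(bs[0] & 0x0f)
        shift := uint(4)
        for i := 0; bs[i]&0x80 != 0; i++ { // MSB set: another byte follows
            size += int64(bs[i+1]&0x7f) << shift
            shift += 7
        }
        return size
    }

    func main() {
        // 0x95 = 1_001_0101: continuation bit set, type commit (001),
        // low size bits 0101 (5). 0x0b contributes 11 << 4 = 176.
        fmt.Println(decodeSize([]byte{0x95, 0x0b})) // prints 181
    }
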
-
-// NextObject writes the content of the next object to the given writer and
-// returns the number of bytes written, the CRC32 of the content and an
-// error, if any.
-func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
- s.pendingObject = nil
- written, err = s.copyObject(w)
-
- s.r.Flush()
- crc32 = s.crc.Sum32()
- s.crc.Reset()
-
- return
-}
-
-// copyObject reads and writes a non-deltified object from its zlib stream
-// in an object entry in the packfile.
-func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
- zr := zlibReaderPool.Get().(io.ReadCloser)
- defer zlibReaderPool.Put(zr)
-
- if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
- return 0, fmt.Errorf("zlib reset error: %s", err)
- }
-
- defer ioutil.CheckClose(zr, &err)
- buf := byteSlicePool.Get().([]byte)
- n, err = io.CopyBuffer(w, zr, buf)
- byteSlicePool.Put(buf)
- return
-}
-
-var byteSlicePool = sync.Pool{
- New: func() interface{} {
- return make([]byte, 32*1024)
- },
-}
-
-// SeekFromStart sets a new offset from start and returns the old position
-// before the change.
-func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
- // if seeking we assume that you are not interested in the header
- if s.version == 0 {
- s.version = VersionSupported
- }
-
- previous, err = s.r.Seek(0, io.SeekCurrent)
- if err != nil {
- return -1, err
- }
-
- _, err = s.r.Seek(offset, io.SeekStart)
- return previous, err
-}
-
-// Checksum returns the checksum of the packfile
-func (s *Scanner) Checksum() (plumbing.Hash, error) {
- err := s.discardObjectIfNeeded()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return binary.ReadHash(s.r)
-}
-
-// Close reads the reader until io.EOF
-func (s *Scanner) Close() error {
- buf := byteSlicePool.Get().([]byte)
- _, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
- byteSlicePool.Put(buf)
- return err
-}
-
-// Flush is a no-op (deprecated)
-func (s *Scanner) Flush() error {
- return nil
-}
-
-// scannerReader has the following characteristics:
-// - Provides an io.ReadSeeker implementation for bufio.Reader, when the
-// underlying reader supports it.
-// - Keeps track of the current read position, for when the underlying reader
-// isn't an io.ReadSeeker, but we still want to know the current offset.
-// - Writes what it reads to the hash writer, with the aid of a small buffer.
-// The buffer helps avoid a performance penalty for performing small writes
-// to the crc32 hash writer.
-type scannerReader struct {
- reader io.Reader
- crc io.Writer
- rbuf *bufio.Reader
- wbuf *bufio.Writer
- offset int64
-}
-
-func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
- sr := &scannerReader{
- rbuf: bufio.NewReader(nil),
- wbuf: bufio.NewWriterSize(nil, 64),
- crc: h,
- }
- sr.Reset(r)
-
- return sr
-}
-
-func (r *scannerReader) Reset(reader io.Reader) {
- r.reader = reader
- r.rbuf.Reset(r.reader)
- r.wbuf.Reset(r.crc)
-
- r.offset = 0
- if seeker, ok := r.reader.(io.ReadSeeker); ok {
- r.offset, _ = seeker.Seek(0, io.SeekCurrent)
- }
-}
-
-func (r *scannerReader) Read(p []byte) (n int, err error) {
- n, err = r.rbuf.Read(p)
-
- r.offset += int64(n)
- if _, err := r.wbuf.Write(p[:n]); err != nil {
- return n, err
- }
- return
-}
-
-func (r *scannerReader) ReadByte() (b byte, err error) {
- b, err = r.rbuf.ReadByte()
- if err == nil {
- r.offset++
- return b, r.wbuf.WriteByte(b)
- }
- return
-}
-
-func (r *scannerReader) Flush() error {
- return r.wbuf.Flush()
-}
-
-// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
-// then only whence=io.SeekCurrent is supported; any other operation fails.
-func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
- var err error
-
- if seeker, ok := r.reader.(io.ReadSeeker); !ok {
- if whence != io.SeekCurrent || offset != 0 {
- return -1, ErrSeekNotSupported
- }
- } else {
- if whence == io.SeekCurrent && offset == 0 {
- return r.offset, nil
- }
-
- r.offset, err = seeker.Seek(offset, whence)
- r.rbuf.Reset(r.reader)
- }
-
- return r.offset, err
-}
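
Header, NextObjectHeader and NextObject together form the low-level read loop over a packfile. A minimal usage sketch, assuming a hypothetical pack path and the package's NewScanner constructor:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"

        "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
    )

    func main() {
        f, err := os.Open("objects/pack/example.pack") // hypothetical path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        s := packfile.NewScanner(f)
        version, objects, err := s.Header()
        if err != nil {
            panic(err)
        }
        fmt.Printf("packfile v%d with %d objects\n", version, objects)

        for i := uint32(0); i < objects; i++ {
            h, err := s.NextObjectHeader()
            if err != nil {
                panic(err)
            }
            // Inflate the entry; for delta objects this yields the raw
            // delta payload, not the final object content.
            n, crc, err := s.NextObject(ioutil.Discard)
            if err != nil {
                panic(err)
            }
            fmt.Printf("%v at %d: %d bytes (crc32 %08x)\n", h.Type, h.Offset, n, crc)
        }
    }
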
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go
deleted file mode 100644
index 6d409795b0..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package pktline implements reading payloads from pkt-lines and encoding
-// pkt-lines from payloads.
-package pktline
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-)
-
-// An Encoder writes pkt-lines to an output stream.
-type Encoder struct {
- w io.Writer
-}
-
-const (
- // MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
- MaxPayloadSize = 65516
-
- // For compatibility with the canonical Git implementation, accept longer pkt-lines
- OversizePayloadMax = 65520
-)
-
-var (
- // FlushPkt are the contents of a flush-pkt pkt-line.
- FlushPkt = []byte{'0', '0', '0', '0'}
- // Flush is the payload to use with the Encode method to encode a flush-pkt.
- Flush = []byte{}
- // FlushString is the payload to use with the EncodeString method to encode a flush-pkt.
- FlushString = ""
- // ErrPayloadTooLong is returned by the Encode methods when any of the
- // provided payloads is bigger than MaxPayloadSize.
- ErrPayloadTooLong = errors.New("payload is too long")
-)
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: w,
- }
-}
-
-// Flush encodes a flush-pkt to the output stream.
-func (e *Encoder) Flush() error {
- _, err := e.w.Write(FlushPkt)
- return err
-}
-
-// Encode encodes a pkt-line with the payload specified and writes it to
-// the output stream. If several payloads are specified, each of them
-// is streamed in its own pkt-line.
-func (e *Encoder) Encode(payloads ...[]byte) error {
- for _, p := range payloads {
- if err := e.encodeLine(p); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (e *Encoder) encodeLine(p []byte) error {
- if len(p) > MaxPayloadSize {
- return ErrPayloadTooLong
- }
-
- if bytes.Equal(p, Flush) {
- return e.Flush()
- }
-
- n := len(p) + 4
- if _, err := e.w.Write(asciiHex16(n)); err != nil {
- return err
- }
- _, err := e.w.Write(p)
- return err
-}
-
-// Returns the hexadecimal ascii representation of the 16 least
-// significant bits of n. The length of the returned slice will always
-// be 4. Example: if n is 1234 (0x4d2), the return value will be
-// []byte{'0', '4', 'd', '2'}.
-func asciiHex16(n int) []byte {
- var ret [4]byte
- ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12))
- ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8))
- ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4))
- ret[3] = byteToASCIIHex(byte(n & 0x000f))
-
- return ret[:]
-}
-
-// turns a byte into its hexadecimal ascii representation. Example:
-// from 11 (0xb) to 'b'.
-func byteToASCIIHex(n byte) byte {
- if n < 10 {
- return '0' + n
- }
-
- return 'a' - 10 + n
-}
-
-// EncodeString works similarly to Encode, but payloads are specified as strings.
-func (e *Encoder) EncodeString(payloads ...string) error {
- for _, p := range payloads {
- if err := e.Encode([]byte(p)); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encodef encodes a single pkt-line with the payload formatted according to
-// the format specifier. The rest of the arguments are used in the format
-// string.
-func (e *Encoder) Encodef(format string, a ...interface{}) error {
- return e.EncodeString(
- fmt.Sprintf(format, a...),
- )
-}
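
A short usage sketch of the Encoder (errors ignored for brevity); note that the four-byte length prefix counts itself, so a 6-byte payload encodes as 000a, and the want line below uses a hypothetical, shortened hash:

    package main

    import (
        "bytes"
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
    )

    func main() {
        var buf bytes.Buffer
        e := pktline.NewEncoder(&buf)

        e.EncodeString("hello\n")         // 6 bytes + 4-byte prefix = 10 = 0x000a
        e.Encodef("want %s\n", "49788a5") // hypothetical, shortened hash
        e.Flush()                         // flush-pkt: "0000"

        fmt.Printf("%q\n", buf.String())
        // "000ahello\n0011want 49788a5\n0000"
    }
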
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go
deleted file mode 100644
index 99aab46e88..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package pktline
-
-import (
- "errors"
- "io"
-)
-
-const (
- lenSize = 4
-)
-
-// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found.
-var ErrInvalidPktLen = errors.New("invalid pkt-len found")
-
-// Scanner provides a convenient interface for reading the payloads of a
-// series of pkt-lines. It takes an io.Reader providing the source,
-// which then can be tokenized through repeated calls to the Scan
-// method.
-//
-// After each Scan call, the Bytes method will return the payload of the
-// corresponding pkt-line on a shared buffer, which will be 65516 bytes
-// or smaller. Flush pkt-lines are represented by empty byte slices.
-//
-// Scanning stops at EOF or the first I/O error.
-type Scanner struct {
- r io.Reader // The reader provided by the client
- err error // Sticky error
- payload []byte // Last pkt-payload
- len [lenSize]byte // Last pkt-len
-}
-
-// NewScanner returns a new Scanner to read from r.
-func NewScanner(r io.Reader) *Scanner {
- return &Scanner{
- r: r,
- }
-}
-
-// Err returns the first error encountered by the Scanner.
-func (s *Scanner) Err() error {
- return s.err
-}
-
-// Scan advances the Scanner to the next pkt-line, whose payload will
-// then be available through the Bytes method. Scanning stops at EOF
-// or the first I/O error. After Scan returns false, the Err method
-// will return any error that occurred during scanning, except that if
-// it was io.EOF, Err will return nil.
-func (s *Scanner) Scan() bool {
- var l int
- l, s.err = s.readPayloadLen()
- if s.err == io.EOF {
- s.err = nil
- return false
- }
- if s.err != nil {
- return false
- }
-
- if cap(s.payload) < l {
- s.payload = make([]byte, 0, l)
- }
-
- if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil {
- return false
- }
- s.payload = s.payload[:l]
-
- return true
-}
-
-// Bytes returns the most recent payload generated by a call to Scan.
-// The underlying array may point to data that will be overwritten by a
-// subsequent call to Scan. It does no allocation.
-func (s *Scanner) Bytes() []byte {
- return s.payload
-}
-
-// readPayloadLen returns the payload length by reading the pkt-len field
-// and subtracting the pkt-len size from it.
-func (s *Scanner) readPayloadLen() (int, error) {
- if _, err := io.ReadFull(s.r, s.len[:]); err != nil {
- if err == io.ErrUnexpectedEOF {
- return 0, ErrInvalidPktLen
- }
-
- return 0, err
- }
-
- n, err := hexDecode(s.len)
- if err != nil {
- return 0, err
- }
-
- switch {
- case n == 0:
- return 0, nil
- case n <= lenSize:
- return 0, ErrInvalidPktLen
- case n > OversizePayloadMax+lenSize:
- return 0, ErrInvalidPktLen
- default:
- return n - lenSize, nil
- }
-}
-
-// Turns the hexadecimal representation of a number in a byte slice into
-// a number. This function substitutes for strconv.ParseUint(string(buf),
-// 16, 16) and/or hex.Decode, avoiding the generation of new strings and
-// thus helping the GC.
-func hexDecode(buf [lenSize]byte) (int, error) {
- var ret int
- for i := 0; i < lenSize; i++ {
- n, err := asciiHexToByte(buf[i])
- if err != nil {
- return 0, ErrInvalidPktLen
- }
- ret = 16*ret + int(n)
- }
- return ret, nil
-}
-
-// turns the hexadecimal ascii representation of a byte into its
-// numerical value. Example: from 'b' to 11 (0xb).
-func asciiHexToByte(b byte) (byte, error) {
- switch {
- case b >= '0' && b <= '9':
- return b - '0', nil
- case b >= 'a' && b <= 'f':
- return b - 'a' + 10, nil
- default:
- return 0, ErrInvalidPktLen
- }
-}
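
The Scanner reads the same format back. A minimal sketch over a fixed input of two pkt-lines followed by a flush-pkt:

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
    )

    func main() {
        input := "000ahello\n0008foo\n0000"
        s := pktline.NewScanner(strings.NewReader(input))

        for s.Scan() {
            payload := s.Bytes()
            if len(payload) == 0 {
                fmt.Println("flush-pkt") // flush-pkts scan as empty payloads
                continue
            }
            fmt.Printf("payload: %q\n", payload)
        }
        if err := s.Err(); err != nil {
            fmt.Println("scan error:", err)
        }
    }
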
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go
deleted file mode 100644
index 8e60877894..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package plumbing
-
-import (
- "bytes"
- "crypto/sha1"
- "encoding/hex"
- "hash"
- "sort"
- "strconv"
-)
-
-// Hash represents SHA-1 hashed content.
-type Hash [20]byte
-
-// ZeroHash is a Hash with the zero value.
-var ZeroHash Hash
-
-// ComputeHash computes the hash for a given ObjectType and content.
-func ComputeHash(t ObjectType, content []byte) Hash {
- h := NewHasher(t, int64(len(content)))
- h.Write(content)
- return h.Sum()
-}
-
-// NewHash returns a new Hash from a hexadecimal hash representation.
-func NewHash(s string) Hash {
- b, _ := hex.DecodeString(s)
-
- var h Hash
- copy(h[:], b)
-
- return h
-}
-
-func (h Hash) IsZero() bool {
- var empty Hash
- return h == empty
-}
-
-func (h Hash) String() string {
- return hex.EncodeToString(h[:])
-}
-
-type Hasher struct {
- hash.Hash
-}
-
-func NewHasher(t ObjectType, size int64) Hasher {
- h := Hasher{sha1.New()}
- h.Write(t.Bytes())
- h.Write([]byte(" "))
- h.Write([]byte(strconv.FormatInt(size, 10)))
- h.Write([]byte{0})
- return h
-}
-
-func (h Hasher) Sum() (hash Hash) {
- copy(hash[:], h.Hash.Sum(nil))
- return
-}
-
-// HashesSort sorts a slice of Hashes in increasing order.
-func HashesSort(a []Hash) {
- sort.Sort(HashSlice(a))
-}
-
-// HashSlice attaches the methods of sort.Interface to []Hash, sorting in
-// increasing order.
-type HashSlice []Hash
-
-func (p HashSlice) Len() int { return len(p) }
-func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 }
-func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
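
Since the Hasher prepends the canonical "<type> <size>\x00" header before hashing, ComputeHash agrees with `git hash-object`. A minimal sketch:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    func main() {
        // Should match `printf hello | git hash-object --stdin`:
        // b6fc4c620b67d95f953a5c1c1230aaab5db5a1b0
        h := plumbing.ComputeHash(plumbing.BlobObject, []byte("hello"))
        fmt.Println(h)

        fmt.Println(h.IsZero())                        // false
        fmt.Println(plumbing.NewHash(h.String()) == h) // round-trips: true
    }
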
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go
deleted file mode 100644
index b8e1e1b817..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package plumbing
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// MemoryObject is an in-memory Object implementation.
-type MemoryObject struct {
- t ObjectType
- h Hash
- cont []byte
- sz int64
-}
-
-// Hash returns the object Hash. The hash is calculated on the fly the first
-// time it's called; all subsequent calls return the same Hash even if the
-// type or the content has changed. The Hash is only generated if the size
-// of the content is exactly the object size.
-func (o *MemoryObject) Hash() Hash {
- if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
- o.h = ComputeHash(o.t, o.cont)
- }
-
- return o.h
-}
-
-// Type returns the ObjectType.
-func (o *MemoryObject) Type() ObjectType { return o.t }
-
-// SetType sets the ObjectType
-func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
-
-// Size returns the size of the object.
-func (o *MemoryObject) Size() int64 { return o.sz }
-
-// SetSize sets the object size; content of the given size should be written
-// afterwards.
-func (o *MemoryObject) SetSize(s int64) { o.sz = s }
-
-// Reader returns an io.ReadCloser used to read the object's content.
-func (o *MemoryObject) Reader() (io.ReadCloser, error) {
- return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
-}
-
-// Writer returns an io.WriteCloser used to write the object's content.
-func (o *MemoryObject) Writer() (io.WriteCloser, error) {
- return o, nil
-}
-
-func (o *MemoryObject) Write(p []byte) (n int, err error) {
- o.cont = append(o.cont, p...)
- o.sz = int64(len(o.cont))
-
- return len(p), nil
-}
-
-// Close releases any resources consumed by the object when it is acting as
-// an ObjectWriter.
-func (o *MemoryObject) Close() error { return nil }
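
A minimal sketch of the write-then-hash lifecycle; Write keeps the recorded size in sync with the content, which is what allows Hash to be computed afterwards:

    package main

    import (
        "fmt"
        "io/ioutil"

        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    func main() {
        obj := &plumbing.MemoryObject{}
        obj.SetType(plumbing.BlobObject)

        w, _ := obj.Writer() // the object itself acts as the WriteCloser
        w.Write([]byte("hello"))
        w.Close()

        fmt.Println(obj.Hash()) // same digest as the ComputeHash example

        r, _ := obj.Reader()
        content, _ := ioutil.ReadAll(r)
        r.Close()
        fmt.Printf("%s (%d bytes)\n", content, obj.Size())
    }
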
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go
deleted file mode 100644
index 2655dee43e..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package plumbing implements the core interfaces and structs used by go-git.
-package plumbing
-
-import (
- "errors"
- "io"
-)
-
-var (
- ErrObjectNotFound = errors.New("object not found")
- // ErrInvalidType is returned when an invalid object type is provided.
- ErrInvalidType = errors.New("invalid object type")
-)
-
-// EncodedObject is a generic representation of any git object.
-type EncodedObject interface {
- Hash() Hash
- Type() ObjectType
- SetType(ObjectType)
- Size() int64
- SetSize(int64)
- Reader() (io.ReadCloser, error)
- Writer() (io.WriteCloser, error)
-}
-
-// DeltaObject is an EncodedObject representing a delta.
-type DeltaObject interface {
- EncodedObject
- // BaseHash returns the hash of the object used as base for this delta.
- BaseHash() Hash
- // ActualHash returns the hash of the object after applying the delta.
- ActualHash() Hash
- // ActualSize returns the size of the object after applying the delta.
- ActualSize() int64
-}
-
-// ObjectType is the internal object type.
-// Integer values from 0 to 7 map to those exposed by git.
-// AnyObject is used to represent any of the types from 0 to 7.
-type ObjectType int8
-
-const (
- InvalidObject ObjectType = 0
- CommitObject ObjectType = 1
- TreeObject ObjectType = 2
- BlobObject ObjectType = 3
- TagObject ObjectType = 4
- // 5 reserved for future expansion
- OFSDeltaObject ObjectType = 6
- REFDeltaObject ObjectType = 7
-
- AnyObject ObjectType = -127
-)
-
-func (t ObjectType) String() string {
- switch t {
- case CommitObject:
- return "commit"
- case TreeObject:
- return "tree"
- case BlobObject:
- return "blob"
- case TagObject:
- return "tag"
- case OFSDeltaObject:
- return "ofs-delta"
- case REFDeltaObject:
- return "ref-delta"
- case AnyObject:
- return "any"
- default:
- return "unknown"
- }
-}
-
-func (t ObjectType) Bytes() []byte {
- return []byte(t.String())
-}
-
-// Valid returns true if t is a valid ObjectType.
-func (t ObjectType) Valid() bool {
- return t >= CommitObject && t <= REFDeltaObject
-}
-
-// IsDelta returns true for any ObjectType that represents a delta (i.e.
-// REFDeltaObject or OFSDeltaObject).
-func (t ObjectType) IsDelta() bool {
- return t == REFDeltaObject || t == OFSDeltaObject
-}
-
-// ParseObjectType parses a string representation of ObjectType. It returns an
-// error on parse failure.
-func ParseObjectType(value string) (typ ObjectType, err error) {
- switch value {
- case "commit":
- typ = CommitObject
- case "tree":
- typ = TreeObject
- case "blob":
- typ = BlobObject
- case "tag":
- typ = TagObject
- case "ofs-delta":
- typ = OFSDeltaObject
- case "ref-delta":
- typ = REFDeltaObject
- default:
- err = ErrInvalidType
- }
- return
-}
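
The string and numeric views of ObjectType round-trip through ParseObjectType. A small sketch:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
    )

    func main() {
        for _, s := range []string{"commit", "ofs-delta", "bogus"} {
            t, err := plumbing.ParseObjectType(s)
            if err != nil {
                fmt.Println(s, "->", err) // bogus -> invalid object type
                continue
            }
            fmt.Printf("%s -> %d (valid: %v, delta: %v)\n",
                t, t, t.Valid(), t.IsDelta())
        }
    }
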
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go
deleted file mode 100644
index f376baa65a..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package object
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// Blob is used to store arbitrary data - it is generally a file.
-type Blob struct {
- // Hash of the blob.
- Hash plumbing.Hash
- // Size of the (uncompressed) blob.
- Size int64
-
- obj plumbing.EncodedObject
-}
-
-// GetBlob gets a blob from an object storer and decodes it.
-func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
- o, err := s.EncodedObject(plumbing.BlobObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeBlob(o)
-}
-
-// DecodeBlob decodes an encoded object into a *Blob.
-func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) {
- b := &Blob{}
- if err := b.Decode(o); err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-// ID returns the object ID of the blob. The returned value will always match
-// the current value of Blob.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (b *Blob) ID() plumbing.Hash {
- return b.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.BlobObject.
-//
-// Type is present to fulfill the Object interface.
-func (b *Blob) Type() plumbing.ObjectType {
- return plumbing.BlobObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Blob struct.
-func (b *Blob) Decode(o plumbing.EncodedObject) error {
- if o.Type() != plumbing.BlobObject {
- return ErrUnsupportedObject
- }
-
- b.Hash = o.Hash()
- b.Size = o.Size()
- b.obj = o
-
- return nil
-}
-
-// Encode transforms a Blob into a plumbing.EncodedObject.
-func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
- o.SetType(plumbing.BlobObject)
-
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- r, err := b.Reader()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- _, err = io.Copy(w, r)
- return err
-}
-
-// Reader returns a reader allowing access to the content of the blob.
-func (b *Blob) Reader() (io.ReadCloser, error) {
- return b.obj.Reader()
-}
-
-// BlobIter provides an iterator for a set of blobs.
-type BlobIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewBlobIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *BlobIter that iterates over all
-// blobs contained in the storer.EncodedObjectIter.
-//
-// Any non-blob object returned by the storer.EncodedObjectIter is skipped.
-func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter {
- return &BlobIter{iter, s}
-}
-
-// Next moves the iterator to the next blob and returns a pointer to it. If
-// there are no more blobs, it returns io.EOF.
-func (iter *BlobIter) Next() (*Blob, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- if obj.Type() != plumbing.BlobObject {
- continue
- }
-
- return DecodeBlob(obj)
- }
-}
-
-// ForEach calls the cb function for each blob contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *BlobIter) ForEach(cb func(*Blob) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- if obj.Type() != plumbing.BlobObject {
- return nil
- }
-
- b, err := DecodeBlob(obj)
- if err != nil {
- return err
- }
-
- return cb(b)
- })
-}
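
A usage sketch for the iterator, assuming a local repository opened through the top-level go-git package and its BlobObjects accessor:

    package main

    import (
        "fmt"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
        "gopkg.in/src-d/go-git.v4/plumbing/storer"
    )

    func main() {
        repo, err := git.PlainOpen(".") // any local repository
        if err != nil {
            panic(err)
        }

        blobs, err := repo.BlobObjects()
        if err != nil {
            panic(err)
        }

        // Print the first three blobs, then stop without an error.
        count := 0
        err = blobs.ForEach(func(b *object.Blob) error {
            fmt.Printf("%s %d bytes\n", b.Hash, b.Size)
            if count++; count == 3 {
                return storer.ErrStop
            }
            return nil
        })
        if err != nil {
            panic(err)
        }
    }
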
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go
deleted file mode 100644
index a1b4c27499..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
- "fmt"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie"
-)
-
-// Change values represent a detected change between two git trees. For
-// modifications, From is the original status of the node and To is its
-// final status. For insertions, From is the zero value and for
-// deletions To is the zero value.
-type Change struct {
- From ChangeEntry
- To ChangeEntry
-}
-
-var empty = ChangeEntry{}
-
-// Action returns the kind of action represented by the change, an
-// insertion, a deletion or a modification.
-func (c *Change) Action() (merkletrie.Action, error) {
- if c.From == empty && c.To == empty {
- return merkletrie.Action(0),
- fmt.Errorf("malformed change: empty from and to")
- }
- if c.From == empty {
- return merkletrie.Insert, nil
- }
- if c.To == empty {
- return merkletrie.Delete, nil
- }
-
- return merkletrie.Modify, nil
-}
-
-// Files returns the files before and after a change.
-// For insertions from will be nil. For deletions to will be nil.
-func (c *Change) Files() (from, to *File, err error) {
- action, err := c.Action()
- if err != nil {
- return
- }
-
- if action == merkletrie.Insert || action == merkletrie.Modify {
- if !c.To.TreeEntry.Mode.IsFile() {
- return nil, nil, nil
- }
-
- to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
- if err != nil {
- return
- }
- }
-
- if action == merkletrie.Delete || action == merkletrie.Modify {
- if !c.From.TreeEntry.Mode.IsFile() {
- return nil, nil, nil
- }
-
- from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func (c *Change) String() string {
- action, err := c.Action()
- if err != nil {
- return "malformed change"
- }
-
- return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())
-}
-
-// Patch returns a Patch with all the file changes in chunks. This
-// representation can be used to create several diff outputs.
-func (c *Change) Patch() (*Patch, error) {
- return c.PatchContext(context.Background())
-}
-
-// PatchContext returns a Patch with all the file changes in chunks. This
-// representation can be used to create several diff outputs.
-// If the context expires, a non-nil error will be returned.
-// The provided context must be non-nil.
-func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
- return getPatchContext(ctx, "", c)
-}
-
-func (c *Change) name() string {
- if c.From != empty {
- return c.From.Name
- }
-
- return c.To.Name
-}
-
-// ChangeEntry values represent a node that has suffered a change.
-type ChangeEntry struct {
- // Full path of the node using "/" as separator.
- Name string
- // Parent tree of the node that has changed.
- Tree *Tree
- // The entry of the node.
- TreeEntry TreeEntry
-}
-
-// Changes represents a collection of changes between two git trees.
-// Implements sort.Interface lexicographically over the path of the
-// changed files.
-type Changes []*Change
-
-func (c Changes) Len() int {
- return len(c)
-}
-
-func (c Changes) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-func (c Changes) Less(i, j int) bool {
- return strings.Compare(c[i].name(), c[j].name()) < 0
-}
-
-func (c Changes) String() string {
- var buffer bytes.Buffer
- buffer.WriteString("[")
- comma := ""
- for _, v := range c {
- buffer.WriteString(comma)
- buffer.WriteString(v.String())
- comma = ", "
- }
- buffer.WriteString("]")
-
- return buffer.String()
-}
-
-// Patch returns a Patch with all the changes in chunks. This
-// representation can be used to create several diff outputs.
-func (c Changes) Patch() (*Patch, error) {
- return c.PatchContext(context.Background())
-}
-
-// PatchContext returns a Patch with all the changes in chunks. This
-// representation can be used to create several diff outputs.
-// If the context expires, a non-nil error will be returned.
-// The provided context must be non-nil.
-func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
- return getPatchContext(ctx, "", c...)
-}
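
Changes values are usually produced by diffing two trees with this package's DiffTree. A sketch comparing HEAD against its first parent, assuming a repository with at least two commits:

    package main

    import (
        "fmt"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }
        ref, err := repo.Head()
        if err != nil {
            panic(err)
        }
        commit, err := repo.CommitObject(ref.Hash())
        if err != nil {
            panic(err)
        }
        parent, err := commit.Parent(0)
        if err != nil {
            panic(err) // ErrParentNotFound on a root commit
        }

        parentTree, _ := parent.Tree() // errors elided for brevity
        tree, _ := commit.Tree()

        changes, err := object.DiffTree(parentTree, tree)
        if err != nil {
            panic(err)
        }
        fmt.Println(changes) // e.g. [<Action: Modify, Path: main.go>]
    }
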
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go
deleted file mode 100644
index 491c39907a..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package object
-
-import (
- "errors"
- "fmt"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// The following functions transform change types from the merkletrie
-// package into change types from this package.
-
-func newChange(c merkletrie.Change) (*Change, error) {
- ret := &Change{}
-
- var err error
- if ret.From, err = newChangeEntry(c.From); err != nil {
- return nil, fmt.Errorf("From field: %s", err)
- }
-
- if ret.To, err = newChangeEntry(c.To); err != nil {
- return nil, fmt.Errorf("To field: %s", err)
- }
-
- return ret, nil
-}
-
-func newChangeEntry(p noder.Path) (ChangeEntry, error) {
- if p == nil {
- return empty, nil
- }
-
- asTreeNoder, ok := p.Last().(*treeNoder)
- if !ok {
- return ChangeEntry{}, errors.New("cannot transform non-TreeNoders")
- }
-
- return ChangeEntry{
- Name: p.String(),
- Tree: asTreeNoder.parent,
- TreeEntry: TreeEntry{
- Name: asTreeNoder.name,
- Mode: asTreeNoder.mode,
- Hash: asTreeNoder.hash,
- },
- }, nil
-}
-
-func newChanges(src merkletrie.Changes) (Changes, error) {
- ret := make(Changes, len(src))
- var err error
- for i, e := range src {
- ret[i], err = newChange(e)
- if err != nil {
- return nil, fmt.Errorf("change #%d: %s", i, err)
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go
deleted file mode 100644
index 6b50934050..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go
+++ /dev/null
@@ -1,430 +0,0 @@
-package object
-
-import (
- "bufio"
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "strings"
-
- "golang.org/x/crypto/openpgp"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-const (
- beginpgp string = "-----BEGIN PGP SIGNATURE-----"
- endpgp string = "-----END PGP SIGNATURE-----"
- headerpgp string = "gpgsig"
-)
-
-// Hash represents the hash of an object
-type Hash plumbing.Hash
-
-// Commit points to a single tree, marking it as what the project looked like
-// at a certain point in time. It contains meta-information about that point
-// in time, such as a timestamp, the author of the changes since the last
-// commit, a pointer to the previous commit(s), etc.
-// http://shafiulazam.com/gitbook/1_the_git_object_model.html
-type Commit struct {
- // Hash of the commit object.
- Hash plumbing.Hash
- // Author is the original author of the commit.
- Author Signature
- // Committer is the one performing the commit, might be different from
- // Author.
- Committer Signature
- // PGPSignature is the PGP signature of the commit.
- PGPSignature string
- // Message is the commit message, contains arbitrary text.
- Message string
- // TreeHash is the hash of the root tree of the commit.
- TreeHash plumbing.Hash
- // ParentHashes are the hashes of the parent commits of the commit.
- ParentHashes []plumbing.Hash
-
- s storer.EncodedObjectStorer
-}
-
-// GetCommit gets a commit from an object storer and decodes it.
-func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) {
- o, err := s.EncodedObject(plumbing.CommitObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(s, o)
-}
-
-// DecodeCommit decodes an encoded object into a *Commit and associates it to
-// the given object storer.
-func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) {
- c := &Commit{s: s}
- if err := c.Decode(o); err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-// Tree returns the Tree from the commit.
-func (c *Commit) Tree() (*Tree, error) {
- return GetTree(c.s, c.TreeHash)
-}
-
-// PatchContext returns the Patch between the actual commit and the provided one.
-// An error will be returned if the context expires. The provided context must be non-nil.
-func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
- fromTree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- toTree, err := to.Tree()
- if err != nil {
- return nil, err
- }
-
- return fromTree.PatchContext(ctx, toTree)
-}
-
-// Patch returns the Patch between the actual commit and the provided one.
-func (c *Commit) Patch(to *Commit) (*Patch, error) {
- return c.PatchContext(context.Background(), to)
-}
-
-// Parents returns a CommitIter over the parent Commits.
-func (c *Commit) Parents() CommitIter {
- return NewCommitIter(c.s,
- storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes),
- )
-}
-
-// NumParents returns the number of parents in a commit.
-func (c *Commit) NumParents() int {
- return len(c.ParentHashes)
-}
-
-var ErrParentNotFound = errors.New("commit parent not found")
-
-// Parent returns the ith parent of a commit.
-func (c *Commit) Parent(i int) (*Commit, error) {
- if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 {
- return nil, ErrParentNotFound
- }
-
- return GetCommit(c.s, c.ParentHashes[i])
-}
-
-// File returns the file with the specified "path" in the commit and a
-// nil error if the file exists. If the file does not exist, it returns
-// a nil file and the ErrFileNotFound error.
-func (c *Commit) File(path string) (*File, error) {
- tree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- return tree.File(path)
-}
-
-// Files returns a FileIter allowing iteration over the Tree.
-func (c *Commit) Files() (*FileIter, error) {
- tree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- return tree.Files(), nil
-}
-
-// ID returns the object ID of the commit. The returned value will always match
-// the current value of Commit.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (c *Commit) ID() plumbing.Hash {
- return c.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.CommitObject.
-//
-// Type is present to fulfill the Object interface.
-func (c *Commit) Type() plumbing.ObjectType {
- return plumbing.CommitObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Commit struct.
-func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.CommitObject {
- return ErrUnsupportedObject
- }
-
- c.Hash = o.Hash()
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := bufPool.Get().(*bufio.Reader)
- defer bufPool.Put(r)
- r.Reset(reader)
-
- var message bool
- var pgpsig bool
- for {
- line, err := r.ReadBytes('\n')
- if err != nil && err != io.EOF {
- return err
- }
-
- if pgpsig {
- if len(line) > 0 && line[0] == ' ' {
- line = bytes.TrimLeft(line, " ")
- c.PGPSignature += string(line)
- continue
- } else {
- pgpsig = false
- }
- }
-
- if !message {
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- message = true
- continue
- }
-
- split := bytes.SplitN(line, []byte{' '}, 2)
-
- var data []byte
- if len(split) == 2 {
- data = split[1]
- }
-
- switch string(split[0]) {
- case "tree":
- c.TreeHash = plumbing.NewHash(string(data))
- case "parent":
- c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
- case "author":
- c.Author.Decode(data)
- case "committer":
- c.Committer.Decode(data)
- case headerpgp:
- c.PGPSignature += string(data) + "\n"
- pgpsig = true
- }
- } else {
- c.Message += string(line)
- }
-
- if err == io.EOF {
- return nil
- }
- }
-}
-
-// Encode transforms a Commit into a plumbing.EncodedObject.
-func (b *Commit) Encode(o plumbing.EncodedObject) error {
- return b.encode(o, true)
-}
-
-// EncodeWithoutSignature exports a Commit into a plumbing.EncodedObject
-// without the signature (corresponds to the payload of the PGP signature).
-func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
- return b.encode(o, false)
-}
-
-func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
- o.SetType(plumbing.CommitObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
-
- if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil {
- return err
- }
-
- for _, parent := range b.ParentHashes {
- if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil {
- return err
- }
- }
-
- if _, err = fmt.Fprint(w, "author "); err != nil {
- return err
- }
-
- if err = b.Author.Encode(w); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, "\ncommitter "); err != nil {
- return err
- }
-
- if err = b.Committer.Encode(w); err != nil {
- return err
- }
-
- if b.PGPSignature != "" && includeSig {
- if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
- return err
- }
-
- // Split all the signature lines and re-write with a left padding and
- // newline. Use join for this so it's clear that a newline should not be
- // added after this section, as it will be added when the message is
- // printed.
- signature := strings.TrimSuffix(b.PGPSignature, "\n")
- lines := strings.Split(signature, "\n")
- if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
- return err
- }
- }
-
- if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil {
- return err
- }
-
- return err
-}
-
-// Stats returns the stats of a commit.
-func (c *Commit) Stats() (FileStats, error) {
- return c.StatsContext(context.Background())
-}
-
-// StatsContext returns the stats of a commit. An error will be returned if
-// the context expires. The provided context must be non-nil.
-func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) {
- fromTree, err := c.Tree()
- if err != nil {
- return nil, err
- }
-
- toTree := &Tree{}
- if c.NumParents() != 0 {
- firstParent, err := c.Parents().Next()
- if err != nil {
- return nil, err
- }
-
- toTree, err = firstParent.Tree()
- if err != nil {
- return nil, err
- }
- }
-
- patch, err := toTree.PatchContext(ctx, fromTree)
- if err != nil {
- return nil, err
- }
-
- return getFileStatsFromFilePatches(patch.FilePatches()), nil
-}
-
-func (c *Commit) String() string {
- return fmt.Sprintf(
- "%s %s\nAuthor: %s\nDate: %s\n\n%s\n",
- plumbing.CommitObject, c.Hash, c.Author.String(),
- c.Author.When.Format(DateFormat), indent(c.Message),
- )
-}
-
-// Verify performs PGP verification of the commit with a provided armored
-// keyring and returns the openpgp.Entity associated with the verifying key
-// on success.
-func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
- keyRingReader := strings.NewReader(armoredKeyRing)
- keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
- if err != nil {
- return nil, err
- }
-
- // Extract signature.
- signature := strings.NewReader(c.PGPSignature)
-
- encoded := &plumbing.MemoryObject{}
- // Encode commit components, excluding signature and get a reader object.
- if err := c.EncodeWithoutSignature(encoded); err != nil {
- return nil, err
- }
- er, err := encoded.Reader()
- if err != nil {
- return nil, err
- }
-
- return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
-}
-
-func indent(t string) string {
- var output []string
- for _, line := range strings.Split(t, "\n") {
- if len(line) != 0 {
- line = " " + line
- }
-
- output = append(output, line)
- }
-
- return strings.Join(output, "\n")
-}
-
-// CommitIter is a generic closable interface for iterating over commits.
-type CommitIter interface {
- Next() (*Commit, error)
- ForEach(func(*Commit) error) error
- Close()
-}
-
-// storerCommitIter provides an iterator over commits in an EncodedObjectStorer.
-type storerCommitIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewCommitIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a CommitIter that iterates over all
-// commits contained in the storer.EncodedObjectIter.
-//
-// Any non-commit object returned by the storer.EncodedObjectIter is skipped.
-func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter {
- return &storerCommitIter{iter, s}
-}
-
-// Next moves the iterator to the next commit and returns a pointer to it. If
-// there are no more commits, it returns io.EOF.
-func (iter *storerCommitIter) Next() (*Commit, error) {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(iter.s, obj)
-}
-
-// ForEach calls the cb function for each commit contained in this iter until
-// an error occurs or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- c, err := DecodeCommit(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(c)
- })
-}
-
-func (iter *storerCommitIter) Close() {
- iter.EncodedObjectIter.Close()
-}
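
Decode parses (and encode re-emits) the plain-text commit layout: header lines, a blank separator, then the free-form message. A minimal round-trip sketch; the hashes and identities below are hypothetical:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    // raw is a hypothetical commit in the layout Decode expects.
    const raw = "tree 9c435a86e664be00db85b7be20c64650f2cfbb42\n" +
        "parent 0386cbcb32ed93b5e10b4ea320e83a584d18457a\n" +
        "author Alice <alice@example.com> 1584464398 +0200\n" +
        "committer Alice <alice@example.com> 1584464398 +0200\n" +
        "\n" +
        "Add a worked example\n"

    func main() {
        obj := &plumbing.MemoryObject{}
        obj.SetType(plumbing.CommitObject)
        w, _ := obj.Writer()
        w.Write([]byte(raw))
        w.Close()

        var c object.Commit
        if err := c.Decode(obj); err != nil {
            panic(err)
        }
        fmt.Println(c.TreeHash)    // 9c435a86...
        fmt.Println(c.Author.Name) // Alice
        fmt.Print(c.Message)       // Add a worked example
    }
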
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go
deleted file mode 100644
index 0eff059127..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package object
-
-import (
- "container/list"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage"
-)
-
-type commitPreIterator struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- stack []CommitIter
- start *Commit
-}
-
-// NewCommitPreorderIter returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents in pre-order.
-// Each commit is visited only once. If the callback passed to ForEach returns
-// an error, walking stops and that error is returned. Other errors might be
-// returned if the history cannot be traversed (e.g. missing objects). The
-// ignore parameter allows skipping some commits during iteration.
-func NewCommitPreorderIter(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &commitPreIterator{
- seenExternal: seenExternal,
- seen: seen,
- stack: make([]CommitIter, 0),
- start: c,
- }
-}
-
-func (w *commitPreIterator) Next() (*Commit, error) {
- var c *Commit
- for {
- if w.start != nil {
- c = w.start
- w.start = nil
- } else {
- current := len(w.stack) - 1
- if current < 0 {
- return nil, io.EOF
- }
-
- var err error
- c, err = w.stack[current].Next()
- if err == io.EOF {
- w.stack = w.stack[:current]
- continue
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- if c.NumParents() > 0 {
- w.stack = append(w.stack, filteredParentIter(c, w.seen))
- }
-
- return c, nil
- }
-}
-
-func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter {
- var hashes []plumbing.Hash
- for _, h := range c.ParentHashes {
- if !seen[h] {
- hashes = append(hashes, h)
- }
- }
-
- return NewCommitIter(c.s,
- storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes),
- )
-}
-
-func (w *commitPreIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitPreIterator) Close() {}
-
-type commitPostIterator struct {
- stack []*Commit
- seen map[plumbing.Hash]bool
-}
-
-// NewCommitPostorderIter returns a CommitIter that walks the commit
-// history like WalkCommitHistory but in post-order. This means that after
-// walking a merge commit, the merged commit will be walked before the base
-// it was merged on. This can be useful if you wish to see the history in
-// chronological order. The ignore parameter allows skipping some commits
-// during iteration.
-func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &commitPostIterator{
- stack: []*Commit{c},
- seen: seen,
- }
-}
-
-func (w *commitPostIterator) Next() (*Commit, error) {
- for {
- if len(w.stack) == 0 {
- return nil, io.EOF
- }
-
- c := w.stack[len(w.stack)-1]
- w.stack = w.stack[:len(w.stack)-1]
-
- if w.seen[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- return c, c.Parents().ForEach(func(p *Commit) error {
- w.stack = append(w.stack, p)
- return nil
- })
- }
-}
-
-func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitPostIterator) Close() {}
-
-// commitAllIterator is a commit iterator over all refs.
-type commitAllIterator struct {
- // currCommit points to the current commit.
- currCommit *list.Element
-}
-
-// NewCommitAllIter returns a new commit iterator for all refs.
-// repoStorer is a repo Storer used to get commits and references.
-// commitIterFunc is a commit iterator function, used to iterate through ref commits in the chosen order.
-func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
- commitsPath := list.New()
- commitsLookup := make(map[plumbing.Hash]*list.Element)
- head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
- if err == nil {
- err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- // add all references along with the HEAD
- refIter, err := repoStorer.IterReferences()
- if err != nil {
- return nil, err
- }
- defer refIter.Close()
-
- for {
- ref, err := refIter.Next()
- if err == io.EOF {
- break
- }
-
- if err == plumbing.ErrReferenceNotFound {
- continue
- }
-
- if err != nil {
- return nil, err
- }
-
- if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
- return nil, err
- }
- }
-
- return &commitAllIterator{commitsPath.Front()}, nil
-}
-
-func addReference(
- repoStorer storage.Storer,
- commitIterFunc func(*Commit) CommitIter,
- ref *plumbing.Reference,
- commitsPath *list.List,
- commitsLookup map[plumbing.Hash]*list.Element) error {
-
- _, exists := commitsLookup[ref.Hash()]
- if exists {
- // we already have it - skip the reference.
- return nil
- }
-
- refCommit, _ := GetCommit(repoStorer, ref.Hash())
- if refCommit == nil {
- // if it's not a commit - skip it.
- return nil
- }
-
- var (
- refCommits []*Commit
- parent *list.Element
- )
- // collect all ref commits to add
- commitIter := commitIterFunc(refCommit)
- for c, e := commitIter.Next(); e == nil; {
- parent, exists = commitsLookup[c.Hash]
- if exists {
- break
- }
- refCommits = append(refCommits, c)
- c, e = commitIter.Next()
- }
- commitIter.Close()
-
- if parent == nil {
- // common parent not found: add all commits to the path from this ref
- // (maybe it's a HEAD and we don't have anything yet)
- for _, c := range refCommits {
- parent = commitsPath.PushBack(c)
- commitsLookup[c.Hash] = parent
- }
- } else {
- // add ref's commits to the path in reverse order (from the latest)
- for i := len(refCommits) - 1; i >= 0; i-- {
- c := refCommits[i]
- // insert before found common parent
- parent = commitsPath.InsertBefore(c, parent)
- commitsLookup[c.Hash] = parent
- }
- }
-
- return nil
-}
-
-func (it *commitAllIterator) Next() (*Commit, error) {
- if it.currCommit == nil {
- return nil, io.EOF
- }
-
- c := it.currCommit.Value.(*Commit)
- it.currCommit = it.currCommit.Next()
-
- return c, nil
-}
-
-func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := it.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (it *commitAllIterator) Close() {
- it.currCommit = nil
-}
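
A usage sketch for the all-refs iterator, using NewCommitIterCTime (defined in a sibling file of this package) as the per-ref ordering function:

    package main

    import (
        "fmt"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }

        // Walk every commit reachable from any ref, ordering each ref's
        // history by committer time (the order closest to `git log`).
        iter, err := object.NewCommitAllIter(repo.Storer,
            func(c *object.Commit) object.CommitIter {
                return object.NewCommitIterCTime(c, nil, nil)
            })
        if err != nil {
            panic(err)
        }
        defer iter.Close()

        err = iter.ForEach(func(c *object.Commit) error {
            fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
            return nil
        })
        if err != nil {
            panic(err)
        }
    }
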
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs.go
deleted file mode 100644
index dabfe75c27..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package object
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-type bfsCommitIterator struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- queue []*Commit
-}
-
-// NewCommitIterBSF returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents in breadth-first
-// order. Each commit is visited only once. If the callback passed to ForEach
-// returns an error, walking stops and that error is returned. Other errors
-// might be returned if the history cannot be traversed (e.g. missing
-// objects). The ignore parameter allows skipping some commits during
-// iteration.
-func NewCommitIterBSF(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- return &bfsCommitIterator{
- seenExternal: seenExternal,
- seen: seen,
- queue: []*Commit{c},
- }
-}
-
-func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error {
- if w.seen[h] || w.seenExternal[h] {
- return nil
- }
- c, err := GetCommit(store, h)
- if err != nil {
- return err
- }
- w.queue = append(w.queue, c)
- return nil
-}
-
-func (w *bfsCommitIterator) Next() (*Commit, error) {
- var c *Commit
- for {
- if len(w.queue) == 0 {
- return nil, io.EOF
- }
- c = w.queue[0]
- w.queue = w.queue[1:]
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- for _, h := range c.ParentHashes {
- err := w.appendHash(c.s, h)
- if err != nil {
- return nil, err
- }
- }
-
- return c, nil
- }
-}
-
-func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *bfsCommitIterator) Close() {}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go
deleted file mode 100644
index b12523d489..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package object
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// NewFilterCommitIter returns a CommitIter that walks the commit history,
-// starting at the passed commit and visiting its parents in breadth-first order.
-// The commits returned by the CommitIter will satisfy the passed CommitFilter.
-// The history won't be traversed beyond a commit if isLimit is true for it.
-// Each commit will be visited only once.
-// If the commit history cannot be traversed, or the Close() method is called,
-// the CommitIter won't return more commits.
-// If no isValid is passed, all ancestors of the from commit will be valid.
-// If no isLimit is passed, all ancestors of all commits will be visited.
-func NewFilterCommitIter(
- from *Commit,
- isValid *CommitFilter,
- isLimit *CommitFilter,
-) CommitIter {
- var validFilter CommitFilter
- if isValid == nil {
- validFilter = func(_ *Commit) bool {
- return true
- }
- } else {
- validFilter = *isValid
- }
-
- var limitFilter CommitFilter
- if isLimit == nil {
- limitFilter = func(_ *Commit) bool {
- return false
- }
- } else {
- limitFilter = *isLimit
- }
-
- return &filterCommitIter{
- isValid: validFilter,
- isLimit: limitFilter,
- visited: map[plumbing.Hash]struct{}{},
- queue: []*Commit{from},
- }
-}
-
-// CommitFilter returns a boolean for the passed Commit
-type CommitFilter func(*Commit) bool
-
-// filterCommitIter implements CommitIter.
-type filterCommitIter struct {
- isValid CommitFilter
- isLimit CommitFilter
- visited map[plumbing.Hash]struct{}
- queue []*Commit
- lastErr error
-}
-
-// Next returns the next commit of the CommitIter.
-// It will return io.EOF if there are no more commits to visit,
-// or an error if the history could not be traversed.
-func (w *filterCommitIter) Next() (*Commit, error) {
- var commit *Commit
- var err error
- for {
- commit, err = w.popNewFromQueue()
- if err != nil {
- return nil, w.close(err)
- }
-
- w.visited[commit.Hash] = struct{}{}
-
- if !w.isLimit(commit) {
- err = w.addToQueue(commit.s, commit.ParentHashes...)
- if err != nil {
- return nil, w.close(err)
- }
- }
-
- if w.isValid(commit) {
- return commit, nil
- }
- }
-}
-
-// ForEach runs the passed callback over each Commit returned by the CommitIter
-// until the callback returns an error or there are no more commits to traverse.
-func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
- for {
- commit, err := w.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return err
- }
-
- if err := cb(commit); err == storer.ErrStop {
- break
- } else if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Error returns the error that caused the CommitIter to stop returning commits.
-func (w *filterCommitIter) Error() error {
- return w.lastErr
-}
-
-// Close closes the CommitIter
-func (w *filterCommitIter) Close() {
- w.visited = map[plumbing.Hash]struct{}{}
- w.queue = []*Commit{}
- w.isLimit = nil
- w.isValid = nil
-}
-
-// close closes the CommitIter with an error
-func (w *filterCommitIter) close(err error) error {
- w.Close()
- w.lastErr = err
- return err
-}
-
-// popNewFromQueue returns the first new commit from the internal FIFO queue,
-// or an io.EOF error if the queue is empty.
-func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
- var first *Commit
- for {
- if len(w.queue) == 0 {
- if w.lastErr != nil {
- return nil, w.lastErr
- }
-
- return nil, io.EOF
- }
-
- first = w.queue[0]
- w.queue = w.queue[1:]
- if _, ok := w.visited[first.Hash]; ok {
- continue
- }
-
- return first, nil
- }
-}
-
-// addToQueue adds the passed commits to the internal FIFO queue if they
-// weren't already seen, or returns an error if the passed hashes could not
-// be used to get valid commits.
-func (w *filterCommitIter) addToQueue(
- store storer.EncodedObjectStorer,
- hashes ...plumbing.Hash,
-) error {
- for _, hash := range hashes {
- if _, ok := w.visited[hash]; ok {
- continue
- }
-
- commit, err := GetCommit(store, hash)
- if err != nil {
- return err
- }
-
- w.queue = append(w.queue, commit)
- }
-
- return nil
-}
-
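
A usage sketch for the filtered walker; both filters below are illustrative: yield only commits whose message starts with "fix:" and stop descending once a merge commit is reached:

    package main

    import (
        "fmt"
        "strings"

        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/object"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }
        ref, err := repo.Head()
        if err != nil {
            panic(err)
        }
        head, err := repo.CommitObject(ref.Hash())
        if err != nil {
            panic(err)
        }

        isFix := object.CommitFilter(func(c *object.Commit) bool {
            return strings.HasPrefix(c.Message, "fix:")
        })
        isMerge := object.CommitFilter(func(c *object.Commit) bool {
            return c.NumParents() > 1 // don't walk beyond merges
        })

        iter := object.NewFilterCommitIter(head, &isFix, &isMerge)
        defer iter.Close()

        iter.ForEach(func(c *object.Commit) error {
            fmt.Println(c.Hash, strings.SplitN(c.Message, "\n", 2)[0])
            return nil
        })
    }
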
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_ctime.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_ctime.go
deleted file mode 100644
index 019161496f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_ctime.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package object
-
-import (
- "io"
-
- "github.com/emirpasic/gods/trees/binaryheap"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-type commitIteratorByCTime struct {
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
- heap *binaryheap.Heap
-}
-
-// NewCommitIterCTime returns a CommitIter that walks the commit history,
-// starting at the given commit and visiting its parents while preserving
-// committer time order; this appears to be the closest order to `git log`.
-// Each commit is visited only once. If the callback passed to ForEach returns
-// an error, walking stops and that error is returned. Other errors might be
-// returned if the history cannot be traversed (e.g. missing objects). The
-// ignore parameter allows skipping some commits during iteration.
-func NewCommitIterCTime(
- c *Commit,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- heap := binaryheap.NewWith(func(a, b interface{}) int {
- if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) {
- return 1
- }
- return -1
- })
- heap.Push(c)
-
- return &commitIteratorByCTime{
- seenExternal: seenExternal,
- seen: seen,
- heap: heap,
- }
-}
-
-func (w *commitIteratorByCTime) Next() (*Commit, error) {
- var c *Commit
- for {
- cIn, ok := w.heap.Pop()
- if !ok {
- return nil, io.EOF
- }
- c = cIn.(*Commit)
-
- if w.seen[c.Hash] || w.seenExternal[c.Hash] {
- continue
- }
-
- w.seen[c.Hash] = true
-
- for _, h := range c.ParentHashes {
- if w.seen[h] || w.seenExternal[h] {
- continue
- }
- pc, err := GetCommit(c.s, h)
- if err != nil {
- return nil, err
- }
- w.heap.Push(pc)
- }
-
- return c, nil
- }
-}
-
-func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitIteratorByCTime) Close() {}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go
deleted file mode 100644
index 6f16e611f1..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package object
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-type commitFileIter struct {
- fileName string
- sourceIter CommitIter
- currentCommit *Commit
- checkParent bool
-}
-
-// NewCommitFileIterFromIter returns a commit iterator which performs diffTree
-// between successive trees returned by the given commit iterator. The purpose
-// of this is to find the commits that explain how the files matching the path
-// came to be.
-// If checkParent is true, the function double-checks whether the potential
-// parent (the next commit in the path) is one of the parents in the tree
-// (this is used by `git log --all`).
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
- iterator := new(commitFileIter)
- iterator.sourceIter = commitIter
- iterator.fileName = fileName
- iterator.checkParent = checkParent
- return iterator
-}
-
-func (c *commitFileIter) Next() (*Commit, error) {
- if c.currentCommit == nil {
- var err error
- c.currentCommit, err = c.sourceIter.Next()
- if err != nil {
- return nil, err
- }
- }
- commit, commitErr := c.getNextFileCommit()
-
-	// Reset the current commit to nil to avoid carrying stale state when an error is raised
- if commitErr != nil {
- c.currentCommit = nil
- }
- return commit, commitErr
-}
-
-func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
- for {
- // Parent-commit can be nil if the current-commit is the initial commit
- parentCommit, parentCommitErr := c.sourceIter.Next()
- if parentCommitErr != nil {
- // If the parent-commit is beyond the initial commit, keep it nil
- if parentCommitErr != io.EOF {
- return nil, parentCommitErr
- }
- parentCommit = nil
- }
-
- // Fetch the trees of the current and parent commits
- currentTree, currTreeErr := c.currentCommit.Tree()
- if currTreeErr != nil {
- return nil, currTreeErr
- }
-
- var parentTree *Tree
- if parentCommit != nil {
- var parentTreeErr error
- parentTree, parentTreeErr = parentCommit.Tree()
- if parentTreeErr != nil {
- return nil, parentTreeErr
- }
- }
-
- // Find diff between current and parent trees
- changes, diffErr := DiffTree(currentTree, parentTree)
- if diffErr != nil {
- return nil, diffErr
- }
-
- found := c.hasFileChange(changes, parentCommit)
-
-		// Store the current commit in case a change is found, and
-		// update the current commit for the next iteration
- prevCommit := c.currentCommit
- c.currentCommit = parentCommit
-
- if found {
- return prevCommit, nil
- }
-
-		// If no match was found and there is no parent commit (the walk is past the initial commit), return EOF
- if parentCommit == nil {
- return nil, io.EOF
- }
- }
-}
-
-func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
- for _, change := range changes {
- if change.name() != c.fileName {
- continue
- }
-
- // filename matches, now check if source iterator contains all commits (from all refs)
- if c.checkParent {
- if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
- return true
- }
- continue
- }
-
- return true
- }
-
- return false
-}
-
-func isParentHash(hash plumbing.Hash, commit *Commit) bool {
- for _, h := range commit.ParentHashes {
- if h == hash {
- return true
- }
- }
- return false
-}
-
-func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
- for {
- commit, nextErr := c.Next()
- if nextErr != nil {
- return nextErr
- }
- err := cb(commit)
- if err == storer.ErrStop {
- return nil
- } else if err != nil {
- return err
- }
- }
-}
-
-func (c *commitFileIter) Close() {
- c.sourceIter.Close()
-}
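commit_walker_file.go combined a source iterator with tree diffing to answer "which commits changed this file?". A hedged sketch of the usual wiring, assuming a hypothetical repository path and using the committer-time walker from the previous file as the source:

package main

import (
	"fmt"
	"io"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Wrap a plain history iterator so that successive trees are diffed
	// and only commits touching "README.md" are yielded. checkParent is
	// false here, matching `git log -- <path>` rather than `git log --all`.
	commits := object.NewCommitIterCTime(head, nil, nil)
	files := object.NewCommitFileIterFromIter("README.md", commits, false)
	defer files.Close()

	for {
		c, err := files.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(c.Hash)
	}
}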
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode.go
deleted file mode 100644
index e218d3210b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package commitgraph
-
-import (
- "io"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// CommitNode is a generic interface encapsulating a lightweight commit object
-// retrieved from a CommitNodeIndex
-type CommitNode interface {
- // ID returns the Commit object id referenced by the commit graph node.
- ID() plumbing.Hash
- // Tree returns the Tree referenced by the commit graph node.
- Tree() (*object.Tree, error)
-	// CommitTime returns the Committer.When time of the Commit referenced by the commit graph node.
- CommitTime() time.Time
- // NumParents returns the number of parents in a commit.
- NumParents() int
-	// ParentNodes returns a CommitNodeIter over the parents of the specified node.
- ParentNodes() CommitNodeIter
- // ParentNode returns the ith parent of a commit.
- ParentNode(i int) (CommitNode, error)
-	// ParentHashes returns the hashes of the parent commits for the specified node.
- ParentHashes() []plumbing.Hash
- // Generation returns the generation of the commit for reachability analysis.
- // Objects with newer generation are not reachable from objects of older generation.
- Generation() uint64
- // Commit returns the full commit object from the node
- Commit() (*object.Commit, error)
-}
-
-// CommitNodeIndex is a generic interface encapsulating an index of CommitNode objects
-type CommitNodeIndex interface {
- // Get returns a commit node from a commit hash
- Get(hash plumbing.Hash) (CommitNode, error)
-}
-
-// CommitNodeIter is a generic closable interface for iterating over commit nodes.
-type CommitNodeIter interface {
- Next() (CommitNode, error)
- ForEach(func(CommitNode) error) error
- Close()
-}
-
-// parentCommitNodeIter provides an iterator for parent commits from associated CommitNodeIndex.
-type parentCommitNodeIter struct {
- node CommitNode
- i int
-}
-
-func newParentgraphCommitNodeIter(node CommitNode) CommitNodeIter {
- return &parentCommitNodeIter{node, 0}
-}
-
-// Next moves the iterator to the next commit and returns a pointer to it. If
-// there are no more commits, it returns io.EOF.
-func (iter *parentCommitNodeIter) Next() (CommitNode, error) {
- obj, err := iter.node.ParentNode(iter.i)
- if err == object.ErrParentNotFound {
- return nil, io.EOF
- }
- if err == nil {
- iter.i++
- }
-
- return obj, err
-}
-
-// ForEach calls the cb function for each commit contained in this iter until
-// an error occurs or the end of the iter is reached. If storer.ErrStop is
-// returned by cb, the iteration stops but no error is returned. The iterator
-// is closed.
-func (iter *parentCommitNodeIter) ForEach(cb func(CommitNode) error) error {
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == storer.ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-func (iter *parentCommitNodeIter) Close() {
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_graph.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_graph.go
deleted file mode 100644
index bd54e18886..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_graph.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package commitgraph
-
-import (
- "fmt"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// graphCommitNode is a reduced representation of a Commit as presented in the
-// commit-graph file (commitgraph.CommitData). It is merely useful as an
-// optimization for walking the commit graph.
-//
-// graphCommitNode implements the CommitNode interface.
-type graphCommitNode struct {
- // Hash for the Commit object
- hash plumbing.Hash
- // Index of the node in the commit graph file
- index int
-
- commitData *commitgraph.CommitData
- gci *graphCommitNodeIndex
-}
-
-// graphCommitNodeIndex is an index that can load CommitNode objects from both the commit
-// graph files and the object store.
-//
-// graphCommitNodeIndex implements the CommitNodeIndex interface
-type graphCommitNodeIndex struct {
- commitGraph commitgraph.Index
- s storer.EncodedObjectStorer
-}
-
-// NewGraphCommitNodeIndex returns a CommitNodeIndex implementation that uses
-// commit-graph files as backing storage and falls back to object storage when
-// necessary.
-func NewGraphCommitNodeIndex(commitGraph commitgraph.Index, s storer.EncodedObjectStorer) CommitNodeIndex {
- return &graphCommitNodeIndex{commitGraph, s}
-}
-
-func (gci *graphCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
- // Check the commit graph first
- parentIndex, err := gci.commitGraph.GetIndexByHash(hash)
- if err == nil {
- parent, err := gci.commitGraph.GetCommitDataByIndex(parentIndex)
- if err != nil {
- return nil, err
- }
-
- return &graphCommitNode{
- hash: hash,
- index: parentIndex,
- commitData: parent,
- gci: gci,
- }, nil
- }
-
- // Fallback to loading full commit object
- commit, err := object.GetCommit(gci.s, hash)
- if err != nil {
- return nil, err
- }
-
- return &objectCommitNode{
- nodeIndex: gci,
- commit: commit,
- }, nil
-}
-
-func (c *graphCommitNode) ID() plumbing.Hash {
- return c.hash
-}
-
-func (c *graphCommitNode) Tree() (*object.Tree, error) {
- return object.GetTree(c.gci.s, c.commitData.TreeHash)
-}
-
-func (c *graphCommitNode) CommitTime() time.Time {
- return c.commitData.When
-}
-
-func (c *graphCommitNode) NumParents() int {
- return len(c.commitData.ParentIndexes)
-}
-
-func (c *graphCommitNode) ParentNodes() CommitNodeIter {
- return newParentgraphCommitNodeIter(c)
-}
-
-func (c *graphCommitNode) ParentNode(i int) (CommitNode, error) {
- if i < 0 || i >= len(c.commitData.ParentIndexes) {
- return nil, object.ErrParentNotFound
- }
-
- parent, err := c.gci.commitGraph.GetCommitDataByIndex(c.commitData.ParentIndexes[i])
- if err != nil {
- return nil, err
- }
-
- return &graphCommitNode{
- hash: c.commitData.ParentHashes[i],
- index: c.commitData.ParentIndexes[i],
- commitData: parent,
- gci: c.gci,
- }, nil
-}
-
-func (c *graphCommitNode) ParentHashes() []plumbing.Hash {
- return c.commitData.ParentHashes
-}
-
-func (c *graphCommitNode) Generation() uint64 {
-	// If the commit-graph file was generated by an older Git version that
-	// set the generation to zero for every commit, the generation assumption
-	// is still valid; it is just less useful.
- return uint64(c.commitData.Generation)
-}
-
-func (c *graphCommitNode) Commit() (*object.Commit, error) {
- return object.GetCommit(c.gci.s, c.hash)
-}
-
-func (c *graphCommitNode) String() string {
- return fmt.Sprintf(
- "%s %s\nDate: %s",
- plumbing.CommitObject, c.ID(),
- c.CommitTime().Format(object.DateFormat),
- )
-}
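The graph-backed index falls back to plain object storage whenever a hash is missing from the commit-graph file. One way to see the fallback in isolation is to feed it an empty in-memory index; treating NewMemoryIndex as available from the vendored format/commitgraph package is an assumption here, as is the repository path:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	formatcg "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
	"gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}

	// An empty in-memory index (assumed API): every lookup misses, so Get
	// exercises the fallback path and loads the full commit from storage.
	idx := formatcg.NewMemoryIndex()
	index := commitgraph.NewGraphCommitNodeIndex(idx, repo.Storer)

	node, err := index.Get(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Nodes outside the commit-graph file report the highest generation.
	fmt.Println(node.ID(), node.Generation())
}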
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_object.go
deleted file mode 100644
index 2779a54bc7..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_object.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package commitgraph
-
-import (
- "math"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// objectCommitNode is a representation of a Commit as presented in the Git object format.
-//
-// objectCommitNode implements the CommitNode interface.
-type objectCommitNode struct {
- nodeIndex CommitNodeIndex
- commit *object.Commit
-}
-
-// NewObjectCommitNodeIndex returns a CommitNodeIndex implementation that uses
-// only object storage to load the nodes.
-func NewObjectCommitNodeIndex(s storer.EncodedObjectStorer) CommitNodeIndex {
- return &objectCommitNodeIndex{s}
-}
-
-func (oci *objectCommitNodeIndex) Get(hash plumbing.Hash) (CommitNode, error) {
- commit, err := object.GetCommit(oci.s, hash)
- if err != nil {
- return nil, err
- }
-
- return &objectCommitNode{
- nodeIndex: oci,
- commit: commit,
- }, nil
-}
-
-// objectCommitNodeIndex is an index that can load CommitNode objects only from the
-// object store.
-//
-// objectCommitNodeIndex implements the CommitNodeIndex interface
-type objectCommitNodeIndex struct {
- s storer.EncodedObjectStorer
-}
-
-func (c *objectCommitNode) CommitTime() time.Time {
- return c.commit.Committer.When
-}
-
-func (c *objectCommitNode) ID() plumbing.Hash {
- return c.commit.ID()
-}
-
-func (c *objectCommitNode) Tree() (*object.Tree, error) {
- return c.commit.Tree()
-}
-
-func (c *objectCommitNode) NumParents() int {
- return c.commit.NumParents()
-}
-
-func (c *objectCommitNode) ParentNodes() CommitNodeIter {
- return newParentgraphCommitNodeIter(c)
-}
-
-func (c *objectCommitNode) ParentNode(i int) (CommitNode, error) {
- if i < 0 || i >= len(c.commit.ParentHashes) {
- return nil, object.ErrParentNotFound
- }
-
- // Note: It's necessary to go through CommitNodeIndex here to ensure
- // that if the commit-graph file covers only part of the history we
- // start using it when that part is reached.
- return c.nodeIndex.Get(c.commit.ParentHashes[i])
-}
-
-func (c *objectCommitNode) ParentHashes() []plumbing.Hash {
- return c.commit.ParentHashes
-}
-
-	// Commit nodes representing objects outside of the commit-graph can never
-	// be reached by objects from the commit-graph, so we return the highest
-	// possible value.
- // possible value.
- return math.MaxUint64
-}
-
-func (c *objectCommitNode) Commit() (*object.Commit, error) {
- return c.commit, nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_walker_ctime.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_walker_ctime.go
deleted file mode 100644
index f6a1b6a4ef..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/commitnode_walker_ctime.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package commitgraph
-
-import (
- "io"
-
- "github.com/emirpasic/gods/trees/binaryheap"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-type commitNodeIteratorByCTime struct {
- heap *binaryheap.Heap
- seenExternal map[plumbing.Hash]bool
- seen map[plumbing.Hash]bool
-}
-
-// NewCommitNodeIterCTime returns a CommitNodeIter that walks the commit history,
-// starting at the given commit and visiting its parents in committer-time
-// order; this appears to be the order closest to `git log`.
-// Each commit is visited only once. Errors might be returned if the history
-// cannot be traversed (e.g. missing objects). Commits listed in ignore, as
-// well as those marked in seenExternal, are skipped.
-func NewCommitNodeIterCTime(
- c CommitNode,
- seenExternal map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
-) CommitNodeIter {
- seen := make(map[plumbing.Hash]bool)
- for _, h := range ignore {
- seen[h] = true
- }
-
- heap := binaryheap.NewWith(func(a, b interface{}) int {
- if a.(CommitNode).CommitTime().Before(b.(CommitNode).CommitTime()) {
- return 1
- }
- return -1
- })
-
- heap.Push(c)
-
- return &commitNodeIteratorByCTime{
- heap: heap,
- seenExternal: seenExternal,
- seen: seen,
- }
-}
-
-func (w *commitNodeIteratorByCTime) Next() (CommitNode, error) {
- var c CommitNode
- for {
- cIn, ok := w.heap.Pop()
- if !ok {
- return nil, io.EOF
- }
- c = cIn.(CommitNode)
- cID := c.ID()
-
- if w.seen[cID] || w.seenExternal[cID] {
- continue
- }
-
- w.seen[cID] = true
-
- for i, h := range c.ParentHashes() {
- if w.seen[h] || w.seenExternal[h] {
- continue
- }
- pc, err := c.ParentNode(i)
- if err != nil {
- return nil, err
- }
- w.heap.Push(pc)
- }
-
- return c, nil
- }
-}
-
-func (w *commitNodeIteratorByCTime) ForEach(cb func(CommitNode) error) error {
- for {
- c, err := w.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- err = cb(c)
- if err == storer.ErrStop {
- break
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *commitNodeIteratorByCTime) Close() {}
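Putting the commitgraph pieces together: an index produces CommitNode values, and the walker above orders them by committer time. A sketch against the plain object-storage index, with the repository path as a placeholder:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}

	// Plain object-storage index; a graph-backed index works identically.
	index := commitgraph.NewObjectCommitNodeIndex(repo.Storer)
	head, err := index.Get(ref.Hash())
	if err != nil {
		panic(err)
	}

	iter := commitgraph.NewCommitNodeIterCTime(head, nil, nil)
	defer iter.Close()
	err = iter.ForEach(func(n commitgraph.CommitNode) error {
		fmt.Println(n.ID(), n.CommitTime())
		return nil
	})
	if err != nil {
		panic(err)
	}
}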
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/doc.go
deleted file mode 100644
index 0a55ad5b01..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commitgraph/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package commitgraph provides an interface for efficient traversal of the
-// Git commit graph, either through the regular object storage or, optionally,
-// with the index stored in a commit-graph file (Git 2.18+).
-//
-// The API and functionality of this package are considered EXPERIMENTAL and
-// are neither stable nor production ready.
-package commitgraph
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go
deleted file mode 100644
index 3591f5f0a6..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package object
-
-import (
- "bufio"
- "sync"
-)
-
-var bufPool = sync.Pool{
- New: func() interface{} {
- return bufio.NewReader(nil)
- },
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go
deleted file mode 100644
index a30a29e37f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// DiffTree compares the content and mode of the blobs found via two
-// tree objects.
-func DiffTree(a, b *Tree) (Changes, error) {
- return DiffTreeContext(context.Background(), a, b)
-}
-
-// DiffTreeContext compares the content and mode of the blobs found via two
-// tree objects. The provided context must be non-nil; an error is returned
-// if the context expires.
-func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
- from := NewTreeRootNode(a)
- to := NewTreeRootNode(b)
-
- hashEqual := func(a, b noder.Hasher) bool {
- return bytes.Equal(a.Hash(), b.Hash())
- }
-
- merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
- if err != nil {
- if err == merkletrie.ErrCanceled {
- return nil, ErrCanceled
- }
- return nil, err
- }
-
- return newChanges(merkletrieChanges)
-}
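DiffTreeContext is the cancellable core that the walkers and patches above build on. A sketch of bounding a tree diff with a timeout, assuming a placeholder repository path and that HEAD has at least one parent:

package main

import (
	"context"
	"fmt"
	"time"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}
	parent, err := head.Parents().Next() // assumes HEAD has a parent
	if err != nil {
		panic(err)
	}

	from, err := parent.Tree()
	if err != nil {
		panic(err)
	}
	to, err := head.Tree()
	if err != nil {
		panic(err)
	}

	// Bound the diff with a timeout; DiffTreeContext returns ErrCanceled
	// when the context expires mid-walk.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	changes, err := object.DiffTreeContext(ctx, from, to)
	if err != nil {
		panic(err)
	}
	for _, change := range changes {
		fmt.Println(change)
	}
}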
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go
deleted file mode 100644
index 1c5fdbb386..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package object
-
-import (
- "bytes"
- "io"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/binary"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// File represents git file objects.
-type File struct {
-	// Name is the path of the file. It might be relative to a tree,
-	// depending on the function that generates it.
- Name string
- // Mode is the file mode.
- Mode filemode.FileMode
- // Blob with the contents of the file.
- Blob
-}
-
-// NewFile returns a File based on the given blob object
-func NewFile(name string, m filemode.FileMode, b *Blob) *File {
- return &File{Name: name, Mode: m, Blob: *b}
-}
-
-// Contents returns the contents of a file as a string.
-func (f *File) Contents() (content string, err error) {
- reader, err := f.Reader()
- if err != nil {
- return "", err
- }
- defer ioutil.CheckClose(reader, &err)
-
- buf := new(bytes.Buffer)
- if _, err := buf.ReadFrom(reader); err != nil {
- return "", err
- }
-
- return buf.String(), nil
-}
-
-// IsBinary reports whether the file is binary.
-func (f *File) IsBinary() (bin bool, err error) {
- reader, err := f.Reader()
- if err != nil {
- return false, err
- }
- defer ioutil.CheckClose(reader, &err)
-
- return binary.IsBinary(reader)
-}
-
-// Lines returns a slice of lines from the contents of a file, stripping
-// all end-of-line characters. If the last line is empty (i.e. the content
-// ends with an end-of-line character), it is also stripped.
-func (f *File) Lines() ([]string, error) {
- content, err := f.Contents()
- if err != nil {
- return nil, err
- }
-
- splits := strings.Split(content, "\n")
- // remove the last line if it is empty
- if splits[len(splits)-1] == "" {
- return splits[:len(splits)-1], nil
- }
-
- return splits, nil
-}
-
-// FileIter provides an iterator for the files in a tree.
-type FileIter struct {
- s storer.EncodedObjectStorer
- w TreeWalker
-}
-
-// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a
-// *FileIter that iterates over all files contained in the tree, recursively.
-func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter {
- return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)}
-}
-
-// Next moves the iterator to the next file and returns a pointer to it. If
-// there are no more files, it returns io.EOF.
-func (iter *FileIter) Next() (*File, error) {
- for {
- name, entry, err := iter.w.Next()
- if err != nil {
- return nil, err
- }
-
- if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
- continue
- }
-
- blob, err := GetBlob(iter.s, entry.Hash)
- if err != nil {
- return nil, err
- }
-
- return NewFile(name, entry.Mode, blob), nil
- }
-}
-
-// ForEach calls the cb function for each file contained in this iter until
-// an error occurs or the end of the iter is reached. If storer.ErrStop is
-// returned by cb, the iteration stops but no error is returned. The iterator
-// is closed.
-func (iter *FileIter) ForEach(cb func(*File) error) error {
- defer iter.Close()
-
- for {
- f, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(f); err != nil {
- if err == storer.ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-func (iter *FileIter) Close() {
- iter.w.Close()
-}
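A short sketch of the file iterator via the Tree.Files convenience constructor (defined in tree.go later in this diff); the repository path is a placeholder:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}
	tree, err := head.Tree()
	if err != nil {
		panic(err)
	}

	// tree.Files() wraps NewFileIter; ForEach closes the iterator for us
	// when it returns, so no explicit Close is needed here.
	err = tree.Files().ForEach(func(f *object.File) error {
		bin, err := f.IsBinary()
		if err != nil {
			return err
		}
		fmt.Printf("%s %s binary=%v\n", f.Mode, f.Name, bin)
		return nil
	})
	if err != nil {
		panic(err)
	}
}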
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go
deleted file mode 100644
index 6f2568dbc8..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package object
-
-import (
- "fmt"
- "sort"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// errIsReachable is returned when the first commit is an ancestor of the second
-var errIsReachable = fmt.Errorf("first is reachable from second")
-
-// MergeBase mimics the behavior of `git merge-base actual other`, returning
-// the best common ancestors of the actual commit and the passed one.
-// A best common ancestor cannot be reached from any other common ancestor.
-func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
- // use sortedByCommitDateDesc strategy
- sorted := sortByCommitDateDesc(c, other)
- newer := sorted[0]
- older := sorted[1]
-
- newerHistory, err := ancestorsIndex(older, newer)
- if err == errIsReachable {
- return []*Commit{older}, nil
- }
-
- if err != nil {
- return nil, err
- }
-
- var res []*Commit
- inNewerHistory := isInIndexCommitFilter(newerHistory)
- resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
- _ = resIter.ForEach(func(commit *Commit) error {
- res = append(res, commit)
- return nil
- })
-
- return Independents(res)
-}
-
-// IsAncestor returns true if the actual commit is an ancestor of the passed
-// one. It returns an error if the history is not traversable.
-// It mimics the behavior of `git merge-base --is-ancestor actual other`.
-func (c *Commit) IsAncestor(other *Commit) (bool, error) {
- found := false
- iter := NewCommitPreorderIter(other, nil, nil)
- err := iter.ForEach(func(comm *Commit) error {
- if comm.Hash != c.Hash {
- return nil
- }
-
- found = true
- return storer.ErrStop
- })
-
- return found, err
-}
-
-// ancestorsIndex returns a map with the ancestors of the starting commit if the
-// excluded one is not one of them. It returns errIsReachable if the excluded
-// commit is an ancestor of the starting one, or another error if the history is
-// not traversable.
-func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
- if excluded.Hash.String() == starting.Hash.String() {
- return nil, errIsReachable
- }
-
- startingHistory := map[plumbing.Hash]struct{}{}
- startingIter := NewCommitIterBSF(starting, nil, nil)
- err := startingIter.ForEach(func(commit *Commit) error {
- if commit.Hash == excluded.Hash {
- return errIsReachable
- }
-
- startingHistory[commit.Hash] = struct{}{}
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- return startingHistory, nil
-}
-
-// Independents returns a subset of the passed commits that are not reachable
-// from the others.
-// It mimics the behavior of `git merge-base --independent commit...`.
-func Independents(commits []*Commit) ([]*Commit, error) {
- // use sortedByCommitDateDesc strategy
- candidates := sortByCommitDateDesc(commits...)
- candidates = removeDuplicated(candidates)
-
- seen := map[plumbing.Hash]struct{}{}
- var isLimit CommitFilter = func(commit *Commit) bool {
- _, ok := seen[commit.Hash]
- return ok
- }
-
- if len(candidates) < 2 {
- return candidates, nil
- }
-
- pos := 0
- for {
- from := candidates[pos]
- others := remove(candidates, from)
- fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
- err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
- for _, other := range others {
- if fromAncestor.Hash == other.Hash {
- candidates = remove(candidates, other)
- others = remove(others, other)
- }
- }
-
- if len(candidates) == 1 {
- return storer.ErrStop
- }
-
- seen[fromAncestor.Hash] = struct{}{}
- return nil
- })
-
- if err != nil {
- return nil, err
- }
-
- nextPos := indexOf(candidates, from) + 1
- if nextPos >= len(candidates) {
- break
- }
-
- pos = nextPos
- }
-
- return candidates, nil
-}
-
-// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`.
-//
-// This strategy tries to reduce the time needed to reach the other commits when
-// walking the history starting from one of them, on the assumption that
-// ancestors are usually committed before their descendants.
-// That way `Independents(A^, A)` is processed as `Independents(A, A^)`;
-// starting from `A`, `A^` is reached much sooner than it would be by walking
-// from `A^` to the initial commit and then from `A` to `A^`.
-func sortByCommitDateDesc(commits ...*Commit) []*Commit {
- sorted := make([]*Commit, len(commits))
- copy(sorted, commits)
- sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Committer.When.After(sorted[j].Committer.When)
- })
-
- return sorted
-}
-
-// indexOf returns the first position where target was found in the passed commits
-func indexOf(commits []*Commit, target *Commit) int {
- for i, commit := range commits {
- if target.Hash == commit.Hash {
- return i
- }
- }
-
- return -1
-}
-
-// remove returns the passed commits excluding the commit toDelete
-func remove(commits []*Commit, toDelete *Commit) []*Commit {
- res := make([]*Commit, len(commits))
- j := 0
- for _, commit := range commits {
- if commit.Hash == toDelete.Hash {
- continue
- }
-
- res[j] = commit
- j++
- }
-
- return res[:j]
-}
-
-// removeDuplicated removes duplicated commits from the passed slice of commits
-func removeDuplicated(commits []*Commit) []*Commit {
- seen := make(map[plumbing.Hash]struct{}, len(commits))
- res := make([]*Commit, len(commits))
- j := 0
- for _, commit := range commits {
- if _, ok := seen[commit.Hash]; ok {
- continue
- }
-
- seen[commit.Hash] = struct{}{}
- res[j] = commit
- j++
- }
-
- return res[:j]
-}
-
-// isInIndexCommitFilter returns a CommitFilter that returns true
-// if the commit is in the passed index.
-func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
- return func(c *Commit) bool {
- _, ok := index[c.Hash]
- return ok
- }
-}
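MergeBase and IsAncestor were the entry points Gitea relied on from this file. A hedged sketch with placeholder commit IDs (substitute two real hashes from the repository):

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}

	// Placeholder hashes: these must be replaced with real commit IDs.
	a, err := repo.CommitObject(plumbing.NewHash("0000000000000000000000000000000000000000"))
	if err != nil {
		panic(err)
	}
	b, err := repo.CommitObject(plumbing.NewHash("1111111111111111111111111111111111111111"))
	if err != nil {
		panic(err)
	}

	bases, err := a.MergeBase(b)
	if err != nil {
		panic(err)
	}
	for _, base := range bases {
		fmt.Println("merge base:", base.Hash)
	}

	ok, err := a.IsAncestor(b)
	if err != nil {
		panic(err)
	}
	fmt.Println("a is ancestor of b:", ok)
}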
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go
deleted file mode 100644
index e960e50c94..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Package object contains implementations of all Git objects and utility
-// functions to work with them.
-package object
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// ErrUnsupportedObject is returned when a non-supported object is being decoded.
-var ErrUnsupportedObject = errors.New("unsupported object type")
-
-// Object is a generic representation of any git object. It is implemented by
-// Commit, Tree, Blob, and Tag, and includes the functions that are common to
-// them.
-//
-// Object is returned when an object can be of any type. It is frequently used
-// with a type switch to acquire the specific type of object:
-//
-// func process(obj Object) {
-// switch o := obj.(type) {
-// case *Commit:
-// // o is a Commit
-// case *Tree:
-// // o is a Tree
-// case *Blob:
-// // o is a Blob
-// case *Tag:
-// // o is a Tag
-// }
-// }
-//
-// This interface is intentionally different from plumbing.EncodedObject, which
-// is a lower level interface used by storage implementations to read and write
-// objects in its encoded form.
-type Object interface {
- ID() plumbing.Hash
- Type() plumbing.ObjectType
- Decode(plumbing.EncodedObject) error
- Encode(plumbing.EncodedObject) error
-}
-
-// GetObject gets an object from an object storer and decodes it.
-func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) {
- o, err := s.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeObject(s, o)
-}
-
-// DecodeObject decodes an encoded object into an Object and associates it to
-// the given object storer.
-func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) {
- switch o.Type() {
- case plumbing.CommitObject:
- return DecodeCommit(s, o)
- case plumbing.TreeObject:
- return DecodeTree(s, o)
- case plumbing.BlobObject:
- return DecodeBlob(o)
- case plumbing.TagObject:
- return DecodeTag(s, o)
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
-
-// DateFormat is the format being used in the original git implementation
-const DateFormat = "Mon Jan 02 15:04:05 2006 -0700"
-
-// Signature is used to identify who created a commit or tag, and when.
-type Signature struct {
- // Name represents a person name. It is an arbitrary string.
- Name string
- // Email is an email, but it cannot be assumed to be well-formed.
- Email string
- // When is the timestamp of the signature.
- When time.Time
-}
-
-// Decode decodes a byte slice into a signature
-func (s *Signature) Decode(b []byte) {
- open := bytes.LastIndexByte(b, '<')
- close := bytes.LastIndexByte(b, '>')
- if open == -1 || close == -1 {
- return
- }
-
- if close < open {
- return
- }
-
- s.Name = string(bytes.Trim(b[:open], " "))
- s.Email = string(b[open+1 : close])
-
- hasTime := close+2 < len(b)
- if hasTime {
- s.decodeTimeAndTimeZone(b[close+2:])
- }
-}
-
-// Encode encodes a Signature into a writer.
-func (s *Signature) Encode(w io.Writer) error {
- if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil {
- return err
- }
- if err := s.encodeTimeAndTimeZone(w); err != nil {
- return err
- }
- return nil
-}
-
-var timeZoneLength = 5
-
-func (s *Signature) decodeTimeAndTimeZone(b []byte) {
- space := bytes.IndexByte(b, ' ')
- if space == -1 {
- space = len(b)
- }
-
- ts, err := strconv.ParseInt(string(b[:space]), 10, 64)
- if err != nil {
- return
- }
-
- s.When = time.Unix(ts, 0).In(time.UTC)
- var tzStart = space + 1
- if tzStart >= len(b) || tzStart+timeZoneLength > len(b) {
- return
- }
-
- // Include a dummy year in this time.Parse() call to avoid a bug in Go:
- // https://github.com/golang/go/issues/19750
- //
- // Parsing the timezone with no other details causes the tl.Location() call
- // below to return time.Local instead of the parsed zone in some cases
- tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength]))
- if err != nil {
- return
- }
-
- s.When = s.When.In(tl.Location())
-}
-
-func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
- u := s.When.Unix()
- if u < 0 {
- u = 0
- }
- _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
- return err
-}
-
-func (s *Signature) String() string {
- return fmt.Sprintf("%s <%s>", s.Name, s.Email)
-}
-
-// ObjectIter provides an iterator for a set of objects.
-type ObjectIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewObjectIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all
-// objects contained in the storer.EncodedObjectIter.
-func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter {
- return &ObjectIter{iter, s}
-}
-
-// Next moves the iterator to the next object and returns a pointer to it. If
-// there are no more objects, it returns io.EOF.
-func (iter *ObjectIter) Next() (Object, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- o, err := iter.toObject(obj)
- if err == plumbing.ErrInvalidType {
- continue
- }
-
- if err != nil {
- return nil, err
- }
-
- return o, nil
- }
-}
-
-// ForEach calls the cb function for each object contained in this iter until
-// an error occurs or the end of the iter is reached. If storer.ErrStop is
-// returned by cb, the iteration stops but no error is returned. The iterator
-// is closed.
-func (iter *ObjectIter) ForEach(cb func(Object) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- o, err := iter.toObject(obj)
- if err == plumbing.ErrInvalidType {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- return cb(o)
- })
-}
-
-func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) {
- switch obj.Type() {
- case plumbing.BlobObject:
- blob := &Blob{}
- return blob, blob.Decode(obj)
- case plumbing.TreeObject:
- tree := &Tree{s: iter.s}
- return tree, tree.Decode(obj)
- case plumbing.CommitObject:
- commit := &Commit{}
- return commit, commit.Decode(obj)
- case plumbing.TagObject:
- tag := &Tag{}
- return tag, tag.Decode(obj)
- default:
- return nil, plumbing.ErrInvalidType
- }
-}
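Signature.Decode and Encode round-trip the wire form `Name <email> unix-timestamp timezone` used in commit and tag headers. A self-contained sketch using only the code from the file above, with a made-up signature line:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	// Decode the wire form "Name <email> unix-timestamp timezone".
	var sig object.Signature
	sig.Decode([]byte("Jane Doe <jane@example.com> 1580461648 +0100"))
	fmt.Println(sig.Name, "|", sig.Email, "|", sig.When)

	// Encode writes the same wire form back out.
	var buf bytes.Buffer
	if err := sig.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}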
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go
deleted file mode 100644
index 32454ac48d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package object
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "math"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
- "gopkg.in/src-d/go-git.v4/utils/diff"
-
- dmp "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-var (
- ErrCanceled = errors.New("operation canceled")
-)
-
-func getPatch(message string, changes ...*Change) (*Patch, error) {
- ctx := context.Background()
- return getPatchContext(ctx, message, changes...)
-}
-
-func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
- var filePatches []fdiff.FilePatch
- for _, c := range changes {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- fp, err := filePatchWithContext(ctx, c)
- if err != nil {
- return nil, err
- }
-
- filePatches = append(filePatches, fp)
- }
-
- return &Patch{message, filePatches}, nil
-}
-
-func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
- from, to, err := c.Files()
- if err != nil {
- return nil, err
- }
- fromContent, fIsBinary, err := fileContent(from)
- if err != nil {
- return nil, err
- }
-
- toContent, tIsBinary, err := fileContent(to)
- if err != nil {
- return nil, err
- }
-
- if fIsBinary || tIsBinary {
- return &textFilePatch{from: c.From, to: c.To}, nil
- }
-
- diffs := diff.Do(fromContent, toContent)
-
- var chunks []fdiff.Chunk
- for _, d := range diffs {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- var op fdiff.Operation
- switch d.Type {
- case dmp.DiffEqual:
- op = fdiff.Equal
- case dmp.DiffDelete:
- op = fdiff.Delete
- case dmp.DiffInsert:
- op = fdiff.Add
- }
-
- chunks = append(chunks, &textChunk{d.Text, op})
- }
-
- return &textFilePatch{
- chunks: chunks,
- from: c.From,
- to: c.To,
- }, nil
-
-}
-
-func filePatch(c *Change) (fdiff.FilePatch, error) {
- return filePatchWithContext(context.Background(), c)
-}
-
-func fileContent(f *File) (content string, isBinary bool, err error) {
- if f == nil {
- return
- }
-
- isBinary, err = f.IsBinary()
- if err != nil || isBinary {
- return
- }
-
- content, err = f.Contents()
-
- return
-}
-
-// Patch is an implementation of the fdiff.Patch interface
-type Patch struct {
- message string
- filePatches []fdiff.FilePatch
-}
-
-func (t *Patch) FilePatches() []fdiff.FilePatch {
- return t.filePatches
-}
-
-func (t *Patch) Message() string {
- return t.message
-}
-
-func (p *Patch) Encode(w io.Writer) error {
- ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
-
- return ue.Encode(p)
-}
-
-func (p *Patch) Stats() FileStats {
- return getFileStatsFromFilePatches(p.FilePatches())
-}
-
-func (p *Patch) String() string {
- buf := bytes.NewBuffer(nil)
- err := p.Encode(buf)
- if err != nil {
- return fmt.Sprintf("malformed patch: %s", err.Error())
- }
-
- return buf.String()
-}
-
-// changeEntryWrapper is an implementation of fdiff.File interface
-type changeEntryWrapper struct {
- ce ChangeEntry
-}
-
-func (f *changeEntryWrapper) Hash() plumbing.Hash {
- if !f.ce.TreeEntry.Mode.IsFile() {
- return plumbing.ZeroHash
- }
-
- return f.ce.TreeEntry.Hash
-}
-
-func (f *changeEntryWrapper) Mode() filemode.FileMode {
- return f.ce.TreeEntry.Mode
-}
-func (f *changeEntryWrapper) Path() string {
- if !f.ce.TreeEntry.Mode.IsFile() {
- return ""
- }
-
- return f.ce.Name
-}
-
-func (f *changeEntryWrapper) Empty() bool {
- return !f.ce.TreeEntry.Mode.IsFile()
-}
-
-// textFilePatch is an implementation of fdiff.FilePatch interface
-type textFilePatch struct {
- chunks []fdiff.Chunk
- from, to ChangeEntry
-}
-
-func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
- f := &changeEntryWrapper{tf.from}
- t := &changeEntryWrapper{tf.to}
-
- if !f.Empty() {
- from = f
- }
-
- if !t.Empty() {
- to = t
- }
-
- return
-}
-
-func (t *textFilePatch) IsBinary() bool {
- return len(t.chunks) == 0
-}
-
-func (t *textFilePatch) Chunks() []fdiff.Chunk {
- return t.chunks
-}
-
-// textChunk is an implementation of fdiff.Chunk interface
-type textChunk struct {
- content string
- op fdiff.Operation
-}
-
-func (t *textChunk) Content() string {
- return t.content
-}
-
-func (t *textChunk) Type() fdiff.Operation {
- return t.op
-}
-
-// FileStat stores the status of changes in the content of a file.
-type FileStat struct {
- Name string
- Addition int
- Deletion int
-}
-
-func (fs FileStat) String() string {
- return printStat([]FileStat{fs})
-}
-
-// FileStats is a collection of FileStat.
-type FileStats []FileStat
-
-func (fileStats FileStats) String() string {
- return printStat(fileStats)
-}
-
-func printStat(fileStats []FileStat) string {
- padLength := float64(len(" "))
- newlineLength := float64(len("\n"))
- separatorLength := float64(len("|"))
- // Soft line length limit. The text length calculation below excludes
- // length of the change number. Adding that would take it closer to 80,
- // but probably not more than 80, until it's a huge number.
- lineLength := 72.0
-
- // Get the longest filename and longest total change.
- var longestLength float64
- var longestTotalChange float64
- for _, fs := range fileStats {
- if int(longestLength) < len(fs.Name) {
- longestLength = float64(len(fs.Name))
- }
- totalChange := fs.Addition + fs.Deletion
- if int(longestTotalChange) < totalChange {
- longestTotalChange = float64(totalChange)
- }
- }
-
- // Parts of the output:
- // <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
- // example: " main.go | 10 +++++++--- "
-
- // <pad><filename><pad>
- leftTextLength := padLength + longestLength + padLength
-
- // <pad><number><pad><+++++/-----><newline>
- // Excluding number length here.
- rightTextLength := padLength + padLength + newlineLength
-
- totalTextArea := leftTextLength + separatorLength + rightTextLength
- heightOfHistogram := lineLength - totalTextArea
-
- // Scale the histogram.
- var scaleFactor float64
- if longestTotalChange > heightOfHistogram {
- // Scale down to heightOfHistogram.
- scaleFactor = longestTotalChange / heightOfHistogram
- } else {
- scaleFactor = 1.0
- }
-
- finalOutput := ""
- for _, fs := range fileStats {
- addn := float64(fs.Addition)
- deln := float64(fs.Deletion)
- adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
- dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
- finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
- }
-
- return finalOutput
-}
-
-func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
- var fileStats FileStats
-
- for _, fp := range filePatches {
-		// ignore empty patches (binary files, submodule ref updates)
- if len(fp.Chunks()) == 0 {
- continue
- }
-
- cs := FileStat{}
- from, to := fp.Files()
- if from == nil {
- // New File is created.
- cs.Name = to.Path()
- } else if to == nil {
- // File is deleted.
- cs.Name = from.Path()
- } else if from.Path() != to.Path() {
- // File is renamed. Not supported.
- // cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
- } else {
- cs.Name = from.Path()
- }
-
- for _, chunk := range fp.Chunks() {
- s := chunk.Content()
- if len(s) == 0 {
- continue
- }
-
- switch chunk.Type() {
- case fdiff.Add:
- cs.Addition += strings.Count(s, "\n")
- if s[len(s)-1] != '\n' {
- cs.Addition++
- }
- case fdiff.Delete:
- cs.Deletion += strings.Count(s, "\n")
- if s[len(s)-1] != '\n' {
- cs.Deletion++
- }
- }
- }
-
- fileStats = append(fileStats, cs)
- }
-
- return fileStats
-}
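Patch.Stats renders the ` file | n +++---` histogram computed by printStat, and printing the patch itself emits a unified diff. A sketch built on Tree.Patch (defined in tree.go below in this diff); the path is a placeholder, and HEAD is assumed to have a parent:

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}
	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}
	parent, err := head.Parents().Next() // assumes HEAD has a parent
	if err != nil {
		panic(err)
	}

	from, err := parent.Tree()
	if err != nil {
		panic(err)
	}
	to, err := head.Tree()
	if err != nil {
		panic(err)
	}

	patch, err := from.Patch(to)
	if err != nil {
		panic(err)
	}

	// Stats renders the familiar " file | n +++---" histogram;
	// printing the patch itself yields a unified diff.
	fmt.Print(patch.Stats())
	fmt.Println(patch)
}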
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go
deleted file mode 100644
index 9ee550925d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package object
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- stdioutil "io/ioutil"
- "strings"
-
- "golang.org/x/crypto/openpgp"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// Tag represents an annotated tag object. It points to a single git object of
-// any type, but tags typically are applied to commit or blob objects. It
-// provides a reference that associates the target with a tag name. It also
-// contains meta-information about the tag, including the tagger, tag date and
-// message.
-//
-// Note that this is not used for lightweight tags.
-//
-// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
-type Tag struct {
- // Hash of the tag.
- Hash plumbing.Hash
- // Name of the tag.
- Name string
- // Tagger is the one who created the tag.
- Tagger Signature
- // Message is an arbitrary text message.
- Message string
- // PGPSignature is the PGP signature of the tag.
- PGPSignature string
- // TargetType is the object type of the target.
- TargetType plumbing.ObjectType
- // Target is the hash of the target object.
- Target plumbing.Hash
-
- s storer.EncodedObjectStorer
-}
-
-// GetTag gets a tag from an object storer and decodes it.
-func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
- o, err := s.EncodedObject(plumbing.TagObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeTag(s, o)
-}
-
-// DecodeTag decodes an encoded object into a *Tag and associates it to the
-// given object storer.
-func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
- t := &Tag{s: s}
- if err := t.Decode(o); err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-// ID returns the object ID of the tag, not the object that the tag references.
-// The returned value will always match the current value of Tag.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (t *Tag) ID() plumbing.Hash {
- return t.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.TagObject.
-//
-// Type is present to fulfill the Object interface.
-func (t *Tag) Type() plumbing.ObjectType {
- return plumbing.TagObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Tag struct.
-func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.TagObject {
- return ErrUnsupportedObject
- }
-
- t.Hash = o.Hash()
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := bufPool.Get().(*bufio.Reader)
- defer bufPool.Put(r)
- r.Reset(reader)
- for {
- var line []byte
- line, err = r.ReadBytes('\n')
- if err != nil && err != io.EOF {
- return err
- }
-
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- break // Start of message
- }
-
- split := bytes.SplitN(line, []byte{' '}, 2)
- switch string(split[0]) {
- case "object":
- t.Target = plumbing.NewHash(string(split[1]))
- case "type":
- t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
- if err != nil {
- return err
- }
- case "tag":
- t.Name = string(split[1])
- case "tagger":
- t.Tagger.Decode(split[1])
- }
-
- if err == io.EOF {
- return nil
- }
- }
-
- data, err := stdioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- var pgpsig bool
- // Check if data contains PGP signature.
- if bytes.Contains(data, []byte(beginpgp)) {
- // Split the lines at newline.
- messageAndSig := bytes.Split(data, []byte("\n"))
-
- for _, l := range messageAndSig {
- if pgpsig {
- if bytes.Contains(l, []byte(endpgp)) {
- t.PGPSignature += endpgp + "\n"
- break
- } else {
- t.PGPSignature += string(l) + "\n"
- }
- continue
- }
-
- // Check if it's the beginning of a PGP signature.
- if bytes.Contains(l, []byte(beginpgp)) {
- t.PGPSignature += beginpgp + "\n"
- pgpsig = true
- continue
- }
-
- t.Message += string(l) + "\n"
- }
- } else {
- t.Message = string(data)
- }
-
- return nil
-}
-
-// Encode transforms a Tag into a plumbing.EncodedObject.
-func (t *Tag) Encode(o plumbing.EncodedObject) error {
- return t.encode(o, true)
-}
-
-// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without the signature (corresponding to the payload of the PGP signature).
-func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
- return t.encode(o, false)
-}
-
-func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
- o.SetType(plumbing.TagObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(w, &err)
-
- if _, err = fmt.Fprintf(w,
- "object %s\ntype %s\ntag %s\ntagger ",
- t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
- return err
- }
-
- if err = t.Tagger.Encode(w); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, "\n\n"); err != nil {
- return err
- }
-
- if _, err = fmt.Fprint(w, t.Message); err != nil {
- return err
- }
-
-	// Note that this is highly sensitive to what is sent along in the message.
-	// The message *always* needs to end with a newline, or else the message and
-	// the signature will be concatenated into a corrupt object. Since this is a
-	// lower-level method, we assume the caller knows what it is doing and has
-	// already prepared the message accordingly.
- if includeSig {
- if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Commit returns the commit pointed to by the tag. If the tag points to a
-// different type of object, ErrUnsupportedObject will be returned.
-func (t *Tag) Commit() (*Commit, error) {
- if t.TargetType != plumbing.CommitObject {
- return nil, ErrUnsupportedObject
- }
-
- o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target)
- if err != nil {
- return nil, err
- }
-
- return DecodeCommit(t.s, o)
-}
-
-// Tree returns the tree pointed to by the tag. If the tag points to a commit
-// object, the tree of that commit will be returned. If the tag does not point
-// to a commit or tree object, ErrUnsupportedObject will be returned.
-func (t *Tag) Tree() (*Tree, error) {
- switch t.TargetType {
- case plumbing.CommitObject:
- c, err := t.Commit()
- if err != nil {
- return nil, err
- }
-
- return c.Tree()
- case plumbing.TreeObject:
- return GetTree(t.s, t.Target)
- default:
- return nil, ErrUnsupportedObject
- }
-}
-
-// Blob returns the blob pointed to by the tag. If the tag points to a
-// different type of object, ErrUnsupportedObject will be returned.
-func (t *Tag) Blob() (*Blob, error) {
- if t.TargetType != plumbing.BlobObject {
- return nil, ErrUnsupportedObject
- }
-
- return GetBlob(t.s, t.Target)
-}
-
-// Object returns the object pointed to by the tag.
-func (t *Tag) Object() (Object, error) {
- o, err := t.s.EncodedObject(t.TargetType, t.Target)
- if err != nil {
- return nil, err
- }
-
- return DecodeObject(t.s, o)
-}
-
-// String returns the meta information contained in the tag as a formatted
-// string.
-func (t *Tag) String() string {
- obj, _ := t.Object()
-
- return fmt.Sprintf(
- "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s",
- plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat),
- t.Message, objectAsString(obj),
- )
-}
-
-// Verify performs PGP verification of the tag with the provided armored
-// keyring and returns the openpgp.Entity associated with the verifying key on
-// success.
-func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
- keyRingReader := strings.NewReader(armoredKeyRing)
- keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
- if err != nil {
- return nil, err
- }
-
- // Extract signature.
- signature := strings.NewReader(t.PGPSignature)
-
- encoded := &plumbing.MemoryObject{}
-	// Encode the tag components, excluding the signature, and get a reader.
- if err := t.EncodeWithoutSignature(encoded); err != nil {
- return nil, err
- }
- er, err := encoded.Reader()
- if err != nil {
- return nil, err
- }
-
- return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
-}
-
-// TagIter provides an iterator for a set of tags.
-type TagIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewTagIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *TagIter that iterates over all
-// tags contained in the storer.EncodedObjectIter.
-//
-// Any non-tag object returned by the storer.EncodedObjectIter is skipped.
-func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter {
- return &TagIter{iter, s}
-}
-
-// Next moves the iterator to the next tag and returns a pointer to it. If
-// there are no more tags, it returns io.EOF.
-func (iter *TagIter) Next() (*Tag, error) {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- return DecodeTag(iter.s, obj)
-}
-
-// ForEach calls the cb function for each tag contained in this iter until
-// an error occurs or the end of the iter is reached. If storer.ErrStop is
-// returned by cb, the iteration stops but no error is returned. The iterator
-// is closed.
-func (iter *TagIter) ForEach(cb func(*Tag) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- t, err := DecodeTag(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(t)
- })
-}
-
-func objectAsString(obj Object) string {
- switch o := obj.(type) {
- case *Commit:
- return o.String()
- default:
- return ""
- }
-}
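A sketch of iterating annotated tags with TagIter via the v4 Repository.TagObjects helper (assumed available on the v4 Repository type; lightweight tags are plain references and never reach this iterator):

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // hypothetical path
	if err != nil {
		panic(err)
	}

	// TagObjects iterates annotated tag objects only.
	tags, err := repo.TagObjects()
	if err != nil {
		panic(err)
	}
	err = tags.ForEach(func(t *object.Tag) error {
		fmt.Printf("%s -> %s %s\n", t.Name, t.TargetType, t.Target)
		if t.TargetType == plumbing.CommitObject {
			c, err := t.Commit()
			if err != nil {
				return err
			}
			fmt.Println("  tagged commit:", c.Hash)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}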
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go
deleted file mode 100644
index d0b4fff15c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go
+++ /dev/null
@@ -1,520 +0,0 @@
-package object
-
-import (
- "bufio"
- "context"
- "errors"
- "fmt"
- "io"
- "path"
- "path/filepath"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-const (
- maxTreeDepth = 1024
- startingStackSize = 8
-)
-
-// New errors defined by this package.
-var (
- ErrMaxTreeDepth = errors.New("maximum tree depth exceeded")
- ErrFileNotFound = errors.New("file not found")
- ErrDirectoryNotFound = errors.New("directory not found")
- ErrEntryNotFound = errors.New("entry not found")
-)
-
-// Tree is basically like a directory: it references a number of other trees
-// and/or blobs (i.e. files and sub-directories).
-type Tree struct {
- Entries []TreeEntry
- Hash plumbing.Hash
-
- s storer.EncodedObjectStorer
- m map[string]*TreeEntry
- t map[string]*Tree // tree path cache
-}
-
-// GetTree gets a tree from an object storer and decodes it.
-func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) {
- o, err := s.EncodedObject(plumbing.TreeObject, h)
- if err != nil {
- return nil, err
- }
-
- return DecodeTree(s, o)
-}
-
-// DecodeTree decodes an encoded object into a *Tree and associates it to the
-// given object storer.
-func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) {
- t := &Tree{s: s}
- if err := t.Decode(o); err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-// TreeEntry represents an entry in a tree: a file or a sub-tree.
-type TreeEntry struct {
- Name string
- Mode filemode.FileMode
- Hash plumbing.Hash
-}
-
-// File returns the file identified by the `path` argument.
-// The path is interpreted as relative to the tree receiver.
-func (t *Tree) File(path string) (*File, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return nil, ErrFileNotFound
- }
-
- blob, err := GetBlob(t.s, e.Hash)
- if err != nil {
- if err == plumbing.ErrObjectNotFound {
- return nil, ErrFileNotFound
- }
- return nil, err
- }
-
- return NewFile(path, e.Mode, blob), nil
-}
-
-// Size returns the plaintext size of the object identified by the `path`
-// argument, without reading it into memory.
-func (t *Tree) Size(path string) (int64, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return 0, ErrEntryNotFound
- }
-
- return t.s.EncodedObjectSize(e.Hash)
-}
-
-// Tree returns the tree identified by the `path` argument.
-// The path is interpreted as relative to the tree receiver.
-func (t *Tree) Tree(path string) (*Tree, error) {
- e, err := t.FindEntry(path)
- if err != nil {
- return nil, ErrDirectoryNotFound
- }
-
- tree, err := GetTree(t.s, e.Hash)
- if err == plumbing.ErrObjectNotFound {
- return nil, ErrDirectoryNotFound
- }
-
- return tree, err
-}
-
-// TreeEntryFile returns the *File for a given *TreeEntry.
-func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
- blob, err := GetBlob(t.s, e.Hash)
- if err != nil {
- return nil, err
- }
-
- return NewFile(e.Name, e.Mode, blob), nil
-}
-
-// FindEntry searches for a TreeEntry in this tree or any of its subtrees.
-func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
- if t.t == nil {
- t.t = make(map[string]*Tree)
- }
-
- pathParts := strings.Split(path, "/")
- startingTree := t
- pathCurrent := ""
-
- // search for the longest path in the tree path cache
- for i := len(pathParts) - 1; i > 1; i-- {
- path := filepath.Join(pathParts[:i]...)
-
- tree, ok := t.t[path]
- if ok {
- startingTree = tree
- pathParts = pathParts[i:]
- pathCurrent = path
-
- break
- }
- }
-
- var tree *Tree
- var err error
- for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
- if tree, err = tree.dir(pathParts[0]); err != nil {
- return nil, err
- }
-
- pathCurrent = filepath.Join(pathCurrent, pathParts[0])
- t.t[pathCurrent] = tree
- }
-
- return tree.entry(pathParts[0])
-}
-
-func (t *Tree) dir(baseName string) (*Tree, error) {
- entry, err := t.entry(baseName)
- if err != nil {
- return nil, ErrDirectoryNotFound
- }
-
- obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash)
- if err != nil {
- return nil, err
- }
-
- tree := &Tree{s: t.s}
- err = tree.Decode(obj)
-
- return tree, err
-}
-
-func (t *Tree) entry(baseName string) (*TreeEntry, error) {
- if t.m == nil {
- t.buildMap()
- }
-
- entry, ok := t.m[baseName]
- if !ok {
- return nil, ErrEntryNotFound
- }
-
- return entry, nil
-}
-
-// Files returns a FileIter that iterates over the files contained in the Tree.
-func (t *Tree) Files() *FileIter {
- return NewFileIter(t.s, t)
-}
-
-// ID returns the object ID of the tree. The returned value will always match
-// the current value of Tree.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (t *Tree) ID() plumbing.Hash {
- return t.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.TreeObject.
-func (t *Tree) Type() plumbing.ObjectType {
- return plumbing.TreeObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Tree struct.
-func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
- if o.Type() != plumbing.TreeObject {
- return ErrUnsupportedObject
- }
-
- t.Hash = o.Hash()
- if o.Size() == 0 {
- return nil
- }
-
- t.Entries = nil
- t.m = nil
-
- reader, err := o.Reader()
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(reader, &err)
-
- r := bufPool.Get().(*bufio.Reader)
- defer bufPool.Put(r)
- r.Reset(reader)
- for {
- str, err := r.ReadString(' ')
- if err != nil {
- if err == io.EOF {
- break
- }
-
- return err
- }
- str = str[:len(str)-1] // strip last byte (' ')
-
- mode, err := filemode.New(str)
- if err != nil {
- return err
- }
-
- name, err := r.ReadString(0)
- if err != nil && err != io.EOF {
- return err
- }
-
- var hash plumbing.Hash
- if _, err = io.ReadFull(r, hash[:]); err != nil {
- return err
- }
-
- baseName := name[:len(name)-1]
- t.Entries = append(t.Entries, TreeEntry{
- Hash: hash,
- Mode: mode,
- Name: baseName,
- })
- }
-
- return nil
-}
-
-// Encode transforms a Tree into a plumbing.EncodedObject.
-func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
- o.SetType(plumbing.TreeObject)
- w, err := o.Writer()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(w, &err)
- for _, entry := range t.Entries {
- if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
- return err
- }
-
- if _, err = w.Write([]byte{0x00}); err != nil {
- return err
- }
-
- if _, err = w.Write(entry.Hash[:]); err != nil {
- return err
- }
- }
-
- return err
-}
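For reference, Encode above writes each entry in the canonical git tree wire format: octal mode, space, name, NUL, then the 20 raw hash bytes. A minimal sketch of that layout (encodeEntry is an illustrative helper, not part of go-git):

package sketch

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)

// encodeEntry writes a single tree entry in the same wire format used
// by Tree.Encode: "<octal mode> <name>\x00<20 raw hash bytes>".
func encodeEntry(buf *bytes.Buffer, mode filemode.FileMode, name string, hash plumbing.Hash) {
	fmt.Fprintf(buf, "%o %s", mode, name)
	buf.WriteByte(0x00)
	buf.Write(hash[:])
}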
-
-func (t *Tree) buildMap() {
- t.m = make(map[string]*TreeEntry)
- for i := 0; i < len(t.Entries); i++ {
- t.m[t.Entries[i].Name] = &t.Entries[i]
- }
-}
-
-// Diff returns a list of changes between this tree and the provided one
-func (from *Tree) Diff(to *Tree) (Changes, error) {
- return DiffTree(from, to)
-}
-
-// DiffContext returns a list of changes between this tree and the provided one.
-// An error will be returned if the context expires.
-// The provided context must be non-nil.
-func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
- return DiffTreeContext(ctx, from, to)
-}
-
-// Patch returns a Patch holding all the changes between trees, in chunks.
-// This representation can be used to create several diff outputs.
-func (from *Tree) Patch(to *Tree) (*Patch, error) {
- return from.PatchContext(context.Background(), to)
-}
-
-// PatchContext returns a Patch holding all the changes between trees,
-// in chunks. This representation can be used to create several diff outputs.
-// If the context expires, an error will be returned.
-// The provided context must be non-nil.
-func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
- changes, err := DiffTreeContext(ctx, from, to)
- if err != nil {
- return nil, err
- }
-
- return changes.PatchContext(ctx)
-}
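The Diff/Patch entry points above compose as in the following sketch, assuming *object.Patch implements fmt.Stringer as it does in go-git v4 (printPatch is illustrative):

package sketch

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// printPatch diffs two trees and prints a unified-diff representation,
// relying on *object.Patch's String method.
func printPatch(from, to *object.Tree) error {
	patch, err := from.Patch(to)
	if err != nil {
		return err
	}
	fmt.Print(patch)
	return nil
}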
-
-// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
-type treeEntryIter struct {
- t *Tree
- pos int
-}
-
-func (iter *treeEntryIter) Next() (TreeEntry, error) {
- if iter.pos >= len(iter.t.Entries) {
- return TreeEntry{}, io.EOF
- }
- iter.pos++
- return iter.t.Entries[iter.pos-1], nil
-}
-
-// TreeWalker provides a means of walking through all of the entries in a Tree.
-type TreeWalker struct {
- stack []*treeEntryIter
- base string
- recursive bool
- seen map[plumbing.Hash]bool
-
- s storer.EncodedObjectStorer
- t *Tree
-}
-
-// NewTreeWalker returns a new TreeWalker for the given tree.
-//
-// It is the caller's responsibility to call Close() when finished with the
-// tree walker.
-func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker {
- stack := make([]*treeEntryIter, 0, startingStackSize)
- stack = append(stack, &treeEntryIter{t, 0})
-
- return &TreeWalker{
- stack: stack,
- recursive: recursive,
- seen: seen,
-
- s: t.s,
- t: t,
- }
-}
-
-// Next returns the next object from the tree. Objects are returned in order
-// and subtrees are included. After the last object has been returned further
-// calls to Next() will return io.EOF.
-//
-// In the current implementation any objects which cannot be found in the
-// underlying repository will be skipped automatically. It is possible that this
-// may change in future versions.
-func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
- var obj *Tree
- for {
- current := len(w.stack) - 1
- if current < 0 {
- // Nothing left on the stack so we're finished
- err = io.EOF
- return
- }
-
- if current > maxTreeDepth {
- // We're probably following bad data or some self-referencing tree
- err = ErrMaxTreeDepth
- return
- }
-
- entry, err = w.stack[current].Next()
- if err == io.EOF {
- // Finished with the current tree, move back up to the parent
- w.stack = w.stack[:current]
- w.base, _ = path.Split(w.base)
- w.base = strings.TrimSuffix(w.base, "/")
- continue
- }
-
- if err != nil {
- return
- }
-
- if w.seen[entry.Hash] {
- continue
- }
-
- if entry.Mode == filemode.Dir {
- obj, err = GetTree(w.s, entry.Hash)
- }
-
- name = simpleJoin(w.base, entry.Name)
-
- if err != nil {
- err = io.EOF
- return
- }
-
- break
- }
-
- if !w.recursive {
- return
- }
-
- if obj != nil {
- w.stack = append(w.stack, &treeEntryIter{obj, 0})
- w.base = simpleJoin(w.base, entry.Name)
- }
-
- return
-}
-
-// Tree returns the tree that the tree walker most recently operated on.
-func (w *TreeWalker) Tree() *Tree {
- current := len(w.stack) - 1
- if w.stack[current].pos == 0 {
- current--
- }
-
- if current < 0 {
- return nil
- }
-
- return w.stack[current].t
-}
-
-// Close releases any resources used by the TreeWalker.
-func (w *TreeWalker) Close() {
- w.stack = nil
-}
-
-// TreeIter provides an iterator for a set of trees.
-type TreeIter struct {
- storer.EncodedObjectIter
- s storer.EncodedObjectStorer
-}
-
-// NewTreeIter takes a storer.EncodedObjectStorer and a
-// storer.EncodedObjectIter and returns a *TreeIter that iterates over all
-// trees contained in the storer.EncodedObjectIter.
-//
-// Any non-tree object returned by the storer.EncodedObjectIter is skipped.
-func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter {
- return &TreeIter{iter, s}
-}
-
-// Next moves the iterator to the next tree and returns a pointer to it. If
-// there are no more trees, it returns io.EOF.
-func (iter *TreeIter) Next() (*Tree, error) {
- for {
- obj, err := iter.EncodedObjectIter.Next()
- if err != nil {
- return nil, err
- }
-
- if obj.Type() != plumbing.TreeObject {
- continue
- }
-
- return DecodeTree(iter.s, obj)
- }
-}
-
-// ForEach calls the cb function for each tree contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
- return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
- if obj.Type() != plumbing.TreeObject {
- return nil
- }
-
- t, err := DecodeTree(iter.s, obj)
- if err != nil {
- return err
- }
-
- return cb(t)
- })
-}
-
-func simpleJoin(parent, child string) string {
- if len(parent) > 0 {
- return parent + "/" + child
- }
- return child
-}
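A minimal sketch of driving the TreeWalker API above (printTree and its error handling are illustrative, not part of the package):

package sketch

import (
	"fmt"
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// printTree walks every entry of t (recursively, including subtrees)
// and prints its path, mode and hash; Close releases the walker stack.
func printTree(t *object.Tree) error {
	w := object.NewTreeWalker(t, true, nil)
	defer w.Close()
	for {
		name, entry, err := w.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(name, entry.Mode, entry.Hash)
	}
}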
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go
deleted file mode 100644
index 52f0e61221..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package object
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// A treenoder is a helper type that wraps git trees into merkletrie
-// noders.
-//
-// As a merkletrie noder doesn't understand the concept of modes (e.g.
-// file permissions), the treenoder includes the mode of the git tree in
-// the hash, so changes in the modes will be detected as modifications
-// to the file contents by the merkletrie difftree algorithm. This is
-// consistent with how the "git diff-tree" command works.
-type treeNoder struct {
- parent *Tree // the root node is its own parent
- name string // empty string for the root node
- mode filemode.FileMode
- hash plumbing.Hash
- children []noder.Noder // memoized
-}
-
-// NewTreeRootNode returns the root node of a Tree
-func NewTreeRootNode(t *Tree) noder.Noder {
- if t == nil {
- return &treeNoder{}
- }
-
- return &treeNoder{
- parent: t,
- name: "",
- mode: filemode.Dir,
- hash: t.Hash,
- }
-}
-
-func (t *treeNoder) isRoot() bool {
- return t.name == ""
-}
-
-func (t *treeNoder) String() string {
- return "treeNoder <" + t.name + ">"
-}
-
-func (t *treeNoder) Hash() []byte {
- if t.mode == filemode.Deprecated {
- return append(t.hash[:], filemode.Regular.Bytes()...)
- }
- return append(t.hash[:], t.mode.Bytes()...)
-}
-
-func (t *treeNoder) Name() string {
- return t.name
-}
-
-func (t *treeNoder) IsDir() bool {
- return t.mode == filemode.Dir
-}
-
-// Children will return the children of a treenoder as treenoders,
-// building them from the children of the wrapped git tree.
-func (t *treeNoder) Children() ([]noder.Noder, error) {
- if t.mode != filemode.Dir {
- return noder.NoChildren, nil
- }
-
- // children are memoized for efficiency
- if t.children != nil {
- return t.children, nil
- }
-
- // the parent of the returned children will be ourselves as a tree if
- // we are not the root treenoder. The root is special, as it
- // is its own parent.
- parent := t.parent
- if !t.isRoot() {
- var err error
- if parent, err = t.parent.Tree(t.name); err != nil {
- return nil, err
- }
- }
-
- return transformChildren(parent)
-}
-
-// Returns the children of a tree as treenoders.
-// Efficiency is key here.
-func transformChildren(t *Tree) ([]noder.Noder, error) {
- var err error
- var e TreeEntry
-
- // there will be more tree entries than children in the tree,
- // due to submodules and empty directories, but I think it is still
- // worth it to pre-allocate the whole array now, even if it is
- // sometimes bigger than needed.
- ret := make([]noder.Noder, 0, len(t.Entries))
-
- walker := NewTreeWalker(t, false, nil) // don't recurse
- // don't defer walker.Close() for efficiency reasons.
- for {
- _, e, err = walker.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- walker.Close()
- return nil, err
- }
-
- ret = append(ret, &treeNoder{
- parent: t,
- name: e.Name,
- mode: e.Mode,
- hash: e.Hash,
- })
- }
- walker.Close()
-
- return ret, nil
-}
-
-// len(t.tree.Entries) != the number of elements walked by the treewalker
-// because of empty directories, submodules, etc., so we
-// have to walk here.
-func (t *treeNoder) NumChildren() (int, error) {
- children, err := t.Children()
- if err != nil {
- return 0, err
- }
-
- return len(children), nil
-}
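A minimal sketch of the only exported entry point into this adapter, assuming noder.Noder exposes NumChildren as the method set above suggests (countChildren is illustrative):

package sketch

import (
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// countChildren wraps a tree as a merkletrie noder and asks for the
// number of immediate children, which internally walks the tree once.
func countChildren(t *object.Tree) (int, error) {
	root := object.NewTreeRootNode(t)
	return root.NumChildren()
}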
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go
deleted file mode 100644
index 487ee19bd8..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package packp
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/memory"
-)
-
-// AdvRefs values represent the information transmitted on an
-// advertised-refs message. Values from this type are not zero-value
-// safe, use the New function instead.
-type AdvRefs struct {
- // Prefix stores prefix payloads.
- //
- // When using this message over (smart) HTTP, you have to add a pktline
- // before the whole thing with the following payload:
- //
- //   "# service=$servicename" LF
- //
- // Moreover, some (if not all) git smart HTTP servers will send a
- // flush-pkt just after the first pkt-line.
- //
- // To accommodate both situations, the Prefix field allows you to store
- // any data you want to send before the actual pktlines. It will also
- // be filled up with whatever is found on the line.
- Prefix [][]byte
- // Head stores the resolved HEAD reference if present.
- // This can be present with git-upload-pack, not with git-receive-pack.
- Head *plumbing.Hash
- // Capabilities are the capabilities.
- Capabilities *capability.List
- // References are the hash references.
- References map[string]plumbing.Hash
- // Peeled are the peeled hash references.
- Peeled map[string]plumbing.Hash
- // Shallows are the shallow object ids.
- Shallows []plumbing.Hash
-}
-
-// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
-func NewAdvRefs() *AdvRefs {
- return &AdvRefs{
- Prefix: [][]byte{},
- Capabilities: capability.NewList(),
- References: make(map[string]plumbing.Hash),
- Peeled: make(map[string]plumbing.Hash),
- Shallows: []plumbing.Hash{},
- }
-}
-
-func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
- switch r.Type() {
- case plumbing.SymbolicReference:
- v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
- a.Capabilities.Add(capability.SymRef, v)
- case plumbing.HashReference:
- a.References[r.Name().String()] = r.Hash()
- default:
- return plumbing.ErrInvalidType
- }
-
- return nil
-}
-
-func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
- s := memory.ReferenceStorage{}
- if err := a.addRefs(s); err != nil {
- return s, plumbing.NewUnexpectedError(err)
- }
-
- return s, nil
-}
-
-func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
- for name, hash := range a.References {
- ref := plumbing.NewReferenceFromStrings(name, hash.String())
- if err := s.SetReference(ref); err != nil {
- return err
- }
- }
-
- if a.supportSymrefs() {
- return a.addSymbolicRefs(s)
- }
-
- return a.resolveHead(s)
-}
-
-// If the server does not support the symrefs capability,
-// we need to guess the reference that HEAD is pointing to.
-//
-// Git versions prior to 1.8.4.3 have a special procedure to find
-// the reference that HEAD points to:
-// - Check if a reference called master exists. If it exists and
-//   has the same hash as HEAD, we can say that HEAD is pointing to master.
-// - If master does not exist or does not have the same hash as HEAD,
-//   sort the references and check, in that order, whether each reference
-//   has the same hash as HEAD. If so, set HEAD pointing to that branch hash.
-// - If no reference is found, return an error.
-func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
- if a.Head == nil {
- return nil
- }
-
- ref, err := s.Reference(plumbing.Master)
-
- // check first if HEAD is pointing to master
- if err == nil {
- ok, err := a.createHeadIfCorrectReference(ref, s)
- if err != nil {
- return err
- }
-
- if ok {
- return nil
- }
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- // From here on we are trying to guess the branch that HEAD is pointing to
- refIter, err := s.IterReferences()
- if err != nil {
- return err
- }
-
- var refNames []string
- err = refIter.ForEach(func(r *plumbing.Reference) error {
- refNames = append(refNames, string(r.Name()))
- return nil
- })
- if err != nil {
- return err
- }
-
- sort.Strings(refNames)
-
- var headSet bool
- for _, refName := range refNames {
- ref, err := s.Reference(plumbing.ReferenceName(refName))
- if err != nil {
- return err
- }
- ok, err := a.createHeadIfCorrectReference(ref, s)
- if err != nil {
- return err
- }
- if ok {
- headSet = true
- break
- }
- }
-
- if !headSet {
- return plumbing.ErrReferenceNotFound
- }
-
- return nil
-}
-
-func (a *AdvRefs) createHeadIfCorrectReference(
- reference *plumbing.Reference,
- s storer.ReferenceStorer) (bool, error) {
- if reference.Hash() == *a.Head {
- headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
- if err := s.SetReference(headRef); err != nil {
- return false, err
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
- for _, symref := range a.Capabilities.Get(capability.SymRef) {
- chunks := strings.Split(symref, ":")
- if len(chunks) != 2 {
- err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
- return plumbing.NewUnexpectedError(err)
- }
- name := plumbing.ReferenceName(chunks[0])
- target := plumbing.ReferenceName(chunks[1])
- ref := plumbing.NewSymbolicReference(name, target)
- if err := s.SetReference(ref); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AdvRefs) supportSymrefs() bool {
- return a.Capabilities.Supports(capability.SymRef)
-}
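A minimal sketch of building an AdvRefs message with the API above (advertise and the hash value are illustrative placeholders):

package sketch

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

// advertise builds an AdvRefs message advertising a single branch.
func advertise() (*packp.AdvRefs, error) {
	ar := packp.NewAdvRefs()
	ref := plumbing.NewReferenceFromStrings("refs/heads/master",
		"6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	if err := ar.AddReference(ref); err != nil {
		return nil, err
	}
	return ar, nil
}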
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go
deleted file mode 100644
index 1b4c62c896..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-// Decode reads the next advertised-refs message from its input and
-// stores it in the AdvRefs.
-func (a *AdvRefs) Decode(r io.Reader) error {
- d := newAdvRefsDecoder(r)
- return d.Decode(a)
-}
-
-type advRefsDecoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use parser.nextLine() to make it advance
- nLine int // current pkt-line number for debugging, begins at 1
- hash plumbing.Hash // last hash read
- err error // sticky error, use the parser.error() method to fill this out
- data *AdvRefs // parsed data is stored here
-}
-
-var (
- // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
- // references message.
- ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
- // ErrEmptyInput is returned by Decode if the input is empty.
- ErrEmptyInput = errors.New("empty input")
-)
-
-func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
- return &advRefsDecoder{
- s: pktline.NewScanner(r),
- }
-}
-
-func (d *advRefsDecoder) Decode(v *AdvRefs) error {
- d.data = v
-
- for state := decodePrefix; state != nil; {
- state = state(d)
- }
-
- return d.err
-}
-
-type decoderStateFn func(*advRefsDecoder) decoderStateFn
-
-// fills out the parser sticky error
-func (d *advRefsDecoder) error(format string, a ...interface{}) {
- msg := fmt.Sprintf(
- "pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...),
- )
-
- d.err = NewErrUnexpectedData(msg, d.line)
-}
-
-// Reads a new pkt-line from the scanner, makes its payload available as
-// d.line and increments d.nLine. A successful invocation returns true;
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims eols at the end of the payloads.
-func (d *advRefsDecoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- if d.nLine == 1 {
- d.err = ErrEmptyInput
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// The HTTP smart prefix is often followed by a flush-pkt.
-func decodePrefix(d *advRefsDecoder) decoderStateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !isPrefix(d.line) {
- return decodeFirstHash
- }
-
- tmp := make([]byte, len(d.line))
- copy(tmp, d.line)
- d.data.Prefix = append(d.data.Prefix, tmp)
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !isFlush(d.line) {
- return decodeFirstHash
- }
-
- d.data.Prefix = append(d.data.Prefix, pktline.Flush)
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- return decodeFirstHash
-}
-
-func isPrefix(payload []byte) bool {
- return len(payload) > 0 && payload[0] == '#'
-}
-
-// If the first hash is zero, then a no-refs is coming. Otherwise, a
-// list-of-refs is coming, and the hash will be followed by the first
-// advertised ref.
-func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
- // If the repository is empty, we receive a flush here (HTTP).
- if isFlush(p.line) {
- p.err = ErrEmptyAdvRefs
- return nil
- }
-
- if len(p.line) < hashSize {
- p.error("cannot read hash, pkt-line too short")
- return nil
- }
-
- if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.line = p.line[hashSize:]
-
- if p.hash.IsZero() {
- return decodeSkipNoRefs
- }
-
- return decodeFirstRef
-}
-
-// Skips SP "capabilities^{}" NUL
-func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
- if len(p.line) < len(noHeadMark) {
- p.error("too short zero-id ref")
- return nil
- }
-
- if !bytes.HasPrefix(p.line, noHeadMark) {
- p.error("malformed zero-id ref")
- return nil
- }
-
- p.line = p.line[len(noHeadMark):]
-
- return decodeCaps
-}
-
-// decodes the refname; expects SP refname NUL
-func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
- if len(l.line) < 3 {
- l.error("line too short after hash")
- return nil
- }
-
- if !bytes.HasPrefix(l.line, sp) {
- l.error("no space after hash")
- return nil
- }
- l.line = l.line[1:]
-
- chunks := bytes.SplitN(l.line, null, 2)
- if len(chunks) < 2 {
- l.error("NULL not found")
- return nil
- }
- ref := chunks[0]
- l.line = chunks[1]
-
- if bytes.Equal(ref, []byte(head)) {
- l.data.Head = &l.hash
- } else {
- l.data.References[string(ref)] = l.hash
- }
-
- return decodeCaps
-}
-
-func decodeCaps(p *advRefsDecoder) decoderStateFn {
- if err := p.data.Capabilities.Decode(p.line); err != nil {
- p.error("invalid capabilities: %s", err)
- return nil
- }
-
- return decodeOtherRefs
-}
-
-// The refs are either tips (obj-id SP refname) or peeled (obj-id SP refname^{}).
-// If there are no refs, then there might be a shallow or a flush-pkt.
-func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(p.line, shallow) {
- return decodeShallow
- }
-
- if len(p.line) == 0 {
- return nil
- }
-
- saveTo := p.data.References
- if bytes.HasSuffix(p.line, peeled) {
- p.line = bytes.TrimSuffix(p.line, peeled)
- saveTo = p.data.Peeled
- }
-
- ref, hash, err := readRef(p.line)
- if err != nil {
- p.error("%s", err)
- return nil
- }
- saveTo[ref] = hash
-
- return decodeOtherRefs
-}
-
-// Reads an "obj-id SP refname" pair
-func readRef(data []byte) (string, plumbing.Hash, error) {
- chunks := bytes.Split(data, sp)
- switch {
- case len(chunks) == 1:
- return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
- case len(chunks) > 2:
- return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
- default:
- return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
- }
-}
-
-// Keeps reading shallows until a flush-pkt is found
-func decodeShallow(p *advRefsDecoder) decoderStateFn {
- if !bytes.HasPrefix(p.line, shallow) {
- p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
- return nil
- }
- p.line = bytes.TrimPrefix(p.line, shallow)
-
- if len(p.line) != hashSize {
- p.error(fmt.Sprintf(
- "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
- len(p.line)))
- return nil
- }
-
- text := p.line[:hashSize]
- var h plumbing.Hash
- if _, err := hex.Decode(h[:], text); err != nil {
- p.error("invalid hash text: %s", err)
- return nil
- }
-
- p.data.Shallows = append(p.data.Shallows, h)
-
- if ok := p.nextLine(); !ok {
- return nil
- }
-
- if len(p.line) == 0 {
- return nil // successful parse of the advertised-refs message
- }
-
- return decodeShallow
-}
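A worked sketch of the decoder above on a hand-built stream: one pkt-line advertising HEAD with a single capability, then a flush-pkt (decodeExample and the hash are illustrative):

package sketch

import (
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

// decodeExample parses a hand-built advertised-refs stream. The
// pkt-len "003c" is 4 length bytes + 56 payload bytes (40-byte hash,
// SP, "HEAD", NUL, "multi_ack", LF); "0000" is the closing flush-pkt.
func decodeExample() (*packp.AdvRefs, error) {
	raw := "003c6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack\n" +
		"0000"
	ar := packp.NewAdvRefs()
	if err := ar.Decode(strings.NewReader(raw)); err != nil {
		return nil, err
	}
	return ar, nil // ar.Head and ar.Capabilities are now populated
}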
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go
deleted file mode 100644
index c23e3feb0f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "sort"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
-)
-
-// Encode writes the AdvRefs encoding to a writer.
-//
-// All the payloads will end with a newline character. Capabilities,
-// references and shallows are written in alphabetical order, except for
-// peeled references that always follow their corresponding references.
-func (a *AdvRefs) Encode(w io.Writer) error {
- e := newAdvRefsEncoder(w)
- return e.Encode(a)
-}
-
-type advRefsEncoder struct {
- data *AdvRefs // data to encode
- pe *pktline.Encoder // where to write the encoded data
- firstRefName string // reference name to encode in the first pkt-line (HEAD if present)
- firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present)
- sortedRefs []string // hash references to encode, sorted in increasing order
- err error // sticky error
-}
-
-func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
- return &advRefsEncoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-func (e *advRefsEncoder) Encode(v *AdvRefs) error {
- e.data = v
- e.sortRefs()
- e.setFirstRef()
-
- for state := encodePrefix; state != nil; {
- state = state(e)
- }
-
- return e.err
-}
-
-func (e *advRefsEncoder) sortRefs() {
- if len(e.data.References) > 0 {
- refs := make([]string, 0, len(e.data.References))
- for refName := range e.data.References {
- refs = append(refs, refName)
- }
-
- sort.Strings(refs)
- e.sortedRefs = refs
- }
-}
-
-func (e *advRefsEncoder) setFirstRef() {
- if e.data.Head != nil {
- e.firstRefName = head
- e.firstRefHash = *e.data.Head
- return
- }
-
- if len(e.sortedRefs) > 0 {
- refName := e.sortedRefs[0]
- e.firstRefName = refName
- e.firstRefHash = e.data.References[refName]
- }
-}
-
-type encoderStateFn func(*advRefsEncoder) encoderStateFn
-
-func encodePrefix(e *advRefsEncoder) encoderStateFn {
- for _, p := range e.data.Prefix {
- if bytes.Equal(p, pktline.Flush) {
- if e.err = e.pe.Flush(); e.err != nil {
- return nil
- }
- continue
- }
- if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
- return nil
- }
- }
-
- return encodeFirstLine
-}
-
-// Adds the first pkt-line payload: head hash, head ref and capabilities.
-// If the HEAD ref is not found, the first reference in increasing order is used.
-// If there is neither a HEAD nor any refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
-// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
-// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
-func encodeFirstLine(e *advRefsEncoder) encoderStateFn {
- const formatFirstLine = "%s %s\x00%s\n"
- var firstLine string
- capabilities := formatCaps(e.data.Capabilities)
-
- if e.firstRefName == "" {
- firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities)
- } else {
- firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities)
-
- }
-
- if e.err = e.pe.EncodeString(firstLine); e.err != nil {
- return nil
- }
-
- return encodeRefs
-}
-
-func formatCaps(c *capability.List) string {
- if c == nil {
- return ""
- }
-
- return c.String()
-}
-
-// Adds the (sorted) refs: hash SP refname EOL
-// and their peeled refs if any.
-func encodeRefs(e *advRefsEncoder) encoderStateFn {
- for _, r := range e.sortedRefs {
- if r == e.firstRefName {
- continue
- }
-
- hash := e.data.References[r]
- if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
- return nil
- }
-
- if hash, ok := e.data.Peeled[r]; ok {
- if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
- return nil
- }
- }
- }
-
- return encodeShallow
-}
-
-// Adds the (sorted) shallows: "shallow" SP hash EOL
-func encodeShallow(e *advRefsEncoder) encoderStateFn {
- sorted := sortShallows(e.data.Shallows)
- for _, hash := range sorted {
- if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
- return nil
- }
- }
-
- return encodeFlush
-}
-
-func sortShallows(c []plumbing.Hash) []string {
- ret := []string{}
- for _, h := range c {
- ret = append(ret, h.String())
- }
- sort.Strings(ret)
-
- return ret
-}
-
-func encodeFlush(e *advRefsEncoder) encoderStateFn {
- e.err = e.pe.Flush()
- return nil
-}
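A minimal sketch of rendering an AdvRefs to its wire form with the encoder above (advRefsPayload is illustrative):

package sketch

import (
	"bytes"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

// advRefsPayload renders an AdvRefs message to its pkt-line wire form,
// ending with a flush-pkt as encodeFlush above guarantees.
func advRefsPayload(ar *packp.AdvRefs) (string, error) {
	var buf bytes.Buffer
	if err := ar.Encode(&buf); err != nil {
		return "", err
	}
	return buf.String(), nil
}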
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go
deleted file mode 100644
index a129781157..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Package capability defines the server and client capabilities.
-package capability
-
-// Capability describes a server or client capability.
-type Capability string
-
-func (n Capability) String() string {
- return string(n)
-}
-
-const (
- // MultiACK capability allows the server to return "ACK obj-id continue" as
- // soon as it finds a commit that it can use as a common base, between the
- // client's wants and the client's have set.
- //
- // By sending this early, the server can potentially head off the client
- // from walking any further down that particular branch of the client's
- // repository history. The client may still need to walk down other
- // branches, sending have lines for those, until the server has a
- // complete cut across the DAG, or the client has said "done".
- //
- // Without multi_ack, a client sends have lines in --date-order until
- // the server has found a common base. That means the client will send
- // have lines that are already known by the server to be common, because
- // they overlap in time with another branch that the server hasn't found
- // a common base on yet.
- //
- // For example suppose the client has commits in caps that the server
- // doesn't and the server has commits in lower case that the client
- // doesn't, as in the following diagram:
- //
- // +---- u ---------------------- x
- // / +----- y
- // / /
- // a -- b -- c -- d -- E -- F
- // \
- // +--- Q -- R -- S
- //
- // If the client wants x,y and starts out by saying have F,S, the server
- // doesn't know what F,S is. Eventually the client says "have d" and
- // the server sends "ACK d continue" to let the client know to stop
- // walking down that line (so don't send c-b-a), but it's not done yet,
- // it needs a base for x. The client keeps going with S-R-Q, until a
- // gets reached, at which point the server has a clear base and it all
- // ends.
- //
- // Without multi_ack the client would have sent that c-b-a chain anyway,
- // interleaved with S-R-Q.
- MultiACK Capability = "multi_ack"
- // MultiACKDetailed is an extension of multi_ack that permits the client to
- // better understand the server's in-memory state.
- MultiACKDetailed Capability = "multi_ack_detailed"
- // NoDone should only be used with the smart HTTP protocol. If
- // multi_ack_detailed and no-done are both present, then the sender is
- // free to immediately send a pack following its first "ACK obj-id ready"
- // message.
- //
- // Without no-done in the smart HTTP protocol, the server session would
- // end and the client has to make another trip to send "done" before
- // the server can send the pack. no-done removes the last round and
- // thus slightly reduces latency.
- NoDone Capability = "no-done"
- // ThinPack is one with deltas which reference base objects not
- // contained within the pack (but are known to exist at the receiving
- // end). This can reduce the network traffic significantly, but it
- // requires the receiving end to know how to "thicken" these packs by
- // adding the missing bases to the pack.
- //
- // The upload-pack server advertises 'thin-pack' when it can generate
- // and send a thin pack. A client requests the 'thin-pack' capability
- // when it understands how to "thicken" it, notifying the server that
- // it can receive such a pack. A client MUST NOT request the
- // 'thin-pack' capability if it cannot turn a thin pack into a
- // self-contained pack.
- //
- // Receive-pack, on the other hand, is assumed by default to be able to
- // handle thin packs, but can ask the client not to use the feature by
- // advertising the 'no-thin' capability. A client MUST NOT send a thin
- // pack if the server advertises the 'no-thin' capability.
- //
- // The reasons for this asymmetry are historical. The receive-pack
- // program did not exist until after the invention of thin packs, so
- // historically the reference implementation of receive-pack always
- // understood thin packs. Adding 'no-thin' later allowed receive-pack
- // to disable the feature in a backwards-compatible manner.
- ThinPack Capability = "thin-pack"
- // Sideband means that server can send, and client understand multiplexed
- // progress reports and error info interleaved with the packfile itself.
- //
- // These two options are mutually exclusive. A modern client always
- // favors Sideband64k.
- //
- // Either mode indicates that the packfile data will be streamed broken
- // up into packets of up to either 1000 bytes in the case of 'side_band',
- // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
- // of a leading 4-byte pkt-line length of how much data is in the packet,
- // followed by a 1-byte stream code, followed by the actual data.
- //
- // The stream code can be one of:
- //
- // 1 - pack data
- // 2 - progress messages
- // 3 - fatal error message just before stream aborts
- //
- // The "side-band-64k" capability came about as a way for newer clients
- // that can handle much larger packets to request packets that are
- // actually crammed nearly full, while maintaining backward compatibility
- // for the older clients.
- //
- // Further, with side-band and its up to 1000-byte messages, it's actually
- // 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
- // same deal, you have up to 65519 bytes of data and 1 byte for the stream
- // code.
- //
- // The client MUST send at most one of "side-band" and "side-band-64k".
- // The server MUST diagnose it as an error if the client requests both.
- Sideband Capability = "side-band"
- Sideband64k Capability = "side-band-64k"
- // OFSDelta server can send, and client understand PACKv2 with delta
- // referring to its base by position in pack rather than by an obj-id. That
- // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile.
- OFSDelta Capability = "ofs-delta"
- // Agent the server may optionally send this capability to notify the client
- // that the server is running version `X`. The client may optionally return
- // its own agent string by responding with an `agent=Y` capability (but it
- // MUST NOT do so if the server did not mention the agent capability). The
- // `X` and `Y` strings may contain any printable ASCII characters except
- // space (i.e., the byte range 32 < x < 127), and are typically of the form
- // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely
- // informative for statistics and debugging purposes, and MUST NOT be used
- // to programmatically assume the presence or absence of particular features.
- Agent Capability = "agent"
- // Shallow capability adds "deepen", "shallow" and "unshallow" commands to
- // the fetch-pack/upload-pack protocol so clients can request shallow
- // clones.
- Shallow Capability = "shallow"
- // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack
- // protocol so the client can request shallow clones that are cut at a
- // specific time, instead of depth. Internally it's equivalent of doing
- // "rev-list --max-age=<timestamp>" on the server side. "deepen-since"
- // cannot be used with "deepen".
- DeepenSince Capability = "deepen-since"
- // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack
- // protocol so the client can request shallow clones that are cut at a
- // specific revision, instead of depth. Internally it's equivalent of
- // doing "rev-list --not <rev>" on the server side. "deepen-not"
- // cannot be used with "deepen", but can be used with "deepen-since".
- DeepenNot Capability = "deepen-not"
- // DeepenRelative if this capability is requested by the client, the
- // semantics of "deepen" command is changed. The "depth" argument is the
- // depth from the current shallow boundary, instead of the depth from
- // remote refs.
- DeepenRelative Capability = "deepen-relative"
- // NoProgress the client was started with "git clone -q" or something, and
- // doesn't want that side band 2. Basically the client just says "I do not
- // wish to receive stream 2 on sideband, so do not send it to me, and if
- // you did, I will drop it on the floor anyway". However, the sideband
- // channel 3 is still used for error responses.
- NoProgress Capability = "no-progress"
- // IncludeTag capability is about sending annotated tags if we are
- // sending objects they point to. If we pack an object to the client, and
- // a tag object points exactly at that object, we pack the tag object too.
- // In general this allows a client to get all new annotated tags when it
- // fetches a branch, in a single network connection.
- //
- // Clients MAY always send include-tag, hardcoding it into a request when
- // the server advertises this capability. The decision for a client to
- // request include-tag only has to do with the client's desires for tag
- // data, whether or not a server had advertised objects in the
- // refs/tags/* namespace.
- //
- // Servers MUST pack the tags if their referent is packed and the client
- // has requested include-tags.
- //
- // Clients MUST be prepared for the case where a server has ignored
- // include-tag and has not actually sent tags in the pack. In such
- // cases the client SHOULD issue a subsequent fetch to acquire the tags
- // that include-tag would have otherwise given the client.
- //
- // The server SHOULD send include-tag, if it supports it, regardless
- // of whether or not there are tags available.
- IncludeTag Capability = "include-tag"
- // ReportStatus the receive-pack process can receive a 'report-status'
- // capability, which tells it that the client wants a report of what
- // happened after a packfile upload and reference update. If the pushing
- // client requests this capability, after unpacking and updating references
- // the server will respond with whether the packfile unpacked successfully
- // and if each reference was updated successfully. If any of those were not
- // successful, it will send back an error message. See pack-protocol.txt
- // for example messages.
- ReportStatus Capability = "report-status"
- // DeleteRefs If the server sends back this capability, it means that
- // it is capable of accepting a zero-id value as the target
- // value of a reference update. It is not sent back by the client; it
- // simply informs the client that it may send zero-id values
- // to delete references.
- DeleteRefs Capability = "delete-refs"
- // Quiet If the receive-pack server advertises this capability, it is
- // capable of silencing human-readable progress output which otherwise may
- // be shown when processing the received pack. A send-pack client should
- // respond with the 'quiet' capability to suppress server-side progress
- // reporting if the local progress reporting is also being suppressed
- // (e.g., via `push -q`, or if stderr does not go to a tty).
- Quiet Capability = "quiet"
- // Atomic If the server sends this capability it is capable of accepting
- // atomic pushes. If the pushing client requests this capability, the server
- // will update the refs in one atomic transaction. Either all refs are
- // updated or none.
- Atomic Capability = "atomic"
- // PushOptions If the server sends this capability it is able to accept
- // push options after the update commands have been sent, but before the
- // packfile is streamed. If the pushing client requests this capability,
- // the server will pass the options to the pre- and post- receive hooks
- // that process this push request.
- PushOptions Capability = "push-options"
- // AllowTipSHA1InWant if the upload-pack server advertises this capability,
- // fetch-pack may send "want" lines with SHA-1s that exist at the server but
- // are not advertised by upload-pack.
- AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
- // AllowReachableSHA1InWant if the upload-pack server advertises this
- // capability, fetch-pack may send "want" lines with SHA-1s that exist at
- // the server but are not advertised by upload-pack.
- AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
- // PushCert the receive-pack server that advertises this capability is
- // willing to accept a signed push certificate, and asks the <nonce> to be
- // included in the push certificate. A send-pack client MUST NOT
- // send a push-cert packet unless the receive-pack server advertises
- // this capability.
- PushCert Capability = "push-cert"
- // SymRef symbolic reference support for better negotiation.
- SymRef Capability = "symref"
-)
-
-const DefaultAgent = "go-git/4.x"
-
-var known = map[Capability]bool{
- MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
- Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
- Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
- NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
- Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
- AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
-}
-
-var requiresArgument = map[Capability]bool{
- Agent: true, PushCert: true, SymRef: true,
-}
-
-var multipleArgument = map[Capability]bool{
- SymRef: true,
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go
deleted file mode 100644
index 26a79b6e73..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package capability
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- // ErrArgumentsRequired is returned if no arguments are given with a
- // capability that requires arguments
- ErrArgumentsRequired = errors.New("arguments required")
- // ErrArguments is returned if arguments are given with a capability that
- // does not support arguments
- ErrArguments = errors.New("arguments not allowed")
- // ErrEmtpyArgument is returned when an empty value is given
- ErrEmtpyArgument = errors.New("empty argument")
- // ErrMultipleArguments is returned when multiple arguments are given to a
- // capability that does not support them
- ErrMultipleArguments = errors.New("multiple arguments not allowed")
-)
-
-// List represents a list of capabilities
-type List struct {
- m map[Capability]*entry
- sort []string
-}
-
-type entry struct {
- Name Capability
- Values []string
-}
-
-// NewList returns a new List of capabilities
-func NewList() *List {
- return &List{
- m: make(map[Capability]*entry),
- }
-}
-
-// IsEmpty returns true if the List is empty
-func (l *List) IsEmpty() bool {
- return len(l.sort) == 0
-}
-
-// Decode decodes list of capabilities from raw into the list
-func (l *List) Decode(raw []byte) error {
- // git 1.x receive pack used to send a leading space on its
- // git-receive-pack capabilities announcement. We just trim space to be
- // tolerant of space changes in different versions.
- raw = bytes.TrimSpace(raw)
-
- if len(raw) == 0 {
- return nil
- }
-
- for _, data := range bytes.Split(raw, []byte{' '}) {
- pair := bytes.SplitN(data, []byte{'='}, 2)
-
- c := Capability(pair[0])
- if len(pair) == 1 {
- if err := l.Add(c); err != nil {
- return err
- }
-
- continue
- }
-
- if err := l.Add(c, string(pair[1])); err != nil {
- return err
- }
- }
-
- return nil
-}
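A minimal sketch of Decode on a raw capability announcement (parseCaps is illustrative):

package sketch

import "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"

// parseCaps decodes a raw space-separated capability announcement,
// e.g. the tail of the first advertised-refs pkt-line.
func parseCaps(raw []byte) (*capability.List, error) {
	l := capability.NewList()
	if err := l.Decode(raw); err != nil {
		return nil, err
	}
	return l, nil
}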
-
-// Get returns the values for a capability
-func (l *List) Get(capability Capability) []string {
- if _, ok := l.m[capability]; !ok {
- return nil
- }
-
- return l.m[capability].Values
-}
-
-// Set sets a capability removing the previous values
-func (l *List) Set(capability Capability, values ...string) error {
- if _, ok := l.m[capability]; ok {
- delete(l.m, capability)
- }
-
- return l.Add(capability, values...)
-}
-
-// Add adds a capability, values are optional
-func (l *List) Add(c Capability, values ...string) error {
- if err := l.validate(c, values); err != nil {
- return err
- }
-
- if !l.Supports(c) {
- l.m[c] = &entry{Name: c}
- l.sort = append(l.sort, c.String())
- }
-
- if len(values) == 0 {
- return nil
- }
-
- if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 {
- return ErrMultipleArguments
- }
-
- l.m[c].Values = append(l.m[c].Values, values...)
- return nil
-}
-
-func (l *List) validateNoEmptyArgs(values []string) error {
- for _, v := range values {
- if v == "" {
- return ErrEmtpyArgument
- }
- }
- return nil
-}
-
-func (l *List) validate(c Capability, values []string) error {
- if !known[c] {
- return l.validateNoEmptyArgs(values)
- }
- if requiresArgument[c] && len(values) == 0 {
- return ErrArgumentsRequired
- }
-
- if !requiresArgument[c] && len(values) != 0 {
- return ErrArguments
- }
-
- if !multipleArgument[c] && len(values) > 1 {
- return ErrMultipleArguments
- }
- return l.validateNoEmptyArgs(values)
-}
-
-// Supports returns true if capability is present
-func (l *List) Supports(capability Capability) bool {
- _, ok := l.m[capability]
- return ok
-}
-
-// Delete deletes a capability from the List
-func (l *List) Delete(capability Capability) {
- if !l.Supports(capability) {
- return
- }
-
- delete(l.m, capability)
- for i, c := range l.sort {
- if c != string(capability) {
- continue
- }
-
- l.sort = append(l.sort[:i], l.sort[i+1:]...)
- return
- }
-}
-
-// All returns a slice with all defined capabilities.
-func (l *List) All() []Capability {
- var cs []Capability
- for _, key := range l.sort {
- cs = append(cs, Capability(key))
- }
-
- return cs
-}
-
-// String generates the capabilities string; the capabilities are sorted in
-// insertion order
-func (l *List) String() string {
- var o []string
- for _, key := range l.sort {
- cap := l.m[Capability(key)]
- if len(cap.Values) == 0 {
- o = append(o, key)
- continue
- }
-
- for _, value := range cap.Values {
- o = append(o, fmt.Sprintf("%s=%s", key, value))
- }
- }
-
- return strings.Join(o, " ")
-}
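A minimal sketch of the List API above; Agent requires an argument, Sideband64k takes none (buildCaps is illustrative):

package sketch

import "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"

// buildCaps assembles a capability list and renders it; String joins
// the capabilities in insertion order.
func buildCaps() (string, error) {
	l := capability.NewList()
	if err := l.Add(capability.Sideband64k); err != nil {
		return "", err
	}
	if err := l.Add(capability.Agent, capability.DefaultAgent); err != nil {
		return "", err
	}
	return l.String(), nil // "side-band-64k agent=go-git/4.x"
}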
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go
deleted file mode 100644
index ab07ac8f74..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package packp
-
-import (
- "fmt"
-)
-
-type stateFn func() stateFn
-
-const (
- // common
- hashSize = 40
-
- // advrefs
- head = "HEAD"
- noHead = "capabilities^{}"
-)
-
-var (
- // common
- sp = []byte(" ")
- eol = []byte("\n")
- eq = []byte{'='}
-
- // advertised-refs
- null = []byte("\x00")
- peeled = []byte("^{}")
- noHeadMark = []byte(" capabilities^{}\x00")
-
- // upload-request
- want = []byte("want ")
- shallow = []byte("shallow ")
- deepen = []byte("deepen")
- deepenCommits = []byte("deepen ")
- deepenSince = []byte("deepen-since ")
- deepenReference = []byte("deepen-not ")
-
- // shallow-update
- unshallow = []byte("unshallow ")
-
- // server-response
- ack = []byte("ACK")
- nak = []byte("NAK")
-
- // updreq
- shallowNoSp = []byte("shallow")
-)
-
-func isFlush(payload []byte) bool {
- return len(payload) == 0
-}
-
-// ErrUnexpectedData represents unexpected data found while decoding a message
-type ErrUnexpectedData struct {
- Msg string
- Data []byte
-}
-
-// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and
-// the message given
-func NewErrUnexpectedData(msg string, data []byte) error {
- return &ErrUnexpectedData{Msg: msg, Data: data}
-}
-
-func (err *ErrUnexpectedData) Error() string {
- if len(err.Data) == 0 {
- return err.Msg
- }
-
- return fmt.Sprintf("%s (%s)", err.Msg, err.Data)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go
deleted file mode 100644
index 4950d1d662..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go
+++ /dev/null
@@ -1,724 +0,0 @@
-package packp
-
-/*
-
-A nice way to trace the real data transmitted and received by git, use:
-
-GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
-GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
-
-Here follows a copy of the current protocol specification at the time of
-this writing.
-
-(Please notice that most http git servers will add a flush-pkt after the
-first pkt-line when using smart HTTP.)
-
-
-Documentation Common to Pack and Http Protocols
-===============================================
-
-ABNF Notation
--------------
-
-ABNF notation as described by RFC 5234 is used within the protocol documents,
-except the following replacement core rules are used:
-----
- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
-----
-
-We also define the following common rules:
-----
- NUL = %x00
- zero-id = 40*"0"
- obj-id = 40*(HEXDIG)
-
- refname = "HEAD"
- refname /= "refs/" <see discussion below>
-----
-
-A refname is a hierarchical octet string beginning with "refs/" and
-not violating the 'git-check-ref-format' command's validation rules.
-More specifically, they:
-
-. They can include slash `/` for hierarchical (directory)
- grouping, but no slash-separated component can begin with a
- dot `.`.
-
-. They must contain at least one `/`. This enforces the presence of a
- category like `heads/`, `tags/` etc. but the actual names are not
- restricted.
-
-. They cannot have two consecutive dots `..` anywhere.
-
-. They cannot have ASCII control characters (i.e. bytes whose
- values are lower than \040, or \177 `DEL`), space, tilde `~`,
- caret `^`, colon `:`, question-mark `?`, asterisk `*`,
- or open bracket `[` anywhere.
-
-. They cannot end with a slash `/` or a dot `.`.
-
-. They cannot end with the sequence `.lock`.
-
-. They cannot contain a sequence `@{`.
-
-. They cannot contain a `\\`.
-
-
-pkt-line Format
----------------
-
-Much (but not all) of the payload is described around pkt-lines.
-
-A pkt-line is a variable length binary string. The first four bytes
-of the line, the pkt-len, indicates the total length of the line,
-in hexadecimal. The pkt-len includes the 4 bytes used to contain
-the length's hexadecimal representation.
-
-A pkt-line MAY contain binary data, so implementors MUST ensure
-pkt-line parsing/formatting routines are 8-bit clean.
-
-A non-binary line SHOULD BE terminated by an LF, which if present
-MUST be included in the total length. Receivers MUST treat pkt-lines
-with non-binary data the same whether or not they contain the trailing
-LF (stripping the LF if present, and not complaining when it is
-missing).
-
-The maximum length of a pkt-line's data component is 65516 bytes.
-Implementations MUST NOT send pkt-line whose length exceeds 65520
-(65516 bytes of payload + 4 bytes of length data).
-
-Implementations SHOULD NOT send an empty pkt-line ("0004").
-
-A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
-is a special case and MUST be handled differently than an empty
-pkt-line ("0004").
-
-----
- pkt-line = data-pkt / flush-pkt
-
- data-pkt = pkt-len pkt-payload
- pkt-len = 4*(HEXDIG)
- pkt-payload = (pkt-len - 4)*(OCTET)
-
- flush-pkt = "0000"
-----
-
-Examples (as C-style strings):
-
-----
- pkt-line actual value
- ---------------------------------
- "0006a\n" "a\n"
- "0005a" "a"
- "000bfoobar\n" "foobar\n"
- "0004" ""
-----
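A one-line Go helper matching the table above; pktLine is illustrative, not part of go-git, and does not produce the flush-pkt special case:

package sketch

import "fmt"

// pktLine frames a payload as a data-pkt: the 4-byte pkt-len is the
// payload length plus the 4 length bytes, in lower-case hexadecimal.
// pktLine("a\n") == "0006a\n"; the flush-pkt "0000" is a special case
// and is never produced by this helper.
func pktLine(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}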
-
-Packfile transfer protocols
-===========================
-
-Git supports transferring data in packfiles over the ssh://, git://, http:// and
-file:// transports. There exist two sets of protocols, one for pushing
-data from a client to a server and another for fetching data from a
-server to a client. The three transports (ssh, git, file) use the same
-protocol to transfer data. http is documented in http-protocol.txt.
-
-The processes invoked in the canonical Git implementation are 'upload-pack'
-on the server side and 'fetch-pack' on the client side for fetching data;
-then 'receive-pack' on the server and 'send-pack' on the client for pushing
-data. The protocol functions to have a server tell a client what is
-currently on the server, then for the two to negotiate the smallest amount
-of data to send in order to fully update one or the other.
-
-pkt-line Format
----------------
-
-The descriptions below build on the pkt-line format described in
-protocol-common.txt. When the grammar indicates `PKT-LINE(...)`, unless
-otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
-include a LF, but the receiver MUST NOT complain if it is not present.
-
-Transports
-----------
-There are three transports over which the packfile protocol is
-initiated. The Git transport is a simple, unauthenticated server that
-takes the command (almost always 'upload-pack', though Git
-servers can be configured to be globally writable, in which 'receive-
-pack' initiation is also allowed) with which the client wishes to
-communicate and executes it and connects it to the requesting
-process.
-
-In the SSH transport, the client just runs the 'upload-pack'
-or 'receive-pack' process on the server over the SSH protocol and then
-communicates with that invoked process over the SSH connection.
-
-The file:// transport runs the 'upload-pack' or 'receive-pack'
-process locally and communicates with it over a pipe.
-
-Git Transport
--------------
-
-The Git transport starts off by sending the command and repository
-on the wire using the pkt-line format, followed by a NUL byte and a
-hostname parameter, terminated by a NUL byte.
-
- 0032git-upload-pack /project.git\0host=myserver.com\0
-
---
- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
- request-command = "git-upload-pack" / "git-receive-pack" /
- "git-upload-archive" ; case sensitive
- pathname = *( %x01-ff ) ; exclude NUL
- host-parameter = "host=" hostname [ ":" port ]
---
-
-Only host-parameter is allowed in the git-proto-request. Clients
-MUST NOT attempt to send additional parameters. It is used for the
-git-daemon name based virtual hosting. See --interpolated-path
-option to git daemon, with the %H/%CH format characters.
-
-Basically what the Git client is doing to connect to an 'upload-pack'
-process on the server side over the Git protocol is this:
-
- $ echo -e -n \
- "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
-
-If the server refuses the request for some reason, it could abort
-gracefully with an error message.
-
-----
- error-line = PKT-LINE("ERR" SP explanation-text)
-----
-
-
-SSH Transport
--------------
-
-Initiating the upload-pack or receive-pack processes over SSH is
-executing the binary on the server via SSH remote execution.
-It is basically equivalent to running this:
-
- $ ssh git.example.com "git-upload-pack '/project.git'"
-
-For a server to support Git pushing and pulling for a given user over
-SSH, that user needs to be able to execute one or both of those
-commands via the SSH shell that they are provided on login. On some
-systems, that shell access is limited to only being able to run those
-two commands, or even just one of them.
-
-In an ssh:// format URI, it's absolute in the URI, so the '/' after
-the host name (or port number) is sent as an argument, which is then
-read by the remote git-upload-pack exactly as is, so it's effectively
-an absolute path in the remote filesystem.
-
- git clone ssh://user@example.com/project.git
- |
- v
- ssh user@example.com "git-upload-pack '/project.git'"
-
-In a "user@host:path" format URI, its relative to the user's home
-directory, because the Git client will run:
-
- git clone user@example.com:project.git
- |
- v
- ssh user@example.com "git-upload-pack 'project.git'"
-
-The exception is if a '~' is used, in which case
-we execute it without the leading '/'.
-
- ssh://user@example.com/~alice/project.git
- |
- v
- ssh user@example.com "git-upload-pack '~alice/project.git'"
-
-A few things to remember here:
-
-- The "command name" is spelled with dash (e.g. git-upload-pack), but
- this can be overridden by the client;
-
-- The repository path is always quoted with single quotes.
-
-Fetching Data From a Server
----------------------------
-
-When one Git repository wants to get data that a second repository
-has, the first can 'fetch' from the second. This operation determines
-what data the server has that the client does not, then streams that
-data down to the client in packfile format.
-
-
-Reference Discovery
--------------------
-
-When the client initially connects, the server will immediately respond
-with a listing of each reference it has (all branches and tags) along
-with the object name that each reference currently points to.
-
- $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
- nc -v example.com 9418
- 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
- side-band side-band-64k ofs-delta shallow no-progress include-tag
- 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
- 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
- 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
- 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
- 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
- 0000
-
-The returned response is a pkt-line stream describing each ref and
-its current value. The stream MUST be sorted by name according to
-the C locale ordering.
-
-If HEAD is a valid ref, HEAD MUST appear as the first advertised
-ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
-advertisement list at all, but other refs may still appear.
-
-The stream MUST include capability declarations behind a NUL on the
-first ref. The peeled value of a ref (that is "ref^{}") MUST be
-immediately after the ref itself, if presented. A conforming server
-MUST peel the ref if it's an annotated tag.
-
-----
- advertised-refs = (no-refs / list-of-refs)
- *shallow
- flush-pkt
-
- no-refs = PKT-LINE(zero-id SP "capabilities^{}"
- NUL capability-list)
-
- list-of-refs = first-ref *other-ref
- first-ref = PKT-LINE(obj-id SP refname
- NUL capability-list)
-
- other-ref = PKT-LINE(other-tip / other-peeled)
- other-tip = obj-id SP refname
- other-peeled = obj-id SP refname "^{}"
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- capability-list = capability *(SP capability)
- capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
- LC_ALPHA = %x61-7A
-----
-
-Server and client MUST use lowercase for obj-id; both MUST treat obj-id
-as case-insensitive.
-
-See protocol-capabilities.txt for a list of allowed server capabilities
-and descriptions.
-
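-The advertisement can be consumed with the pkt-line scanner shipped in
-this library. A minimal sketch (listRefs is illustrative, not part of
-the protocol):
-
-    import (
-        "bytes"
-        "fmt"
-        "io"
-
-        "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-    )
-
-    // listRefs reads an advertised-refs stream from r and prints each ref.
-    func listRefs(r io.Reader) error {
-        s := pktline.NewScanner(r)
-        for s.Scan() {
-            line := s.Bytes()
-            if len(line) == 0 { // flush-pkt: end of the advertisement
-                break
-            }
-            // the first ref carries the capability list after a NUL byte
-            if i := bytes.IndexByte(line, 0); i != -1 {
-                fmt.Printf("capabilities: %s\n", line[i+1:])
-                line = line[:i]
-            }
-            // every other line is "<40-hex obj-id> SP <refname>"
-            fmt.Printf("%s -> %s\n", bytes.TrimSpace(line[41:]), line[:40])
-        }
-        return s.Err()
-    }
-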
-Packfile Negotiation
---------------------
-After reference and capabilities discovery, the client can decide to
-terminate the connection by sending a flush-pkt, telling the server it
-can now gracefully terminate and disconnect, when the client does not
-need any pack data. This can happen with the ls-remote command, and
-also can happen when the client is already up-to-date.
-
-Otherwise, it enters the negotiation phase, where the client and
-server determine what the minimal packfile necessary for transport is,
-by telling the server what objects it wants, its shallow objects
-(if any), and the maximum commit depth it wants (if any). The client
-will also send a list of the capabilities it wants to be in effect,
-out of what the server said it could do with the first 'want' line.
-
-----
- upload-request = want-list
- *shallow-line
- *1depth-request
- flush-pkt
-
- want-list = first-want
- *additional-want
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- depth-request = PKT-LINE("deepen" SP depth) /
- PKT-LINE("deepen-since" SP timestamp) /
- PKT-LINE("deepen-not" SP ref)
-
- first-want = PKT-LINE("want" SP obj-id SP capability-list)
- additional-want = PKT-LINE("want" SP obj-id)
-
- depth = 1*DIGIT
-----
-
-Clients MUST send all the obj-ids they want from the reference
-discovery phase as 'want' lines. Clients MUST send at least one
-'want' command in the request body. Clients MUST NOT mention an
-obj-id in a 'want' command which did not appear in the response
-obtained through ref discovery.
-
-The client MUST write all obj-ids which it only has shallow copies
-of (meaning that it does not have the parents of a commit) as
-'shallow' lines so that the server is aware of the limitations of
-the client's history.
-
-The client now sends the maximum commit history depth it wants for
-this transaction, which is the number of commits it wants from the
-tip of the history, if any, as a 'deepen' line. A depth of 0 is the
-same as not making a depth request. The client does not want to receive
-any commits beyond this depth, nor does it want objects needed only to
-complete those commits. Commits whose parents are not received as a
-result are defined as shallow and marked as such in the server. This
-information is sent back to the client in the next step.
-
-Once all the 'want's and 'shallow's (and optional 'deepen') are
-transferred, clients MUST send a flush-pkt, to tell the server side
-that it is done sending the list.
-
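-With the packp types removed later in this diff, the whole
-upload-request can be produced in a few lines. A minimal sketch,
-assuming w is the connection to the server and the io, plumbing and
-packp imports (sendUploadRequest is illustrative):
-
-    func sendUploadRequest(w io.Writer) error {
-        req := packp.NewUploadRequest()
-        req.Wants = append(req.Wants, plumbing.NewHash(
-            "74730d410fcb6603ace96f1dc55ea6196122532d"))
-        if err := req.Validate(); err != nil {
-            return err
-        }
-        // Encode writes the want lines followed by a flush-pkt
-        return req.Encode(w)
-    }
-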
-Otherwise, if the client sent a positive depth request, the server
-will determine which commits will and will not be shallow and
-send this information to the client. If the client did not request
-a positive depth, this step is skipped.
-
-----
- shallow-update = *shallow-line
- *unshallow-line
- flush-pkt
-
- shallow-line = PKT-LINE("shallow" SP obj-id)
-
- unshallow-line = PKT-LINE("unshallow" SP obj-id)
-----
-
-If the client has requested a positive depth, the server will compute
-the set of commits which are no deeper than the desired depth. The set
-of commits starts at the client's wants.
-
-The server writes 'shallow' lines for each
-commit whose parents will not be sent as a result. The server writes
-an 'unshallow' line for each commit which the client has indicated is
-shallow, but is no longer shallow at the currently requested depth
-(that is, its parents will now be sent). The server MUST NOT mark
-as unshallow anything which the client has not indicated was shallow.
-
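-On the client side, this answer is decoded by the ShallowUpdate type
-defined further down in this diff. A minimal, illustrative sketch:
-
-    func readShallowUpdate(r io.Reader) (*packp.ShallowUpdate, error) {
-        upd := new(packp.ShallowUpdate)
-        // Decode consumes shallow/unshallow lines up to the flush-pkt
-        if err := upd.Decode(r); err != nil {
-            return nil, err
-        }
-        return upd, nil
-    }
-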
-Now the client will send a list of the obj-ids it has using 'have'
-lines, so the server can make a packfile that only contains the objects
-that the client needs. In multi_ack mode, the canonical implementation
-will send up to 32 of these at a time, then will send a flush-pkt. The
-canonical implementation will skip ahead and send the next 32 immediately,
-so that there is always a block of 32 "in-flight on the wire" at a time.
-
-----
- upload-haves = have-list
- compute-end
-
- have-list = *have-line
- have-line = PKT-LINE("have" SP obj-id)
- compute-end = flush-pkt / PKT-LINE("done")
-----
-
-If the server reads 'have' lines, it then will respond by ACKing any
-of the obj-ids the client said it had that the server also has. The
-server will ACK obj-ids differently depending on which ack mode is
-chosen by the client.
-
-In multi_ack mode:
-
- * the server will respond with 'ACK obj-id continue' for any common
- commits.
-
- * once the server has found an acceptable common base commit and is
- ready to make a packfile, it will blindly ACK all 'have' obj-ids
- back to the client.
-
- * the server will then send a 'NAK' and then wait for another response
- from the client - either a 'done' or another list of 'have' lines.
-
-In multi_ack_detailed mode:
-
- * the server will differentiate the ACKs where it is signaling
- that it is ready to send data with 'ACK obj-id ready' lines, and
- signals the identified common commits with 'ACK obj-id common' lines.
-
-Without either multi_ack or multi_ack_detailed:
-
- * upload-pack sends "ACK obj-id" on the first common object it finds.
- After that it says nothing until the client gives it a "done".
-
- * upload-pack sends "NAK" on a flush-pkt if no common object
- has been found yet. If one has been found, and thus an ACK
- was already sent, it's silent on the flush-pkt.
-
-After the client has gotten enough ACK responses that it can determine
-that the server has enough information to send an efficient packfile
-(in the canonical implementation, this is determined when it has received
-enough ACKs that it can color everything left in the --date-order queue
-as common with the server, or the --date-order queue is empty), or the
-client determines that it wants to give up (in the canonical implementation,
-this is determined when the client sends 256 'have' lines without getting
-any of them ACKed by the server - meaning there is nothing in common and
-the server should just send all of its objects), then the client will send
-a 'done' command. The 'done' command signals to the server that the client
-is ready to receive its packfile data.
-
-However, the 256 limit *only* turns on in the canonical client
-implementation if we have received at least one "ACK %s continue"
-during a prior round. This helps to ensure that at least one common
-ancestor is found before we give up entirely.
-
-Once the 'done' line is read from the client, the server will either
-send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
-name of the last commit determined to be common. The server only sends
-ACK after 'done' if there is at least one common base and multi_ack or
-multi_ack_detailed is enabled. The server always sends NAK after 'done'
-if there is no common base found.
-
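-This library collects those final ACK lines with the ServerResponse
-type (srvresp.go below), which only handles the mode without multi_ack.
-A minimal sketch, assuming the bufio, io, plumbing and packp imports:
-
-    func readServerResponse(r io.Reader) ([]plumbing.Hash, error) {
-        var resp packp.ServerResponse
-        // false: multi_ack and multi_ack_detailed are not supported here
-        if err := resp.Decode(bufio.NewReader(r), false); err != nil {
-            return nil, err
-        }
-        return resp.ACKs, nil
-    }
-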
-Then the server will start sending its packfile data.
-
-----
- server-response = *ack_multi ack / nak
- ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
- ack_status = "continue" / "common" / "ready"
- ack = PKT-LINE("ACK" SP obj-id)
- nak = PKT-LINE("NAK")
-----
-
-A simple clone may look like this (with no 'have' lines):
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
- C: 0009done\n
-
- S: 0008NAK\n
- S: [PACKFILE]
-----
-
-An incremental update (fetch) response might look like this:
-
-----
- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
- side-band-64k ofs-delta\n
- C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
- C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
- C: 0000
- C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
- C: [30 more have lines]
- C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
- C: 0000
-
- S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
- S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
- S: 0008NAK\n
-
- C: 0009done\n
-
- S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
- S: [PACKFILE]
-----
-
-
-Packfile Data
--------------
-
-Now that the client and server have finished negotiating the minimal
-amount of data that needs to be sent to the client, the server will
-construct and send the required data in packfile format.
-
-See pack-format.txt for what the packfile itself actually looks like.
-
-If 'side-band' or 'side-band-64k' capabilities have been specified by
-the client, the server will send the packfile data multiplexed.
-
-Each packet starts with the pkt-line length of the amount of data that
-follows, followed by a single byte specifying the sideband the
-following data is coming in on.
-
-In 'side-band' mode, it will send up to 999 data bytes plus 1 control
-code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
-mode it will send up to 65519 data bytes plus 1 control code, for a
-total of up to 65520 bytes in a pkt-line.
-
-The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
-packfile data, sideband '2' will be used for progress information that the
-client will generally print to stderr and sideband '3' is used for error
-information.
-
-If no 'side-band' capability was specified, the server will stream the
-entire packfile without multiplexing.
-
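-The demultiplexing rule is mechanical enough to sketch with this
-library's pkt-line scanner (demux and its parameters are illustrative;
-write errors are ignored for brevity):
-
-    func demux(r io.Reader, pack, progress io.Writer) error {
-        s := pktline.NewScanner(r)
-        for s.Scan() {
-            p := s.Bytes()
-            if len(p) == 0 { // flush-pkt: end of the multiplexed stream
-                return nil
-            }
-            switch p[0] { // the sideband byte
-            case 1:
-                pack.Write(p[1:])
-            case 2:
-                progress.Write(p[1:])
-            case 3:
-                return fmt.Errorf("remote error: %s", p[1:])
-            }
-        }
-        return s.Err()
-    }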
-
-Pushing Data To a Server
-------------------------
-
-Pushing data to a server will invoke the 'receive-pack' process on the
-server, which will allow the client to tell it which references it should
-update and then send all the data the server will need for those new
-references to be complete. Once all the data is received and validated,
-the server will then update its references to what the client specified.
-
-Authentication
---------------
-
-The protocol itself contains no authentication mechanisms. That is to be
-handled by the transport, such as SSH, before the 'receive-pack' process is
-invoked. If 'receive-pack' is configured over the Git transport, those
-repositories will be writable by anyone who can access that port (9418) as
-that transport is unauthenticated.
-
-Reference Discovery
--------------------
-
-The reference discovery phase is done nearly the same way as it is in the
-fetching protocol. Each reference obj-id and name on the server is sent
-in packet-line format to the client, followed by a flush-pkt. The only
-real difference is that the capability listing is different - the only
-possible values are 'report-status', 'delete-refs', 'ofs-delta' and
-'push-options'.
-
-Reference Update Request and Packfile Transfer
-----------------------------------------------
-
-Once the client knows what references the server is at, it can send a
-list of reference update requests. For each reference on the server
-that it wants to update, it sends a line listing the obj-id currently on
-the server, the obj-id the client would like to update it to and the name
-of the reference.
-
-This list is followed by a flush-pkt. Then the push options are transmitted
-one per packet followed by another flush-pkt. After that the packfile that
-should contain all the objects that the server will need to complete the new
-references will be sent.
-
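-With the types from updreq.go below, a single-branch push request can
-be sketched like this (push, w and pack are illustrative; the hashes
-match the master update in the example at the end of this section):
-
-    func push(w io.Writer, pack io.ReadCloser) error {
-        req := packp.NewReferenceUpdateRequest()
-        req.Commands = []*packp.Command{{
-            Name: plumbing.ReferenceName("refs/heads/master"),
-            Old:  plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"),
-            New:  plumbing.NewHash("5a3f6be755bbb7deae50065988cbfa1ffa9ab68a"),
-        }}
-        req.Packfile = pack // may be nil when only deleting refs
-        // Encode validates the commands, writes them and copies the packfile
-        return req.Encode(w)
-    }
-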
-----
- update-request = *shallow ( command-list | push-cert ) [packfile]
-
- shallow = PKT-LINE("shallow" SP obj-id)
-
- command-list = PKT-LINE(command NUL capability-list)
- *PKT-LINE(command)
- flush-pkt
-
- command = create / delete / update
- create = zero-id SP new-id SP name
- delete = old-id SP zero-id SP name
- update = old-id SP new-id SP name
-
- old-id = obj-id
- new-id = obj-id
-
- push-cert = PKT-LINE("push-cert" NUL capability-list LF)
- PKT-LINE("certificate version 0.1" LF)
- PKT-LINE("pusher" SP ident LF)
- PKT-LINE("pushee" SP url LF)
- PKT-LINE("nonce" SP nonce LF)
- PKT-LINE(LF)
- *PKT-LINE(command LF)
- *PKT-LINE(gpg-signature-lines LF)
- PKT-LINE("push-cert-end" LF)
-
- packfile = "PACK" 28*(OCTET)
-----
-
-If the receiving end does not support delete-refs, the sending end MUST
-NOT ask for the delete command.
-
-If the receiving end does not support push-cert, the sending end
-MUST NOT send a push-cert command. When a push-cert command is
-sent, command-list MUST NOT be sent; the commands recorded in the
-push certificate are used instead.
-
-The packfile MUST NOT be sent if the only command used is 'delete'.
-
-A packfile MUST be sent if either create or update command is used,
-even if the server already has all the necessary objects. In this
-case the client MUST send an empty packfile. The only time this
-is likely to happen is if the client is creating
-a new branch or a tag that points to an existing obj-id.
-
-The server will receive the packfile, unpack it, then validate for
-each reference being updated that it hasn't changed while the request
-was being processed (the obj-id is still the same as the old-id), and
-it will run any update hooks to make sure that the update is
-acceptable. If all of that is fine, the server will then update the
-references.
-
-Push Certificate
-----------------
-
-A push certificate begins with a set of header lines. After the
-header and an empty line, the protocol commands follow, one per
-line. Note that the trailing LF in push-cert PKT-LINEs is _not_
-optional; it must be present.
-
-Currently, the following header fields are defined:
-
-`pusher` ident::
- Identify the GPG key in "Human Readable Name <email@address>"
- format.
-
-`pushee` url::
- The repository URL (anonymized, if the URL contains
- authentication material) the user who ran `git push`
- intended to push into.
-
-`nonce` nonce::
- The 'nonce' string the receiving repository asked the
- pushing user to include in the certificate, to prevent
- replay attacks.
-
-The GPG signature lines are a detached signature for the contents
-recorded in the push certificate before the signature block begins.
-The detached signature is used to certify that the commands were
-given by the pusher, who must be the signer.
-
-Report Status
--------------
-
-After receiving the pack data from the sender, the receiver sends a
-report if 'report-status' capability is in effect.
-It is a short listing of what happened in that update. It will first
-list the status of the packfile unpacking as either 'unpack ok' or
-'unpack [error]'. Then it will list the status for each of the references
-that it tried to update. Each line is either 'ok [refname]' if the
-update was successful, or 'ng [refname] [error]' if the update was not.
-
-----
- report-status = unpack-status
- 1*(command-status)
- flush-pkt
-
- unpack-status = PKT-LINE("unpack" SP unpack-result)
- unpack-result = "ok" / error-msg
-
- command-status = command-ok / command-fail
- command-ok = PKT-LINE("ok" SP refname)
- command-fail = PKT-LINE("ng" SP refname SP error-msg)
-
- error-msg = 1*(OCTET) ; where not "ok"
-----
-
-Updates can be unsuccessful for a number of reasons. The reference can have
-changed since the reference discovery phase was originally sent, meaning
-someone pushed in the meantime. The reference being pushed could be a
-non-fast-forward reference and the update hooks or configuration could be
-set to not allow that, etc. Also, some references can be updated while others
-can be rejected.
-
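-This library decodes the report with the ReportStatus type below; its
-Error method flattens an 'unpack' failure or any 'ng' line into a
-single Go error. A minimal, illustrative sketch:
-
-    func readReport(r io.Reader) error {
-        rs := packp.NewReportStatus()
-        if err := rs.Decode(r); err != nil {
-            return err
-        }
-        return rs.Error()
-    }
-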
-An example client/server communication might look like this:
-
-----
- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
- S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
- S: 0000
-
- C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
- C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
- C: 0000
- C: [PACKDATA]
-
- S: 000eunpack ok\n
- S: 0018ok refs/heads/debug\n
- S: 002ang refs/heads/master non-fast-forward\n
-----
-*/
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go
deleted file mode 100644
index 29c1a4cd86..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-const (
- ok = "ok"
-)
-
-// ReportStatus is a report status message, as used in the git-receive-pack
-// process whenever the 'report-status' capability is negotiated.
-type ReportStatus struct {
- UnpackStatus string
- CommandStatuses []*CommandStatus
-}
-
-// NewReportStatus creates a new ReportStatus message.
-func NewReportStatus() *ReportStatus {
- return &ReportStatus{}
-}
-
-// Error returns the first error if any.
-func (s *ReportStatus) Error() error {
- if s.UnpackStatus != ok {
- return fmt.Errorf("unpack error: %s", s.UnpackStatus)
- }
-
- for _, s := range s.CommandStatuses {
- if err := s.Error(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Encode writes the report status to a writer.
-func (s *ReportStatus) Encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
- if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil {
- return err
- }
-
- for _, cs := range s.CommandStatuses {
- if err := cs.encode(w); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
-
-// Decode reads from the given reader and decodes a report-status message. It
-// does not read more input than what is needed to fill the report status.
-func (s *ReportStatus) Decode(r io.Reader) error {
- scan := pktline.NewScanner(r)
- if err := s.scanFirstLine(scan); err != nil {
- return err
- }
-
- if err := s.decodeReportStatus(scan.Bytes()); err != nil {
- return err
- }
-
- flushed := false
- for scan.Scan() {
- b := scan.Bytes()
- if isFlush(b) {
- flushed = true
- break
- }
-
- if err := s.decodeCommandStatus(b); err != nil {
- return err
- }
- }
-
- if !flushed {
- return fmt.Errorf("missing flush")
- }
-
- return scan.Err()
-}
-
-func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error {
- if scan.Scan() {
- return nil
- }
-
- if scan.Err() != nil {
- return scan.Err()
- }
-
- return io.ErrUnexpectedEOF
-}
-
-func (s *ReportStatus) decodeReportStatus(b []byte) error {
- if isFlush(b) {
- return fmt.Errorf("premature flush")
- }
-
- b = bytes.TrimSuffix(b, eol)
-
- line := string(b)
- fields := strings.SplitN(line, " ", 2)
- if len(fields) != 2 || fields[0] != "unpack" {
- return fmt.Errorf("malformed unpack status: %s", line)
- }
-
- s.UnpackStatus = fields[1]
- return nil
-}
-
-func (s *ReportStatus) decodeCommandStatus(b []byte) error {
- b = bytes.TrimSuffix(b, eol)
-
- line := string(b)
- fields := strings.SplitN(line, " ", 3)
- status := ok
- if len(fields) == 3 && fields[0] == "ng" {
- status = fields[2]
- } else if len(fields) != 2 || fields[0] != "ok" {
- return fmt.Errorf("malformed command status: %s", line)
- }
-
- cs := &CommandStatus{
- ReferenceName: plumbing.ReferenceName(fields[1]),
- Status: status,
- }
- s.CommandStatuses = append(s.CommandStatuses, cs)
- return nil
-}
-
-// CommandStatus is the status of a reference in a report status.
-// See ReportStatus struct.
-type CommandStatus struct {
- ReferenceName plumbing.ReferenceName
- Status string
-}
-
-// Error returns the error, if any.
-func (s *CommandStatus) Error() error {
- if s.Status == ok {
- return nil
- }
-
- return fmt.Errorf("command error on %s: %s",
- s.ReferenceName.String(), s.Status)
-}
-
-func (s *CommandStatus) encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
- if s.Error() == nil {
- return e.Encodef("ok %s\n", s.ReferenceName.String())
- }
-
- return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go
deleted file mode 100644
index fce4e3be2b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-const (
- shallowLineLen = 48
- unshallowLineLen = 50
-)
-
-type ShallowUpdate struct {
- Shallows []plumbing.Hash
- Unshallows []plumbing.Hash
-}
-
-func (r *ShallowUpdate) Decode(reader io.Reader) error {
- s := pktline.NewScanner(reader)
-
- for s.Scan() {
- line := s.Bytes()
- line = bytes.TrimSpace(line)
-
- var err error
- switch {
- case bytes.HasPrefix(line, shallow):
- err = r.decodeShallowLine(line)
- case bytes.HasPrefix(line, unshallow):
- err = r.decodeUnshallowLine(line)
- case bytes.Equal(line, pktline.Flush):
- return nil
- }
-
- if err != nil {
- return err
- }
- }
-
- return s.Err()
-}
-
-func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
- hash, err := r.decodeLine(line, shallow, shallowLineLen)
- if err != nil {
- return err
- }
-
- r.Shallows = append(r.Shallows, hash)
- return nil
-}
-
-func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
- hash, err := r.decodeLine(line, unshallow, unshallowLineLen)
- if err != nil {
- return err
- }
-
- r.Unshallows = append(r.Unshallows, hash)
- return nil
-}
-
-func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
- if len(line) != expLen {
- return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
- }
-
- raw := string(line[expLen-40 : expLen])
- return plumbing.NewHash(raw), nil
-}
-
-func (r *ShallowUpdate) Encode(w io.Writer) error {
- e := pktline.NewEncoder(w)
-
- for _, h := range r.Shallows {
- if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil {
- return err
- }
- }
-
- for _, h := range r.Unshallows {
- if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go
deleted file mode 100644
index de5001281f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sideband
-
-// Type is the sideband type: "side-band" or "side-band-64k"
-type Type int8
-
-const (
-	// Sideband is the legacy sideband type, with up to 1000-byte messages
-	Sideband Type = iota
-	// Sideband64k is the sideband type with up to 65519-byte messages
-	Sideband64k Type = iota
-
- // MaxPackedSize for Sideband type
- MaxPackedSize = 1000
- // MaxPackedSize64k for Sideband64k type
- MaxPackedSize64k = 65520
-)
-
-// Channel is a sideband channel
-type Channel byte
-
-// WithPayload encodes the payload as a message
-func (ch Channel) WithPayload(payload []byte) []byte {
- return append([]byte{byte(ch)}, payload...)
-}
-
-const (
- // PackData packfile content
- PackData Channel = 1
- // ProgressMessage progress messages
- ProgressMessage Channel = 2
- // ErrorMessage fatal error message just before stream aborts
- ErrorMessage Channel = 3
-)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go
deleted file mode 100644
index 352336dc68..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package sideband
-
-import (
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-// ErrMaxPackedExceeded is returned by Read if the maximum packed size is exceeded
-var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")
-
-// Progress is where the progress information is written
-type Progress interface {
- io.Writer
-}
-
-// Demuxer demultiplexes the progress reports and error info interleaved with the
-// packfile itself.
-//
-// A sideband has three different channels: the main one, called PackData,
-// contains the packfile data; the ErrorMessage channel, which contains server
-// errors; and the last one, the ProgressMessage channel, containing information
-// about the ongoing task happening in the server (optional, it can be
-// suppressed by sending the NoProgress or Quiet capabilities to the server).
-//
-// In order to demultiplex the data stream, the method `Read` should be called
-// to retrieve the PackData channel; the incoming data from the ProgressMessage
-// channel is written to `Progress` (if any), and if any message is retrieved
-// from the ErrorMessage channel an error is returned and we can assume that the
-// connection has been closed.
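-//
-// A minimal usage sketch (r is an io.Reader carrying the multiplexed stream
-// and dst receives the packfile bytes):
-//
-//   d := NewDemuxer(Sideband64k, r)
-//   d.Progress = os.Stderr
-//   _, err := io.Copy(dst, d)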
-type Demuxer struct {
- t Type
- r io.Reader
- s *pktline.Scanner
-
- max int
- pending []byte
-
- // Progress is where the progress messages are stored
- Progress Progress
-}
-
-// NewDemuxer returns a new Demuxer for the given t that reads from r
-func NewDemuxer(t Type, r io.Reader) *Demuxer {
- max := MaxPackedSize64k
- if t == Sideband {
- max = MaxPackedSize
- }
-
- return &Demuxer{
- t: t,
- r: r,
- max: max,
- s: pktline.NewScanner(r),
- }
-}
-
-// Read reads up to len(b) bytes from the PackData channel into b. An error
-// can be returned if an error happens when reading or if a message is sent on
-// the ErrorMessage channel.
-//
-// When a ProgressMessage is read, it is not copied to b; instead, it is
-// written to Progress.
-func (d *Demuxer) Read(b []byte) (n int, err error) {
- var read, req int
-
- req = len(b)
- for read < req {
- n, err := d.doRead(b[read:req])
- read += n
-
- if err != nil {
- return read, err
- }
- }
-
- return read, nil
-}
-
-func (d *Demuxer) doRead(b []byte) (int, error) {
- read, err := d.nextPackData()
- size := len(read)
- wanted := len(b)
-
- if size > wanted {
- d.pending = read[wanted:]
- }
-
- if wanted > size {
- wanted = size
- }
-
- size = copy(b, read[:wanted])
- return size, err
-}
-
-func (d *Demuxer) nextPackData() ([]byte, error) {
- content := d.getPending()
- if len(content) != 0 {
- return content, nil
- }
-
- if !d.s.Scan() {
- if err := d.s.Err(); err != nil {
- return nil, err
- }
-
- return nil, io.EOF
- }
-
- content = d.s.Bytes()
-
- size := len(content)
- if size == 0 {
- return nil, nil
- } else if size > d.max {
- return nil, ErrMaxPackedExceeded
- }
-
- switch Channel(content[0]) {
- case PackData:
- return content[1:], nil
- case ProgressMessage:
- if d.Progress != nil {
- _, err := d.Progress.Write(content[1:])
- return nil, err
- }
- case ErrorMessage:
- return nil, fmt.Errorf("unexpected error: %s", content[1:])
- default:
- return nil, fmt.Errorf("unknown channel %s", content)
- }
-
- return nil, nil
-}
-
-func (d *Demuxer) getPending() (b []byte) {
- if len(d.pending) == 0 {
- return nil
- }
-
- content := d.pending
- d.pending = nil
-
- return content
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go
deleted file mode 100644
index c5d2429529..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Package sideband implements a sideband multiplexer/demultiplexer
-package sideband
-
-// If 'side-band' or 'side-band-64k' capabilities have been specified by
-// the client, the server will send the packfile data multiplexed.
-//
-// Either mode indicates that the packfile data will be streamed broken
-// up into packets of up to either 1000 bytes in the case of 'side_band',
-// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
-// of a leading 4-byte pkt-line length of how much data is in the packet,
-// followed by a 1-byte stream code, followed by the actual data.
-//
-// The stream code can be one of:
-//
-// 1 - pack data
-// 2 - progress messages
-// 3 - fatal error message just before stream aborts
-//
-// The "side-band-64k" capability came about as a way for newer clients
-// that can handle much larger packets to request packets that are
-// actually crammed nearly full, while maintaining backward compatibility
-// for the older clients.
-//
-// Further, with side-band and its up to 1000-byte messages, it's actually
-// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
-// same deal, you have up to 65519 bytes of data and 1 byte for the stream
-// code.
-//
-// The client MUST send a maximum of one of "side-band" and "side-
-// band-64k". The server MUST diagnose it as an error if the client
-// requests both.
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go
deleted file mode 100644
index 45fecc2cbd..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sideband
-
-import (
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-// Muxer multiplexes the packfile along with the progress messages and the
-// error information. The multiplexing is performed using the pktline format.
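-//
-// A minimal usage sketch (w is the connection being written to; the payloads
-// are illustrative):
-//
-//   mux := NewMuxer(Sideband64k, w)
-//   mux.Write(packChunk)                                // PackData channel
-//   mux.WriteChannel(ProgressMessage, []byte("done\n")) // progress channel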
-type Muxer struct {
- max int
- e *pktline.Encoder
-}
-
-const chLen = 1
-
-// NewMuxer returns a new Muxer for the given t that writes on w.
-//
-// If t is equal to `Sideband`, the max pack size is set to MaxPackedSize; if
-// any other value is given, the max pack size is set to MaxPackedSize64k,
-// which is the maximum length of a line in pktline format.
-func NewMuxer(t Type, w io.Writer) *Muxer {
- max := MaxPackedSize64k
- if t == Sideband {
- max = MaxPackedSize
- }
-
- return &Muxer{
- max: max - chLen,
- e: pktline.NewEncoder(w),
- }
-}
-
-// Write writes p to the PackData channel
-func (m *Muxer) Write(p []byte) (int, error) {
- return m.WriteChannel(PackData, p)
-}
-
-// WriteChannel writes p to the given channel. This method can be used with
-// any channel, but it is recommended to use it only for the ProgressMessage
-// and ErrorMessage channels, and to use Write for the PackData channel.
-func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
- wrote := 0
- size := len(p)
- for wrote < size {
- n, err := m.doWrite(t, p[wrote:])
- wrote += n
-
- if err != nil {
- return wrote, err
- }
- }
-
- return wrote, nil
-}
-
-func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
- sz := len(p)
- if sz > m.max {
- sz = m.max
- }
-
- return sz, m.e.Encode(ch.WithPayload(p[:sz]))
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go
deleted file mode 100644
index 6a91991839..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package packp
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-const ackLineLen = 44
-
-// ServerResponse is the object acknowledgement response from the upload-pack service
-type ServerResponse struct {
- ACKs []plumbing.Hash
-}
-
-// Decode decodes the response into the struct. isMultiACK should be true if
-// the request was done with the multi_ack or multi_ack_detailed capability.
-func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
- // TODO: implement support for multi_ack or multi_ack_detailed responses
- if isMultiACK {
- return errors.New("multi_ack and multi_ack_detailed are not supported")
- }
-
- s := pktline.NewScanner(reader)
-
- for s.Scan() {
- line := s.Bytes()
-
- if err := r.decodeLine(line); err != nil {
- return err
- }
-
-		// we need to detect when the end of a response header and the beginning
-		// of a packfile header happen; some requests to the git daemon
-		// produce a duplicate ACK header even when multi_ack is not supported.
- stop, err := r.stopReading(reader)
- if err != nil {
- return err
- }
-
- if stop {
- break
- }
- }
-
- return s.Err()
-}
-
-// stopReading peeks at the buffer, without moving the read pointer, to detect
-// whether another valid command such as ACK or NAK follows; if not, the
-// response has ended and reading should stop.
-func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
- ahead, err := reader.Peek(7)
- if err == io.EOF {
- return true, nil
- }
-
- if err != nil {
- return false, err
- }
-
- if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
- return false, nil
- }
-
- if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
- return false, nil
- }
-
- return true, nil
-}
-
-func (r *ServerResponse) isValidCommand(b []byte) bool {
- commands := [][]byte{ack, nak}
- for _, c := range commands {
- if bytes.Equal(b, c) {
- return true
- }
- }
-
- return false
-}
-
-func (r *ServerResponse) decodeLine(line []byte) error {
- if len(line) == 0 {
- return fmt.Errorf("unexpected flush")
- }
-
- if bytes.Equal(line[0:3], ack) {
- return r.decodeACKLine(line)
- }
-
- if bytes.Equal(line[0:3], nak) {
- return nil
- }
-
- return fmt.Errorf("unexpected content %q", string(line))
-}
-
-func (r *ServerResponse) decodeACKLine(line []byte) error {
- if len(line) < ackLineLen {
- return fmt.Errorf("malformed ACK %q", line)
- }
-
- sp := bytes.Index(line, []byte(" "))
- h := plumbing.NewHash(string(line[sp+1 : sp+41]))
- r.ACKs = append(r.ACKs, h)
- return nil
-}
-
-// Encode encodes the ServerResponse into a writer.
-func (r *ServerResponse) Encode(w io.Writer) error {
- if len(r.ACKs) > 1 {
- return errors.New("multi_ack and multi_ack_detailed are not supported")
- }
-
- e := pktline.NewEncoder(w)
- if len(r.ACKs) == 0 {
- return e.Encodef("%s\n", nak)
- }
-
- return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go
deleted file mode 100644
index 74109d8853..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package packp
-
-import (
- "fmt"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
-)
-
-// UploadRequest values represent the information transmitted on an
-// upload-request message. Values from this type are not zero-value
-// safe; use NewUploadRequest instead.
-// This is a low level type, use UploadPackRequest instead.
-type UploadRequest struct {
- Capabilities *capability.List
- Wants []plumbing.Hash
- Shallows []plumbing.Hash
- Depth Depth
-}
-
-// Depth values store the desired depth of the requested packfile: see
-// DepthCommits, DepthSince and DepthReference.
-type Depth interface {
- isDepth()
- IsZero() bool
-}
-
-// DepthCommits values store the maximum number of requested commits in
-// the packfile. Zero means infinite. A negative value will have
-// undefined consequences.
-type DepthCommits int
-
-func (d DepthCommits) isDepth() {}
-
-func (d DepthCommits) IsZero() bool {
- return d == 0
-}
-
-// DepthSince values requests only commits newer than the specified time.
-type DepthSince time.Time
-
-func (d DepthSince) isDepth() {}
-
-func (d DepthSince) IsZero() bool {
- return time.Time(d).IsZero()
-}
-
-// DepthReference requests only commits not found in the specified reference.
-type DepthReference string
-
-func (d DepthReference) isDepth() {}
-
-func (d DepthReference) IsZero() bool {
- return string(d) == ""
-}
-
-// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be
-// used. It has no capabilities, wants or shallows and an infinite depth. Please
-// note that to encode an upload-request it has to have at least one wanted hash.
-func NewUploadRequest() *UploadRequest {
- return &UploadRequest{
- Capabilities: capability.NewList(),
- Wants: []plumbing.Hash{},
- Shallows: []plumbing.Hash{},
- Depth: DepthCommits(0),
- }
-}
-
-// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
-// value, with the request capabilities filled with the most optimal ones based
-// on the adv value (advertised capabilities); the generated UploadRequest
-// has no wants or shallows and an infinite depth.
-func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
- r := NewUploadRequest()
-
- if adv.Supports(capability.MultiACKDetailed) {
- r.Capabilities.Set(capability.MultiACKDetailed)
- } else if adv.Supports(capability.MultiACK) {
- r.Capabilities.Set(capability.MultiACK)
- }
-
- if adv.Supports(capability.Sideband64k) {
- r.Capabilities.Set(capability.Sideband64k)
- } else if adv.Supports(capability.Sideband) {
- r.Capabilities.Set(capability.Sideband)
- }
-
- if adv.Supports(capability.ThinPack) {
- r.Capabilities.Set(capability.ThinPack)
- }
-
- if adv.Supports(capability.OFSDelta) {
- r.Capabilities.Set(capability.OFSDelta)
- }
-
- if adv.Supports(capability.Agent) {
- r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
- }
-
- return r
-}
-
-// Validate validates the content of UploadRequest, following these rules:
-// - Wants MUST have at least one reference
-// - capability.Shallow MUST be present if Shallows is not empty
-// - if a non-zero DepthCommits is given, capability.Shallow MUST be present
-// - if a DepthSince is given, capability.DeepenSince MUST be present
-// - if a DepthReference is given, capability.DeepenNot MUST be present
-// - MUST contain a maximum of one of capability.Sideband and capability.Sideband64k
-// - MUST contain a maximum of one of capability.MultiACK and capability.MultiACKDetailed
-func (r *UploadRequest) Validate() error {
- if len(r.Wants) == 0 {
- return fmt.Errorf("want can't be empty")
- }
-
- if err := r.validateRequiredCapabilities(); err != nil {
- return err
- }
-
- if err := r.validateConflictCapabilities(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *UploadRequest) validateRequiredCapabilities() error {
- msg := "missing capability %s"
-
- if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) {
- return fmt.Errorf(msg, capability.Shallow)
- }
-
- switch r.Depth.(type) {
- case DepthCommits:
- if r.Depth != DepthCommits(0) {
- if !r.Capabilities.Supports(capability.Shallow) {
- return fmt.Errorf(msg, capability.Shallow)
- }
- }
- case DepthSince:
- if !r.Capabilities.Supports(capability.DeepenSince) {
- return fmt.Errorf(msg, capability.DeepenSince)
- }
- case DepthReference:
- if !r.Capabilities.Supports(capability.DeepenNot) {
- return fmt.Errorf(msg, capability.DeepenNot)
- }
- }
-
- return nil
-}
-
-func (r *UploadRequest) validateConflictCapabilities() error {
- msg := "capabilities %s and %s are mutually exclusive"
- if r.Capabilities.Supports(capability.Sideband) &&
- r.Capabilities.Supports(capability.Sideband64k) {
- return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
- }
-
- if r.Capabilities.Supports(capability.MultiACK) &&
- r.Capabilities.Supports(capability.MultiACKDetailed) {
- return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go
deleted file mode 100644
index bcd642db2a..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-// Decode reads the next upload-request from its input and
-// stores it in the UploadRequest.
-func (u *UploadRequest) Decode(r io.Reader) error {
- d := newUlReqDecoder(r)
- return d.Decode(u)
-}
-
-type ulReqDecoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use parser.nextLine() to make it advance
- nLine int // current pkt-line number for debugging, begins at 1
- err error // sticky error, use the parser.error() method to fill this out
- data *UploadRequest // parsed data is stored here
-}
-
-func newUlReqDecoder(r io.Reader) *ulReqDecoder {
- return &ulReqDecoder{
- s: pktline.NewScanner(r),
- }
-}
-
-func (d *ulReqDecoder) Decode(v *UploadRequest) error {
- d.data = v
-
- for state := d.decodeFirstWant; state != nil; {
- state = state()
- }
-
- return d.err
-}
-
-// error fills out the parser's sticky error.
-func (d *ulReqDecoder) error(format string, a ...interface{}) {
- msg := fmt.Sprintf(
- "pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...),
- )
-
- d.err = NewErrUnexpectedData(msg, d.line)
-}
-
-// nextLine reads a new pkt-line from the scanner, makes its payload available
-// as d.line and increments d.nLine. A successful invocation returns true;
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims eols at the end of the payloads.
-func (d *ulReqDecoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// Expected format: want <hash>[ capabilities]
-func (d *ulReqDecoder) decodeFirstWant() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("missing 'want ' prefix")
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return d.decodeCaps
-}
-
-func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
- if len(d.line) < hashSize {
- d.err = fmt.Errorf("malformed hash: %v", d.line)
- return plumbing.ZeroHash, false
- }
-
- var hash plumbing.Hash
- if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
- d.error("invalid hash text: %s", err)
- return plumbing.ZeroHash, false
- }
- d.line = d.line[hashSize:]
-
- return hash, true
-}
-
-// Expected format: sp cap1 sp cap2 sp cap3...
-func (d *ulReqDecoder) decodeCaps() stateFn {
- d.line = bytes.TrimPrefix(d.line, sp)
- if err := d.data.Capabilities.Decode(d.line); err != nil {
- d.error("invalid capabilities: %s", err)
- }
-
- return d.decodeOtherWants
-}
-
-// Expected format: want <hash>
-func (d *ulReqDecoder) decodeOtherWants() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if bytes.HasPrefix(d.line, shallow) {
- return d.decodeShallow
- }
-
- if bytes.HasPrefix(d.line, deepen) {
- return d.decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, want) {
- d.error("unexpected payload while expecting a want: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, want)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Wants = append(d.data.Wants, hash)
-
- return d.decodeOtherWants
-}
-
-// Expected format: shallow <hash>
-func (d *ulReqDecoder) decodeShallow() stateFn {
- if bytes.HasPrefix(d.line, deepen) {
- return d.decodeDeepen
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- if !bytes.HasPrefix(d.line, shallow) {
- d.error("unexpected payload while expecting a shallow: %q", d.line)
- return nil
- }
- d.line = bytes.TrimPrefix(d.line, shallow)
-
- hash, ok := d.readHash()
- if !ok {
- return nil
- }
- d.data.Shallows = append(d.data.Shallows, hash)
-
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- return d.decodeShallow
-}
-
-// Expected format: deepen <n> / deepen-since <ul> / deepen-not <ref>
-func (d *ulReqDecoder) decodeDeepen() stateFn {
- if bytes.HasPrefix(d.line, deepenCommits) {
- return d.decodeDeepenCommits
- }
-
- if bytes.HasPrefix(d.line, deepenSince) {
- return d.decodeDeepenSince
- }
-
- if bytes.HasPrefix(d.line, deepenReference) {
- return d.decodeDeepenReference
- }
-
- if len(d.line) == 0 {
- return nil
- }
-
- d.error("unexpected deepen specification: %q", d.line)
- return nil
-}
-
-func (d *ulReqDecoder) decodeDeepenCommits() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenCommits)
-
- var n int
- if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
- return nil
- }
- if n < 0 {
- d.err = fmt.Errorf("negative depth")
- return nil
- }
- d.data.Depth = DepthCommits(n)
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeDeepenSince() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenSince)
-
- var secs int64
- secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
- if d.err != nil {
- return nil
- }
- t := time.Unix(secs, 0).UTC()
- d.data.Depth = DepthSince(t)
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeDeepenReference() stateFn {
- d.line = bytes.TrimPrefix(d.line, deepenReference)
-
- d.data.Depth = DepthReference(string(d.line))
-
- return d.decodeFlush
-}
-
-func (d *ulReqDecoder) decodeFlush() stateFn {
- if ok := d.nextLine(); !ok {
- return nil
- }
-
- if len(d.line) != 0 {
- d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go
deleted file mode 100644
index 89a59868d0..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-// Encode writes the UlReq encoding of u to the stream.
-//
-// All the payloads will end with a newline character. Wants and
-// shallows are sorted alphabetically. A depth of 0 means no depth
-// request is sent.
-func (u *UploadRequest) Encode(w io.Writer) error {
- e := newUlReqEncoder(w)
- return e.Encode(u)
-}
-
-type ulReqEncoder struct {
- pe *pktline.Encoder // where to write the encoded data
- data *UploadRequest // the data to encode
- err error // sticky error
-}
-
-func newUlReqEncoder(w io.Writer) *ulReqEncoder {
- return &ulReqEncoder{
- pe: pktline.NewEncoder(w),
- }
-}
-
-func (e *ulReqEncoder) Encode(v *UploadRequest) error {
- e.data = v
-
- if len(v.Wants) == 0 {
- return fmt.Errorf("empty wants provided")
- }
-
- plumbing.HashesSort(e.data.Wants)
- for state := e.encodeFirstWant; state != nil; {
- state = state()
- }
-
- return e.err
-}
-
-func (e *ulReqEncoder) encodeFirstWant() stateFn {
- var err error
- if e.data.Capabilities.IsEmpty() {
- err = e.pe.Encodef("want %s\n", e.data.Wants[0])
- } else {
- err = e.pe.Encodef(
- "want %s %s\n",
- e.data.Wants[0],
- e.data.Capabilities.String(),
- )
- }
-
- if err != nil {
- e.err = fmt.Errorf("encoding first want line: %s", err)
- return nil
- }
-
-	return e.encodeAdditionalWants
-}
-
-func (e *ulReqEncoder) encodeAdditionalWants() stateFn {
- last := e.data.Wants[0]
- for _, w := range e.data.Wants[1:] {
- if bytes.Equal(last[:], w[:]) {
- continue
- }
-
- if err := e.pe.Encodef("want %s\n", w); err != nil {
- e.err = fmt.Errorf("encoding want %q: %s", w, err)
- return nil
- }
-
- last = w
- }
-
- return e.encodeShallows
-}
-
-func (e *ulReqEncoder) encodeShallows() stateFn {
- plumbing.HashesSort(e.data.Shallows)
-
- var last plumbing.Hash
- for _, s := range e.data.Shallows {
- if bytes.Equal(last[:], s[:]) {
- continue
- }
-
- if err := e.pe.Encodef("shallow %s\n", s); err != nil {
- e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
- return nil
- }
-
- last = s
- }
-
- return e.encodeDepth
-}
-
-func (e *ulReqEncoder) encodeDepth() stateFn {
- switch depth := e.data.Depth.(type) {
- case DepthCommits:
- if depth != 0 {
- commits := int(depth)
- if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
- e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
- return nil
- }
- }
- case DepthSince:
- when := time.Time(depth).UTC()
- if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", when, err)
- return nil
- }
- case DepthReference:
- reference := string(depth)
- if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
- e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
- return nil
- }
- default:
- e.err = fmt.Errorf("unsupported depth type")
- return nil
- }
-
- return e.encodeFlush
-}
-
-func (e *ulReqEncoder) encodeFlush() stateFn {
- if err := e.pe.Flush(); err != nil {
- e.err = fmt.Errorf("encoding flush-pkt: %s", err)
- return nil
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go
deleted file mode 100644
index 73be117197..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package packp
-
-import (
- "errors"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
-)
-
-var (
- ErrEmptyCommands = errors.New("commands cannot be empty")
- ErrMalformedCommand = errors.New("malformed command")
-)
-
-// ReferenceUpdateRequest values represent reference upload requests.
-// Values from this type are not zero-value safe; use
-// NewReferenceUpdateRequest instead.
-type ReferenceUpdateRequest struct {
- Capabilities *capability.List
- Commands []*Command
- Shallow *plumbing.Hash
- // Packfile contains an optional packfile reader.
- Packfile io.ReadCloser
-
- // Progress receives sideband progress messages from the server
- Progress sideband.Progress
-}
-
-// NewReferenceUpdateRequest returns a pointer to a new ReferenceUpdateRequest value.
-func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
- return &ReferenceUpdateRequest{
- // TODO: Add support for push-cert
- Capabilities: capability.NewList(),
- Commands: nil,
- }
-}
-
-// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new
-// ReferenceUpdateRequest value, with the request capabilities filled with the
-// most optimal ones, based on the adv value (advertised capabilities); the
-// ReferenceUpdateRequest contains no commands.
-//
-// It does set the following capabilities:
-// - agent
-// - report-status
-// It leaves it up to the user to add the following capabilities later:
-// - atomic
-// - ofs-delta
-// - side-band
-// - side-band-64k
-// - quiet
-// - push-cert
-func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest {
- r := NewReferenceUpdateRequest()
-
- if adv.Supports(capability.Agent) {
- r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
- }
-
- if adv.Supports(capability.ReportStatus) {
- r.Capabilities.Set(capability.ReportStatus)
- }
-
- return r
-}
-
-func (r *ReferenceUpdateRequest) validate() error {
- if len(r.Commands) == 0 {
- return ErrEmptyCommands
- }
-
- for _, c := range r.Commands {
- if err := c.validate(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type Action string
-
-const (
-	Create  Action = "create"
-	Update  Action = "update"
-	Delete  Action = "delete"
-	Invalid Action = "invalid"
-)
-
-type Command struct {
- Name plumbing.ReferenceName
- Old plumbing.Hash
- New plumbing.Hash
-}
-
-func (c *Command) Action() Action {
- if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash {
- return Invalid
- }
-
- if c.Old == plumbing.ZeroHash {
- return Create
- }
-
- if c.New == plumbing.ZeroHash {
- return Delete
- }
-
- return Update
-}
-
-func (c *Command) validate() error {
- if c.Action() == Invalid {
- return ErrMalformedCommand
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go
deleted file mode 100644
index 51e8183d1f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
-)
-
-var (
- shallowLineLength = len(shallow) + hashSize
- minCommandLength = hashSize*2 + 2 + 1
-	minCommandAndCapsLength = minCommandLength + 1
-)
-
-var (
- ErrEmpty = errors.New("empty update-request message")
- errNoCommands = errors.New("unexpected EOF before any command")
- errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found")
-)
-
-func errMalformedRequest(reason string) error {
- return fmt.Errorf("malformed request: %s", reason)
-}
-
-func errInvalidHashSize(got int) error {
- return fmt.Errorf("invalid hash size: expected %d, got %d",
- hashSize, got)
-}
-
-func errInvalidHash(err error) error {
- return fmt.Errorf("invalid hash: %s", err.Error())
-}
-
-func errInvalidShallowLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid shallow line length: expected %d, got %d",
- shallowLineLength, got))
-}
-
-func errInvalidCommandCapabilitiesLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid command and capabilities line length: expected at least %d, got %d",
-		minCommandAndCapsLength, got))
-}
-
-func errInvalidCommandLineLength(got int) error {
- return errMalformedRequest(fmt.Sprintf(
- "invalid command line length: expected at least %d, got %d",
- minCommandLength, got))
-}
-
-func errInvalidShallowObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid shallow object id: %s", err.Error()))
-}
-
-func errInvalidOldObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid old object id: %s", err.Error()))
-}
-
-func errInvalidNewObjId(err error) error {
- return errMalformedRequest(
- fmt.Sprintf("invalid new object id: %s", err.Error()))
-}
-
-func errMalformedCommand(err error) error {
- return errMalformedRequest(fmt.Sprintf(
- "malformed command: %s", err.Error()))
-}
-
-// Decode reads the next update-request message from the reader and stores it
-// in the ReferenceUpdateRequest.
-func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
- var rc io.ReadCloser
- var ok bool
- rc, ok = r.(io.ReadCloser)
- if !ok {
- rc = ioutil.NopCloser(r)
- }
-
- d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}
- return d.Decode(req)
-}
-
-type updReqDecoder struct {
- r io.ReadCloser
- s *pktline.Scanner
- req *ReferenceUpdateRequest
-}
-
-func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error {
- d.req = req
- funcs := []func() error{
- d.scanLine,
- d.decodeShallow,
- d.decodeCommandAndCapabilities,
- d.decodeCommands,
- d.setPackfile,
- req.validate,
- }
-
- for _, f := range funcs {
- if err := f(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *updReqDecoder) scanLine() error {
- if ok := d.s.Scan(); !ok {
- return d.scanErrorOr(ErrEmpty)
- }
-
- return nil
-}
-
-func (d *updReqDecoder) decodeShallow() error {
- b := d.s.Bytes()
-
- if !bytes.HasPrefix(b, shallowNoSp) {
- return nil
- }
-
- if len(b) != shallowLineLength {
- return errInvalidShallowLineLength(len(b))
- }
-
- h, err := parseHash(string(b[len(shallow):]))
- if err != nil {
- return errInvalidShallowObjId(err)
- }
-
- if ok := d.s.Scan(); !ok {
- return d.scanErrorOr(errNoCommands)
- }
-
- d.req.Shallow = &h
-
- return nil
-}
-
-func (d *updReqDecoder) decodeCommands() error {
- for {
- b := d.s.Bytes()
- if bytes.Equal(b, pktline.Flush) {
- return nil
- }
-
- c, err := parseCommand(b)
- if err != nil {
- return err
- }
-
- d.req.Commands = append(d.req.Commands, c)
-
- if ok := d.s.Scan(); !ok {
- return d.s.Err()
- }
- }
-}
-
-func (d *updReqDecoder) decodeCommandAndCapabilities() error {
- b := d.s.Bytes()
- i := bytes.IndexByte(b, 0)
- if i == -1 {
- return errMissingCapabilitiesDelimiter
- }
-
- if len(b) < minCommandAndCapsLenth {
- return errInvalidCommandCapabilitiesLineLength(len(b))
- }
-
- cmd, err := parseCommand(b[:i])
- if err != nil {
- return err
- }
-
- d.req.Commands = append(d.req.Commands, cmd)
-
- if err := d.req.Capabilities.Decode(b[i+1:]); err != nil {
- return err
- }
-
- if err := d.scanLine(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (d *updReqDecoder) setPackfile() error {
- d.req.Packfile = d.r
-
- return nil
-}
-
-func parseCommand(b []byte) (*Command, error) {
- if len(b) < minCommandLength {
- return nil, errInvalidCommandLineLength(len(b))
- }
-
- var (
- os, ns string
- n plumbing.ReferenceName
- )
- if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil {
- return nil, errMalformedCommand(err)
- }
-
- oh, err := parseHash(os)
- if err != nil {
- return nil, errInvalidOldObjId(err)
- }
-
- nh, err := parseHash(ns)
- if err != nil {
- return nil, errInvalidNewObjId(err)
- }
-
- return &Command{Old: oh, New: nh, Name: n}, nil
-}
-
-func parseHash(s string) (plumbing.Hash, error) {
- if len(s) != hashSize {
- return plumbing.ZeroHash, errInvalidHashSize(len(s))
- }
-
- if _, err := hex.DecodeString(s); err != nil {
- return plumbing.ZeroHash, errInvalidHash(err)
- }
-
- h := plumbing.NewHash(s)
- return h, nil
-}
-
-func (d *updReqDecoder) scanErrorOr(origErr error) error {
- if err := d.s.Err(); err != nil {
- return err
- }
-
- return origErr
-}
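
For illustration, a minimal standalone sketch of the command parsing and classification implemented by parseCommand, parseHash and Command.Action above (stdlib only; the hash values and ref name are made up, and pktline framing, shallow lines and capabilities are omitted):

package main

import (
	"encoding/hex"
	"fmt"
)

const hashSize = 40 // length of a hex-encoded SHA-1

const zeroHash = "0000000000000000000000000000000000000000"

func parseHash(s string) (string, error) {
	if len(s) != hashSize {
		return "", fmt.Errorf("invalid hash size: expected %d, got %d", hashSize, len(s))
	}
	if _, err := hex.DecodeString(s); err != nil {
		return "", fmt.Errorf("invalid hash: %v", err)
	}
	return s, nil
}

func main() {
	// A command line is "<old-hash> <new-hash> <ref-name>".
	line := zeroHash + " 6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/feature"

	var os, ns, name string
	if _, err := fmt.Sscanf(line, "%s %s %s", &os, &ns, &name); err != nil {
		panic(err)
	}

	oh, err := parseHash(os)
	if err != nil {
		panic(err)
	}
	nh, err := parseHash(ns)
	if err != nil {
		panic(err)
	}

	// Same classification as Command.Action.
	switch {
	case oh == zeroHash && nh == zeroHash:
		fmt.Println(name, "-> invalid")
	case oh == zeroHash:
		fmt.Println(name, "-> create") // prints this
	case nh == zeroHash:
		fmt.Println(name, "-> delete")
	default:
		fmt.Println(name, "-> update")
	}
}
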
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go
deleted file mode 100644
index 44c05739d8..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package packp
-
-import (
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
-)
-
-var (
- zeroHashString = plumbing.ZeroHash.String()
-)
-
-// Encode writes the ReferenceUpdateRequest encoding to the stream.
-func (r *ReferenceUpdateRequest) Encode(w io.Writer) error {
- if err := r.validate(); err != nil {
- return err
- }
-
- e := pktline.NewEncoder(w)
-
- if err := r.encodeShallow(e, r.Shallow); err != nil {
- return err
- }
-
- if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil {
- return err
- }
-
- if r.Packfile != nil {
- if _, err := io.Copy(w, r.Packfile); err != nil {
- return err
- }
-
- return r.Packfile.Close()
- }
-
- return nil
-}
-
-func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
- h *plumbing.Hash) error {
-
- if h == nil {
- return nil
- }
-
- objId := []byte(h.String())
- return e.Encodef("%s%s", shallow, objId)
-}
-
-func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
- cmds []*Command, cap *capability.List) error {
-
- if err := e.Encodef("%s\x00%s",
- formatCommand(cmds[0]), cap.String()); err != nil {
- return err
- }
-
- for _, cmd := range cmds[1:] {
- if err := e.Encodef(formatCommand(cmd)); err != nil {
- return err
- }
- }
-
- return e.Flush()
-}
-
-func formatCommand(cmd *Command) string {
- o := cmd.Old.String()
- n := cmd.New.String()
- return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
-}
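
Concretely, encodeCommands above produces pkt-lines like the following. A sketch of the framing (the capability list and hashes are illustrative): each pkt-line is a 4-digit hex length that counts its own 4 bytes, and "0000" is the flush-pkt.

package main

import "fmt"

// pktline frames a payload the way pktline.Encoder does.
func pktline(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func main() {
	oldHash := "0000000000000000000000000000000000000000"
	newHash := "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"
	cmd := fmt.Sprintf("%s %s %s", oldHash, newHash, "refs/heads/feature")

	// Only the first command carries the capability list, NUL-separated.
	fmt.Printf("%q\n", pktline(cmd+"\x00"+"report-status ofs-delta"))
	fmt.Println("0000") // flush-pkt ends the command list
}
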
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go
deleted file mode 100644
index 114413952c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package packp
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
-)
-
-// UploadPackRequest represents an upload-pack request.
-// The zero value is not safe; use NewUploadPackRequest instead.
-type UploadPackRequest struct {
- UploadRequest
- UploadHaves
-}
-
-// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer.
-func NewUploadPackRequest() *UploadPackRequest {
- ur := NewUploadRequest()
- return &UploadPackRequest{
- UploadHaves: UploadHaves{},
- UploadRequest: *ur,
- }
-}
-
-// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
-// returns a pointer. The request capabilities are filled with the most optimal
-// ones, based on the adv value (advertised capabilities); the returned
-// UploadPackRequest has no wants, haves or shallows, and an infinite depth.
-func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
- ur := NewUploadRequestFromCapabilities(adv)
- return &UploadPackRequest{
- UploadHaves: UploadHaves{},
- UploadRequest: *ur,
- }
-}
-
-// IsEmpty returns true if the request is empty: every Want is already
-// contained in the Haves, or there are no Wants at all.
-func (r *UploadPackRequest) IsEmpty() bool {
- return isSubset(r.Wants, r.Haves)
-}
-
-func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {
- for _, h := range needle {
- found := false
- for _, oh := range haystack {
- if h == oh {
- found = true
- break
- }
- }
-
- if !found {
- return false
- }
- }
-
- return true
-}
-
-// UploadHaves is a message to signal the references that a client has in an
-// upload-pack. Do not use this directly; use UploadPackRequest instead.
-type UploadHaves struct {
- Haves []plumbing.Hash
-}
-
-// Encode encodes the UploadHaves into the Writer. If flush is true, a flush
-// command will be encoded at the end of the writer content.
-func (u *UploadHaves) Encode(w io.Writer, flush bool) error {
- e := pktline.NewEncoder(w)
-
- plumbing.HashesSort(u.Haves)
-
- var last plumbing.Hash
- for _, have := range u.Haves {
- if bytes.Equal(last[:], have[:]) {
- continue
- }
-
- if err := e.Encodef("have %s\n", have); err != nil {
- return fmt.Errorf("sending haves for %q: %s", have, err)
- }
-
- last = have
- }
-
- if flush && len(u.Haves) != 0 {
- if err := e.Flush(); err != nil {
- return fmt.Errorf("sending flush-pkt after haves: %s", err)
- }
- }
-
- return nil
-}
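
A sketch of the IsEmpty rule above, using a map-based subset check instead of the quadratic scan (plain strings stand in for plumbing.Hash):

package main

import "fmt"

func isSubset(needle, haystack []string) bool {
	set := make(map[string]bool, len(haystack))
	for _, h := range haystack {
		set[h] = true
	}
	for _, n := range needle {
		if !set[n] {
			return false
		}
	}
	return true
}

func main() {
	haves := []string{"aaa", "bbb", "ccc"}

	// Every want is already had: the request is empty, nothing to fetch.
	fmt.Println(isSubset([]string{"aaa", "bbb"}, haves)) // true

	// A want that is not had: the request is not empty.
	fmt.Println(isSubset([]string{"ddd"}, haves)) // false
}
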
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go
deleted file mode 100644
index c18e159e00..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package packp
-
-import (
- "errors"
- "io"
-
- "bufio"
-
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// ErrUploadPackResponseNotDecoded is returned if Read is called without
-// decoding first
-var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded")
-
-// UploadPackResponse contains all the information returned by the upload-pack
-// service. The response implements io.ReadCloser, which allows the packfile
-// to be read directly from it.
-type UploadPackResponse struct {
- ShallowUpdate
- ServerResponse
-
- r io.ReadCloser
- isShallow bool
- isMultiACK bool
- isOk bool
-}
-
-// NewUploadPackResponse creates a new UploadPackResponse instance; the request
-// being answered by the response is required.
-func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse {
- isShallow := !req.Depth.IsZero()
- isMultiACK := req.Capabilities.Supports(capability.MultiACK) ||
- req.Capabilities.Supports(capability.MultiACKDetailed)
-
- return &UploadPackResponse{
- isShallow: isShallow,
- isMultiACK: isMultiACK,
- }
-}
-
-// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance,
-// and sets its packfile reader.
-func NewUploadPackResponseWithPackfile(req *UploadPackRequest,
- pf io.ReadCloser) *UploadPackResponse {
-
- r := NewUploadPackResponse(req)
- r.r = pf
- return r
-}
-
-// Decode decodes all the responses sent by the upload-pack service into the
-// struct and prepares it to read the packfile using the Read method.
-func (r *UploadPackResponse) Decode(reader io.ReadCloser) error {
- buf := bufio.NewReader(reader)
-
- if r.isShallow {
- if err := r.ShallowUpdate.Decode(buf); err != nil {
- return err
- }
- }
-
- if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil {
- return err
- }
-
- // now the reader is ready to read the packfile content
- r.r = ioutil.NewReadCloser(buf, reader)
-
- return nil
-}
-
-// Encode encodes an UploadPackResponse.
-func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
- if r.isShallow {
- if err := r.ShallowUpdate.Encode(w); err != nil {
- return err
- }
- }
-
- if err := r.ServerResponse.Encode(w); err != nil {
- return err
- }
-
- defer ioutil.CheckClose(r.r, &err)
- _, err = io.Copy(w, r.r)
- return err
-}
-
-// Read reads the packfile data; if the request was done with any Sideband
-// capability, the content read should be demultiplexed. If Decode wasn't
-// called beforehand, ErrUploadPackResponseNotDecoded will be returned.
-func (r *UploadPackResponse) Read(p []byte) (int, error) {
- if r.r == nil {
- return 0, ErrUploadPackResponseNotDecoded
- }
-
- return r.r.Read(p)
-}
-
-// Close the underlying reader, if any
-func (r *UploadPackResponse) Close() error {
- if r.r == nil {
- return nil
- }
-
- return r.r.Close()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go
deleted file mode 100644
index 08e908f1f3..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package plumbing
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-const (
- refPrefix = "refs/"
- refHeadPrefix = refPrefix + "heads/"
- refTagPrefix = refPrefix + "tags/"
- refRemotePrefix = refPrefix + "remotes/"
- refNotePrefix = refPrefix + "notes/"
- symrefPrefix = "ref: "
-)
-
-// RefRevParseRules are a set of rules to parse references into short names.
-// These are the same rules as used by git in shorten_unambiguous_ref.
-// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
-var RefRevParseRules = []string{
- "refs/%s",
- "refs/tags/%s",
- "refs/heads/%s",
- "refs/remotes/%s",
- "refs/remotes/%s/HEAD",
-}
-
-var (
- ErrReferenceNotFound = errors.New("reference not found")
-)
-
-// ReferenceType describes the type of a reference.
-type ReferenceType int8
-
-const (
- InvalidReference ReferenceType = 0
- HashReference ReferenceType = 1
- SymbolicReference ReferenceType = 2
-)
-
-func (r ReferenceType) String() string {
- switch r {
- case InvalidReference:
- return "invalid-reference"
- case HashReference:
- return "hash-reference"
- case SymbolicReference:
- return "symbolic-reference"
- }
-
- return ""
-}
-
-// ReferenceName is the name of a reference.
-type ReferenceName string
-
-// NewBranchReferenceName returns a reference name describing a branch based on
-// its short name.
-func NewBranchReferenceName(name string) ReferenceName {
- return ReferenceName(refHeadPrefix + name)
-}
-
-// NewNoteReferenceName returns a reference name describing a note based on its
-// short name.
-func NewNoteReferenceName(name string) ReferenceName {
- return ReferenceName(refNotePrefix + name)
-}
-
-// NewRemoteReferenceName returns a reference name describing a remote branch
-// based on its short name and the remote name.
-func NewRemoteReferenceName(remote, name string) ReferenceName {
- return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
-}
-
-// NewRemoteHEADReferenceName returns a reference name describing the HEAD
-// branch of a remote.
-func NewRemoteHEADReferenceName(remote string) ReferenceName {
- return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
-}
-
-// NewTagReferenceName returns a reference name describing a tag based on its
-// short name.
-func NewTagReferenceName(name string) ReferenceName {
- return ReferenceName(refTagPrefix + name)
-}
-
-// IsBranch checks whether a reference is a branch.
-func (r ReferenceName) IsBranch() bool {
- return strings.HasPrefix(string(r), refHeadPrefix)
-}
-
-// IsNote checks whether a reference is a note.
-func (r ReferenceName) IsNote() bool {
- return strings.HasPrefix(string(r), refNotePrefix)
-}
-
-// IsRemote checks whether a reference is a remote.
-func (r ReferenceName) IsRemote() bool {
- return strings.HasPrefix(string(r), refRemotePrefix)
-}
-
-// IsTag checks whether a reference is a tag.
-func (r ReferenceName) IsTag() bool {
- return strings.HasPrefix(string(r), refTagPrefix)
-}
-
-func (r ReferenceName) String() string {
- return string(r)
-}
-
-// Short returns the short name of a ReferenceName
-func (r ReferenceName) Short() string {
- s := string(r)
- res := s
- for _, format := range RefRevParseRules {
- _, err := fmt.Sscanf(s, format, &res)
- if err == nil {
- continue
- }
- }
-
- return res
-}
-
-const (
- HEAD ReferenceName = "HEAD"
- Master ReferenceName = "refs/heads/master"
-)
-
-// Reference is a representation of a git reference.
-type Reference struct {
- t ReferenceType
- n ReferenceName
- h Hash
- target ReferenceName
-}
-
-// NewReferenceFromStrings creates a reference from a name and a target as
-// strings; the resulting reference can be a SymbolicReference or a
-// HashReference, based on the target provided.
-func NewReferenceFromStrings(name, target string) *Reference {
- n := ReferenceName(name)
-
- if strings.HasPrefix(target, symrefPrefix) {
- target := ReferenceName(target[len(symrefPrefix):])
- return NewSymbolicReference(n, target)
- }
-
- return NewHashReference(n, NewHash(target))
-}
-
-// NewSymbolicReference creates a new SymbolicReference reference
-func NewSymbolicReference(n, target ReferenceName) *Reference {
- return &Reference{
- t: SymbolicReference,
- n: n,
- target: target,
- }
-}
-
-// NewHashReference creates a new HashReference reference
-func NewHashReference(n ReferenceName, h Hash) *Reference {
- return &Reference{
- t: HashReference,
- n: n,
- h: h,
- }
-}
-
-// Type returns the type of a reference.
-func (r *Reference) Type() ReferenceType {
- return r.t
-}
-
-// Name returns the name of a reference.
-func (r *Reference) Name() ReferenceName {
- return r.n
-}
-
-// Hash returns the hash of a hash reference.
-func (r *Reference) Hash() Hash {
- return r.h
-}
-
-// Target returns the target of a symbolic reference.
-func (r *Reference) Target() ReferenceName {
- return r.target
-}
-
-// Strings dumps a reference as a [2]string.
-func (r *Reference) Strings() [2]string {
- var o [2]string
- o[0] = r.Name().String()
-
- switch r.Type() {
- case HashReference:
- o[1] = r.Hash().String()
- case SymbolicReference:
- o[1] = symrefPrefix + r.Target().String()
- }
-
- return o
-}
-
-func (r *Reference) String() string {
- s := r.Strings()
- return fmt.Sprintf("%s %s", s[1], s[0])
-}
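
A brief usage sketch of the reference API above, using the vendored import path (the hash value is illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// A symbolic reference, as stored in .git/HEAD.
	head := plumbing.NewReferenceFromStrings("HEAD", "ref: refs/heads/master")
	fmt.Println(head.Type(), head.Target()) // symbolic-reference refs/heads/master

	// A hash reference, as stored under .git/refs/heads/.
	branch := plumbing.NewReferenceFromStrings(
		"refs/heads/master", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	fmt.Println(branch.Type(), branch.Hash()) // hash-reference 6ecf0e...
	fmt.Println(branch.Name().IsBranch())     // true
	fmt.Println(branch.Name().Short())        // master
}
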
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go
deleted file mode 100644
index 5f053b200c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package plumbing
-
-// Revision represents a git revision. For more details about git revisions,
-// please check the git manual page:
-// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
-type Revision string
-
-func (r Revision) String() string {
- return string(r)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go
deleted file mode 100644
index 7ad71ac044..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Package revlist provides support to access the ancestors of commits, in a
-// similar way to the git-rev-list command.
-package revlist
-
-import (
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// Objects computes a complementary set: it gets the hashes of all the objects
-// reachable from the given objects. The ignore parameter lists object hashes
-// to be excluded from the result. All of those objects must be accessible
-// from the object storer.
-func Objects(
- s storer.EncodedObjectStorer,
- objs,
- ignore []plumbing.Hash,
-) ([]plumbing.Hash, error) {
- return ObjectsWithStorageForIgnores(s, s, objs, ignore)
-}
-
-// ObjectsWithStorageForIgnores is the same as Objects, but a
-// secondary storage layer can be provided, to be used for finding the
-// full set of objects to be ignored while finding the reachable
-// objects. This is useful when the main `s` storage layer is slow
-// and/or remote, while the ignore list is available somewhere local.
-func ObjectsWithStorageForIgnores(
- s, ignoreStore storer.EncodedObjectStorer,
- objs,
- ignore []plumbing.Hash,
-) ([]plumbing.Hash, error) {
- ignore, err := objects(ignoreStore, ignore, nil, true)
- if err != nil {
- return nil, err
- }
-
- return objects(s, objs, ignore, false)
-}
-
-func objects(
- s storer.EncodedObjectStorer,
- objects,
- ignore []plumbing.Hash,
- allowMissingObjects bool,
-) ([]plumbing.Hash, error) {
- seen := hashListToSet(ignore)
- result := make(map[plumbing.Hash]bool)
- visited := make(map[plumbing.Hash]bool)
-
- walkerFunc := func(h plumbing.Hash) {
- if !seen[h] {
- result[h] = true
- seen[h] = true
- }
- }
-
- for _, h := range objects {
- if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil {
- if allowMissingObjects && err == plumbing.ErrObjectNotFound {
- continue
- }
-
- return nil, err
- }
- }
-
- return hashSetToList(result), nil
-}
-
-// processObject obtains the object using the hash and processes it depending on its type.
-func processObject(
- s storer.EncodedObjectStorer,
- h plumbing.Hash,
- seen map[plumbing.Hash]bool,
- visited map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
- walkerFunc func(h plumbing.Hash),
-) error {
- if seen[h] {
- return nil
- }
-
- o, err := s.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return err
- }
-
- do, err := object.DecodeObject(s, o)
- if err != nil {
- return err
- }
-
- switch do := do.(type) {
- case *object.Commit:
- return reachableObjects(do, seen, visited, ignore, walkerFunc)
- case *object.Tree:
- return iterateCommitTrees(seen, do, walkerFunc)
- case *object.Tag:
- walkerFunc(do.Hash)
- return processObject(s, do.Target, seen, visited, ignore, walkerFunc)
- case *object.Blob:
- walkerFunc(do.Hash)
- default:
- return fmt.Errorf("object type not valid: %s. "+
- "Object reference: %s", o.Type(), o.Hash())
- }
-
- return nil
-}
-
-// reachableObjects returns, using the callback function, all the objects
-// reachable from the specified commit. To avoid iterating over seen commits,
-// if a commit hash is in the 'seen' set, its tree and blob objects will not
-// be iterated.
-func reachableObjects(
- commit *object.Commit,
- seen map[plumbing.Hash]bool,
- visited map[plumbing.Hash]bool,
- ignore []plumbing.Hash,
- cb func(h plumbing.Hash),
-) error {
- i := object.NewCommitPreorderIter(commit, seen, ignore)
- pending := make(map[plumbing.Hash]bool)
- addPendingParents(pending, visited, commit)
- for {
- commit, err := i.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return err
- }
-
- if pending[commit.Hash] {
- delete(pending, commit.Hash)
- }
-
- addPendingParents(pending, visited, commit)
-
- if visited[commit.Hash] && len(pending) == 0 {
- break
- }
-
- if seen[commit.Hash] {
- continue
- }
-
- cb(commit.Hash)
-
- tree, err := commit.Tree()
- if err != nil {
- return err
- }
-
- if err := iterateCommitTrees(seen, tree, cb); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) {
- for _, p := range commit.ParentHashes {
- if !visited[p] {
- pending[p] = true
- }
- }
-}
-
-// iterateCommitTrees iterates all trees reachable from the given tree.
-func iterateCommitTrees(
- seen map[plumbing.Hash]bool,
- tree *object.Tree,
- cb func(h plumbing.Hash),
-) error {
- if seen[tree.Hash] {
- return nil
- }
-
- cb(tree.Hash)
-
- treeWalker := object.NewTreeWalker(tree, true, seen)
-
- for {
- _, e, err := treeWalker.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- if e.Mode == filemode.Submodule {
- continue
- }
-
- if seen[e.Hash] {
- continue
- }
-
- cb(e.Hash)
- }
-
- return nil
-}
-
-func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash {
- var result []plumbing.Hash
- for key := range hashes {
- result = append(result, key)
- }
-
- return result
-}
-
-func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool {
- result := make(map[plumbing.Hash]bool)
- for _, h := range hashes {
- result[h] = true
- }
-
- return result
-}
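
The complementary-set idea above, sketched on a toy commit graph (hypothetical string ids stand in for plumbing.Hash, and a parents map stands in for the object storer):

package main

import "fmt"

var parents = map[string][]string{
	"c3": {"c2"},
	"c2": {"c1"},
	"c1": {},
}

// reachable walks from the given nodes, collecting everything not yet seen.
func reachable(from []string, seen map[string]bool) []string {
	var out []string
	var walk func(h string)
	walk = func(h string) {
		if seen[h] {
			return
		}
		seen[h] = true
		out = append(out, h)
		for _, p := range parents[h] {
			walk(p)
		}
	}
	for _, h := range from {
		walk(h)
	}
	return out
}

func main() {
	// First mark everything reachable from the ignored node c1 as seen,
	// then walk from c3: only the complement remains.
	seen := map[string]bool{}
	reachable([]string{"c1"}, seen)
	fmt.Println(reachable([]string{"c3"}, seen)) // [c3 c2]
}
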
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go
deleted file mode 100644
index 4d4f179c61..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package storer defines the interfaces to store objects, references, etc.
-package storer
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go
deleted file mode 100644
index e087296ec9..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package storer
-
-import "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-
-// IndexStorer is a generic storage of index.Index.
-type IndexStorer interface {
- SetIndex(*index.Index) error
- Index() (*index.Index, error)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go
deleted file mode 100644
index 98d1ec3fec..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package storer
-
-import (
- "errors"
- "io"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-var (
- // ErrStop is used to stop a ForEach function in an Iter.
- ErrStop = errors.New("stop iter")
-)
-
-// EncodedObjectStorer is a generic storage of objects.
-type EncodedObjectStorer interface {
- // NewEncodedObject returns a new plumbing.EncodedObject, the real type
- // of the object can be a custom implementation or the default one,
- // plumbing.MemoryObject.
- NewEncodedObject() plumbing.EncodedObject
- // SetEncodedObject saves an object into the storage; the object should
- // be created with the NewEncodedObject method, and it fails if the type
- // is not supported.
- SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
- // EncodedObject gets an object by hash with the given
- // plumbing.ObjectType. Implementors should return
- // (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with
- // both the given hash and object type.
- //
- // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
- // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
- // be looked up regardless of its type.
- EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
- // IterEncodedObjects returns a custom EncodedObjectIter over all the
- // objects in the storage.
- //
- // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
- // TreeObject and AnyObject.
- IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
- // HasEncodedObject returns plumbing.ErrObjectNotFound if the object
- // doesn't exist. If the object does exist, it returns nil.
- HasEncodedObject(plumbing.Hash) error
- // EncodedObjectSize returns the plaintext size of the encoded object.
- EncodedObjectSize(plumbing.Hash) (int64, error)
-}
-
-// DeltaObjectStorer is an EncodedObjectStorer that can return delta
-// objects.
-type DeltaObjectStorer interface {
- // DeltaObject is the same as EncodedObject but without resolving deltas.
- // Deltas will be returned as plumbing.DeltaObject instances.
- DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
-}
-
-// Transactioner is an optional interface for ObjectStorer; it enables
-// transaction-based write and read operations on the storage.
-type Transactioner interface {
- // Begin starts a transaction.
- Begin() Transaction
-}
-
-// LooseObjectStorer is an optional interface for managing "loose"
-// objects, i.e. those not in packfiles.
-type LooseObjectStorer interface {
- // ForEachObjectHash iterates over all the (loose) object hashes
- // in the repository without necessarily having to read those objects.
- // Objects only inside pack files may be omitted.
- // If ErrStop is sent the iteration is stopped but no error is returned.
- ForEachObjectHash(func(plumbing.Hash) error) error
- // LooseObjectTime looks up the (m)time associated with the
- // loose object (that is not in a pack file). Some
- // implementations (e.g. without loose objects)
- // always return an error.
- LooseObjectTime(plumbing.Hash) (time.Time, error)
- // DeleteLooseObject deletes a loose object if it exists.
- DeleteLooseObject(plumbing.Hash) error
-}
-
-// PackedObjectStorer is an optional interface for managing objects in
-// packfiles.
-type PackedObjectStorer interface {
- // ObjectPacks returns hashes of object packs if the underlying
- // implementation has pack files.
- ObjectPacks() ([]plumbing.Hash, error)
- // DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist.
- // Deletion is only performed if the pack is older than the supplied time (or the time is zero).
- DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
-}
-
-// PackfileWriter is an optional interface for ObjectStorer; it enables direct
-// writes of packfiles to the storage.
-type PackfileWriter interface {
- // PackfileWriter returns a writer for writing a packfile to the storage
- //
- // If the Storer does not implement PackfileWriter, the objects should be
- // written using the Set method.
- PackfileWriter() (io.WriteCloser, error)
-}
-
-// EncodedObjectIter is a generic closable interface for iterating over objects.
-type EncodedObjectIter interface {
- Next() (plumbing.EncodedObject, error)
- ForEach(func(plumbing.EncodedObject) error) error
- Close()
-}
-
-// Transaction is an in-progress storage transaction. A transaction must end
-// with a call to Commit or Rollback.
-type Transaction interface {
- SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
- EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
- Commit() error
- Rollback() error
-}
-
-// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a
-// series of object hashes and yields their associated objects by retrieving
-// each one from object storage. The retrievals are lazy and only occur when the
-// iterator moves forward with a call to Next().
-//
-// The EncodedObjectLookupIter must be closed with a call to Close() when it is
-// no longer needed.
-type EncodedObjectLookupIter struct {
- storage EncodedObjectStorer
- series []plumbing.Hash
- t plumbing.ObjectType
- pos int
-}
-
-// NewEncodedObjectLookupIter returns an object iterator given an object storage
-// and a slice of object hashes.
-func NewEncodedObjectLookupIter(
- storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter {
- return &EncodedObjectLookupIter{
- storage: storage,
- series: series,
- t: t,
- }
-}
-
-// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object can't be found in
-// the object storage, it will return plumbing.ErrObjectNotFound as an error.
-// If the object is retrieved successfully, the error will be nil.
-func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
- if iter.pos >= len(iter.series) {
- return nil, io.EOF
- }
-
- hash := iter.series[iter.pos]
- obj, err := iter.storage.EncodedObject(iter.t, hash)
- if err == nil {
- iter.pos++
- }
-
- return obj, err
-}
-
-// ForEach calls the cb function for each object contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *EncodedObjectLookupIter) Close() {
- iter.pos = len(iter.series)
-}
-
-// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a
-// series of objects stored in a slice and yields each one in turn when Next()
-// is called.
-//
-// The EncodedObjectSliceIter must be closed with a call to Close() when it is
-// no longer needed.
-type EncodedObjectSliceIter struct {
- series []plumbing.EncodedObject
-}
-
-// NewEncodedObjectSliceIter returns an object iterator for the given slice of
-// objects.
-func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter {
- return &EncodedObjectSliceIter{
- series: series,
- }
-}
-
-// Next returns the next object from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error. If the object is retrieved
-// successfully, the error will be nil.
-func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.series) == 0 {
- return nil, io.EOF
- }
-
- obj := iter.series[0]
- iter.series = iter.series[1:]
-
- return obj, nil
-}
-
-// ForEach calls the cb function for each object contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *EncodedObjectSliceIter) Close() {
- iter.series = []plumbing.EncodedObject{}
-}
-
-// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over
-// several EncodedObjectIters.
-//
-// The MultiObjectIter must be closed with a call to Close() when it is no
-// longer needed.
-type MultiEncodedObjectIter struct {
- iters []EncodedObjectIter
-}
-
-// NewMultiEncodedObjectIter returns an object iterator for the given slice of
-// EncodedObjectIters.
-func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
- return &MultiEncodedObjectIter{iters: iters}
-}
-
-// Next returns the next object from the iterator; when one iterator reaches
-// io.EOF, it is removed and the next one is used.
-func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.iters) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.iters[0].Next()
- if err == io.EOF {
- iter.iters[0].Close()
- iter.iters = iter.iters[1:]
- return iter.Next()
- }
-
- return obj, err
-}
-
-// ForEach calls the cb function for each object contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return ForEachIterator(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *MultiEncodedObjectIter) Close() {
- for _, i := range iter.iters {
- i.Close()
- }
-}
-
-type bareIterator interface {
- Next() (plumbing.EncodedObject, error)
- Close()
-}
-
-// ForEachIterator is a helper function to build iterators without the need to
-// rewrite the same ForEach function each time.
-func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error {
- defer iter.Close()
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
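
A self-contained sketch of the ErrStop convention used by the iterators above: the callback returns a sentinel error to stop iteration early, and the helper swallows it instead of reporting it to the caller.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errStop = errors.New("stop iter")

type sliceIter struct{ items []string }

func (it *sliceIter) Next() (string, error) {
	if len(it.items) == 0 {
		return "", io.EOF
	}
	v := it.items[0]
	it.items = it.items[1:]
	return v, nil
}

func forEach(it *sliceIter, cb func(string) error) error {
	for {
		v, err := it.Next()
		if err == io.EOF {
			return nil // normal end of iteration
		}
		if err != nil {
			return err
		}
		if err := cb(v); err != nil {
			if err == errStop {
				return nil // early stop requested, not an error
			}
			return err
		}
	}
}

func main() {
	it := &sliceIter{items: []string{"a", "b", "c"}}
	_ = forEach(it, func(s string) error {
		fmt.Println(s)
		if s == "b" {
			return errStop // stops after "b"; "c" is never visited
		}
		return nil
	})
}
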
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go
deleted file mode 100644
index cce72b4aa3..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package storer
-
-import (
- "errors"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-const MaxResolveRecursion = 1024
-
-// ErrMaxResolveRecursion is returned by ResolveReference if MaxResolveRecursion
-// is exceeded.
-var ErrMaxResolveRecursion = errors.New("max. recursion level reached")
-
-// ReferenceStorer is a generic storage of references.
-type ReferenceStorer interface {
- SetReference(*plumbing.Reference) error
- // CheckAndSetReference sets the reference `new`, but if `old` is
- // not `nil`, it first checks that the current stored value for
- // `old.Name()` matches the given reference value in `old`. If
- // not, it returns an error and doesn't update `new`.
- CheckAndSetReference(new, old *plumbing.Reference) error
- Reference(plumbing.ReferenceName) (*plumbing.Reference, error)
- IterReferences() (ReferenceIter, error)
- RemoveReference(plumbing.ReferenceName) error
- CountLooseRefs() (int, error)
- PackRefs() error
-}
-
-// ReferenceIter is a generic closable interface for iterating over references.
-type ReferenceIter interface {
- Next() (*plumbing.Reference, error)
- ForEach(func(*plumbing.Reference) error) error
- Close()
-}
-
-type referenceFilteredIter struct {
- ff func(r *plumbing.Reference) bool
- iter ReferenceIter
-}
-
-// NewReferenceFilteredIter returns a reference iterator for the given
-// reference iterator. This iterator will yield only the references for which
-// the provided function returns true.
-func NewReferenceFilteredIter(
- ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter {
- return &referenceFilteredIter{ff, iter}
-}
-
-// Next returns the next reference from the iterator. If the iterator has reached
-// the end it will return io.EOF as an error.
-func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) {
- for {
- r, err := iter.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if iter.ff(r) {
- return r, nil
- }
-
- continue
- }
-}
-
-// ForEach calls the cb function for each reference contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error {
- defer iter.Close()
- for {
- r, err := iter.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return err
- }
-
- if err := cb(r); err != nil {
- if err == ErrStop {
- break
- }
-
- return err
- }
- }
-
- return nil
-}
-
-// Close releases any resources used by the iterator.
-func (iter *referenceFilteredIter) Close() {
- iter.iter.Close()
-}
-
-// ReferenceSliceIter implements ReferenceIter. It iterates over a series of
-// references stored in a slice and yields each one in turn when Next() is
-// called.
-//
-// The ReferenceSliceIter must be closed with a call to Close() when it is no
-// longer needed.
-type ReferenceSliceIter struct {
- series []*plumbing.Reference
- pos int
-}
-
-// NewReferenceSliceIter returns a reference iterator for the given slice of
-// objects.
-func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter {
- return &ReferenceSliceIter{
- series: series,
- }
-}
-
-// Next returns the next reference from the iterator. If the iterator has
-// reached the end it will return io.EOF as an error.
-func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
- if iter.pos >= len(iter.series) {
- return nil, io.EOF
- }
-
- obj := iter.series[iter.pos]
- iter.pos++
- return obj, nil
-}
-
-// ForEach calls the cb function for each reference contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
- return forEachReferenceIter(iter, cb)
-}
-
-type bareReferenceIterator interface {
- Next() (*plumbing.Reference, error)
- Close()
-}
-
-func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
- defer iter.Close()
- for {
- obj, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
-
- return err
- }
-
- if err := cb(obj); err != nil {
- if err == ErrStop {
- return nil
- }
-
- return err
- }
- }
-}
-
-// Close releases any resources used by the iterator.
-func (iter *ReferenceSliceIter) Close() {
- iter.pos = len(iter.series)
-}
-
-// MultiReferenceIter implements ReferenceIter. It iterates over several
-// ReferenceIters.
-//
-// The MultiReferenceIter must be closed with a call to Close() when it is no
-// longer needed.
-type MultiReferenceIter struct {
- iters []ReferenceIter
-}
-
-// NewMultiReferenceIter returns a reference iterator for the given slice of
-// ReferenceIters.
-func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
- return &MultiReferenceIter{iters: iters}
-}
-
-// Next returns the next reference from the iterator; when one iterator
-// reaches io.EOF, it is removed and the next one is used.
-func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
- if len(iter.iters) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.iters[0].Next()
- if err == io.EOF {
- iter.iters[0].Close()
- iter.iters = iter.iters[1:]
- return iter.Next()
- }
-
- return obj, err
-}
-
-// ForEach calls the cb function for each reference contained in this iter until
-// an error happens or the end of the iter is reached. If ErrStop is sent,
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
- return forEachReferenceIter(iter, cb)
-}
-
-// Close releases any resources used by the iterator.
-func (iter *MultiReferenceIter) Close() {
- for _, i := range iter.iters {
- i.Close()
- }
-}
-
-// ResolveReference resolves a SymbolicReference to a HashReference.
-func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
- r, err := s.Reference(n)
- if err != nil || r == nil {
- return r, err
- }
- return resolveReference(s, r, 0)
-}
-
-func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) {
- if r.Type() != plumbing.SymbolicReference {
- return r, nil
- }
-
- if recursion > MaxResolveRecursion {
- return nil, ErrMaxResolveRecursion
- }
-
- t, err := s.Reference(r.Target())
- if err != nil {
- return nil, err
- }
-
- recursion++
- return resolveReference(s, t, recursion)
-}
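
A sketch of the symbolic-reference resolution implemented by ResolveReference above, with a plain map standing in for the ReferenceStorer and the same recursion cap guarding against reference cycles:

package main

import (
	"errors"
	"fmt"
	"strings"
)

const maxResolveRecursion = 1024

var refs = map[string]string{
	"HEAD":              "ref: refs/heads/master",
	"refs/heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
}

func resolve(name string, depth int) (string, error) {
	if depth > maxResolveRecursion {
		return "", errors.New("max. recursion level reached")
	}
	v, ok := refs[name]
	if !ok {
		return "", errors.New("reference not found")
	}
	if strings.HasPrefix(v, "ref: ") {
		// A symbolic reference: follow its target.
		return resolve(strings.TrimPrefix(v, "ref: "), depth+1)
	}
	return v, nil // a hash reference: done
}

func main() {
	h, err := resolve("HEAD", 0)
	fmt.Println(h, err) // 6ecf0e... <nil>
}
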
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go
deleted file mode 100644
index 39aaaa540d..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package storer
-
-import "gopkg.in/src-d/go-git.v4/plumbing"
-
-// ShallowStorer is a storage of references to shallow commits by hash,
-// meaning that these commits have missing parents because of a shallow fetch.
-type ShallowStorer interface {
- SetShallow([]plumbing.Hash) error
- Shallow() ([]plumbing.Hash, error)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go
deleted file mode 100644
index c7bc65a0c4..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package storer
-
-// Storer is a basic storer for encoded objects and references.
-type Storer interface {
- EncodedObjectStorer
- ReferenceStorer
-}
-
-// Initializer should be implemented by storers that need to perform any
-// operation when creating a new repository (i.e. git init).
-type Initializer interface {
- // Init performs initialization of the storer and returns the error, if
- // any.
- Init() error
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go
deleted file mode 100644
index 90635a5a12..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package client contains helper functions to deal with the different client
-// protocols.
-package client
-
-import (
- "fmt"
-
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/file"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/git"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
-)
-
-// Protocols are the protocols supported by default.
-var Protocols = map[string]transport.Transport{
- "http": http.DefaultClient,
- "https": http.DefaultClient,
- "ssh": ssh.DefaultClient,
- "git": git.DefaultClient,
- "file": file.DefaultClient,
-}
-
-// InstallProtocol adds or modifies an existing protocol.
-func InstallProtocol(scheme string, c transport.Transport) {
- if c == nil {
- delete(Protocols, scheme)
- return
- }
-
- Protocols[scheme] = c
-}
-
-// NewClient returns the appropriate client from the set of known protocols:
-// http://, https://, ssh://, git:// and file://.
-// See `InstallProtocol` to add or modify protocols.
-func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
- f, ok := Protocols[endpoint.Protocol]
- if !ok {
- return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)
- }
-
- if f == nil {
- return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol)
- }
-
- return f, nil
-}
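
A usage sketch of the protocol registry above, assuming the vendored import paths from this tree; passing nil unregisters a scheme:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

func main() {
	// Re-register the default HTTP transport under the https scheme
	// (e.g. after swapping in a customized implementation).
	client.InstallProtocol("https", githttp.DefaultClient)

	// Disable the file transport entirely.
	client.InstallProtocol("file", nil)

	fmt.Println(len(client.Protocols), "schemes registered")
}
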
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go
deleted file mode 100644
index dcf9391d59..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Package transport includes the implementation for different transport
-// protocols.
-//
-// `Client` can be used to fetch and send packfiles to a git server.
-// The `client` package provides higher level functions to instantiate the
-// appropriate `Client` based on the repository URL.
-//
-// go-git supports HTTP and SSH (see `Protocols`), but you can also install
-// your own protocols (see the `client` package).
-//
-// Each protocol has its own implementation of `Client`, but you should
-// generally not use them directly, use `client.NewClient` instead.
-package transport
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "net/url"
- "strconv"
- "strings"
-
- giturl "gopkg.in/src-d/go-git.v4/internal/url"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
-)
-
-var (
- ErrRepositoryNotFound = errors.New("repository not found")
- ErrEmptyRemoteRepository = errors.New("remote repository is empty")
- ErrAuthenticationRequired = errors.New("authentication required")
- ErrAuthorizationFailed = errors.New("authorization failed")
- ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given")
- ErrInvalidAuthMethod = errors.New("invalid auth method")
- ErrAlreadyConnected = errors.New("session already established")
-)
-
-const (
- UploadPackServiceName = "git-upload-pack"
- ReceivePackServiceName = "git-receive-pack"
-)
-
-// Transport can initiate git-upload-pack and git-receive-pack processes.
-// It is implemented both by the client and the server, making this an RPC.
-type Transport interface {
- // NewUploadPackSession starts a git-upload-pack session for an endpoint.
- NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error)
- // NewReceivePackSession starts a git-receive-pack session for an endpoint.
- NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error)
-}
-
-type Session interface {
- // AdvertisedReferences retrieves the advertised references for a
- // repository.
- // If the repository does not exist, returns ErrRepositoryNotFound.
- // If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
- AdvertisedReferences() (*packp.AdvRefs, error)
- io.Closer
-}
-
-type AuthMethod interface {
- fmt.Stringer
- Name() string
-}
-
-// UploadPackSession represents a git-upload-pack session.
-// A git-upload-pack session has two steps: reference discovery
-// (AdvertisedReferences) and uploading pack (UploadPack).
-type UploadPackSession interface {
- Session
- // UploadPack takes a git-upload-pack request and returns a response,
- // including a packfile. Don't be confused by terminology, the client
- // side of a git-upload-pack is called git-fetch-pack, although here
- // the same interface is used to make it RPC-like.
- UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error)
-}
-
-// ReceivePackSession represents a git-receive-pack session.
-// A git-receive-pack session has two steps: reference discovery
-// (AdvertisedReferences) and receiving pack (ReceivePack).
-// In that order.
-type ReceivePackSession interface {
- Session
- // ReceivePack sends an update references request and a packfile
- // reader and returns a ReportStatus and error. Don't be confused by
- // terminology, the client side of a git-receive-pack is called
- // git-send-pack, although here the same interface is used to make it
- // RPC-like.
- ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error)
-}
-
-// Endpoint represents a Git URL in any supported protocol.
-type Endpoint struct {
- // Protocol is the protocol of the endpoint (e.g. git, https, file).
- Protocol string
- // User is the user.
- User string
- // Password is the password.
- Password string
- // Host is the host.
- Host string
- // Port is the port to connect to; if 0, the default port for the given
- // protocol will be used.
- Port int
- // Path is the repository path.
- Path string
-}
-
-var defaultPorts = map[string]int{
- "http": 80,
- "https": 443,
- "git": 9418,
- "ssh": 22,
-}
-
-// String returns a string representation of the Git URL.
-func (u *Endpoint) String() string {
- var buf bytes.Buffer
- if u.Protocol != "" {
- buf.WriteString(u.Protocol)
- buf.WriteByte(':')
- }
-
- if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" {
- buf.WriteString("//")
-
- if u.User != "" || u.Password != "" {
- buf.WriteString(url.PathEscape(u.User))
- if u.Password != "" {
- buf.WriteByte(':')
- buf.WriteString(url.PathEscape(u.Password))
- }
-
- buf.WriteByte('@')
- }
-
- if u.Host != "" {
- buf.WriteString(u.Host)
-
- if u.Port != 0 {
- port, ok := defaultPorts[strings.ToLower(u.Protocol)]
- if !ok || ok && port != u.Port {
- fmt.Fprintf(&buf, ":%d", u.Port)
- }
- }
- }
- }
-
- if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
- buf.WriteByte('/')
- }
-
- buf.WriteString(u.Path)
- return buf.String()
-}
-
-func NewEndpoint(endpoint string) (*Endpoint, error) {
- if e, ok := parseSCPLike(endpoint); ok {
- return e, nil
- }
-
- if e, ok := parseFile(endpoint); ok {
- return e, nil
- }
-
- return parseURL(endpoint)
-}
-
-func parseURL(endpoint string) (*Endpoint, error) {
- u, err := url.Parse(endpoint)
- if err != nil {
- return nil, err
- }
-
- if !u.IsAbs() {
- return nil, plumbing.NewPermanentError(fmt.Errorf(
- "invalid endpoint: %s", endpoint,
- ))
- }
-
- var user, pass string
- if u.User != nil {
- user = u.User.Username()
- pass, _ = u.User.Password()
- }
-
- return &Endpoint{
- Protocol: u.Scheme,
- User: user,
- Password: pass,
- Host: u.Hostname(),
- Port: getPort(u),
- Path: getPath(u),
- }, nil
-}
-
-func getPort(u *url.URL) int {
- p := u.Port()
- if p == "" {
- return 0
- }
-
- i, err := strconv.Atoi(p)
- if err != nil {
- return 0
- }
-
- return i
-}
-
-func getPath(u *url.URL) string {
- var res string = u.Path
- if u.RawQuery != "" {
- res += "?" + u.RawQuery
- }
-
- if u.Fragment != "" {
- res += "#" + u.Fragment
- }
-
- return res
-}
-
-func parseSCPLike(endpoint string) (*Endpoint, bool) {
- if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
- return nil, false
- }
-
- user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
- port, err := strconv.Atoi(portStr)
- if err != nil {
- port = 22
- }
-
- return &Endpoint{
- Protocol: "ssh",
- User: user,
- Host: host,
- Port: port,
- Path: path,
- }, true
-}
-
-func parseFile(endpoint string) (*Endpoint, bool) {
- if giturl.MatchesScheme(endpoint) {
- return nil, false
- }
-
- path := endpoint
- return &Endpoint{
- Protocol: "file",
- Path: path,
- }, true
-}
-
-// UnsupportedCapabilities are the capabilities not supported by any client
-// implementation
-var UnsupportedCapabilities = []capability.Capability{
- capability.MultiACK,
- capability.MultiACKDetailed,
- capability.ThinPack,
-}
-
-// FilterUnsupportedCapabilities filters out all the UnsupportedCapabilities
-// from a capability.List; the intended usage is in the client implementation,
-// to filter the capabilities of an AdvRefs message.
-func FilterUnsupportedCapabilities(list *capability.List) {
- for _, c := range UnsupportedCapabilities {
- list.Delete(c)
- }
-}
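
A sketch of how NewEndpoint above dispatches between the URL, SCP-like and file forms (vendored import path; the endpoints are illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/transport"
)

func main() {
	for _, s := range []string{
		"https://github.com/go-git/go-git.git", // URL form
		"git@github.com:go-git/go-git.git",     // SCP-like form, parsed as ssh
		"/srv/git/project.git",                 // bare path, parsed as file
	} {
		ep, err := transport.NewEndpoint(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("protocol=%-5s host=%-11s path=%s\n",
			ep.Protocol, ep.Host, ep.Path)
	}
}
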
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go
deleted file mode 100644
index e799ee138f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Package file implements the file transport protocol.
-package file
-
-import (
- "bufio"
- "errors"
- "io"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
-)
-
-// DefaultClient is the default local client.
-var DefaultClient = NewClient(
- transport.UploadPackServiceName,
- transport.ReceivePackServiceName,
-)
-
-type runner struct {
- UploadPackBin string
- ReceivePackBin string
-}
-
-// NewClient returns a new local client using the given git-upload-pack and
-// git-receive-pack binaries.
-func NewClient(uploadPackBin, receivePackBin string) transport.Transport {
- return common.NewClient(&runner{
- UploadPackBin: uploadPackBin,
- ReceivePackBin: receivePackBin,
- })
-}
-
-func prefixExecPath(cmd string) (string, error) {
- // Use `git --exec-path` to find the exec path.
- execCmd := exec.Command("git", "--exec-path")
-
- stdout, err := execCmd.StdoutPipe()
- if err != nil {
- return "", err
- }
- stdoutBuf := bufio.NewReader(stdout)
-
- err = execCmd.Start()
- if err != nil {
- return "", err
- }
-
- execPathBytes, isPrefix, err := stdoutBuf.ReadLine()
- if err != nil {
- return "", err
- }
- if isPrefix {
- return "", errors.New("Couldn't read exec-path line all at once")
- }
-
- err = execCmd.Wait()
- if err != nil {
- return "", err
- }
- execPath := string(execPathBytes)
- execPath = strings.TrimSpace(execPath)
- cmd = filepath.Join(execPath, cmd)
-
- // Make sure it actually exists.
- _, err = exec.LookPath(cmd)
- if err != nil {
- return "", err
- }
- return cmd, nil
-}
-
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod,
-) (common.Command, error) {
-
- switch cmd {
- case transport.UploadPackServiceName:
- cmd = r.UploadPackBin
- case transport.ReceivePackServiceName:
- cmd = r.ReceivePackBin
- }
-
- _, err := exec.LookPath(cmd)
- if err != nil {
- if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
- cmd, err = prefixExecPath(cmd)
- if err != nil {
- return nil, err
- }
- } else {
- return nil, err
- }
- }
-
- return &command{cmd: exec.Command(cmd, ep.Path)}, nil
-}
-
-type command struct {
- cmd *exec.Cmd
- stderrCloser io.Closer
- closed bool
-}
-
-func (c *command) Start() error {
- return c.cmd.Start()
-}
-
-func (c *command) StderrPipe() (io.Reader, error) {
- // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
- // We use an io.Pipe and close it after the command finishes.
- r, w := io.Pipe()
- c.cmd.Stderr = w
- c.stderrCloser = r
- return r, nil
-}
-
-func (c *command) StdinPipe() (io.WriteCloser, error) {
- return c.cmd.StdinPipe()
-}
-
-func (c *command) StdoutPipe() (io.Reader, error) {
- return c.cmd.StdoutPipe()
-}
-
-func (c *command) Kill() error {
- c.cmd.Process.Kill()
- return c.Close()
-}
-
-// Close waits for the command to exit.
-func (c *command) Close() error {
- if c.closed {
- return nil
- }
-
- defer func() {
- c.closed = true
- _ = c.stderrCloser.Close()
-
- }()
-
- err := c.cmd.Wait()
- if _, ok := err.(*os.PathError); ok {
- return nil
- }
-
- // When a repository does not exist, the command exits with code 128.
- if _, ok := err.(*exec.ExitError); ok {
- return nil
- }
-
- return err
-}
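
A standalone sketch of the fallback implemented by prefixExecPath above: when git-upload-pack is not on $PATH, ask git itself where its helper binaries live (requires a git installation):

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	// `git --exec-path` prints the directory holding git's helper binaries.
	out, err := exec.Command("git", "--exec-path").Output()
	if err != nil {
		panic(err)
	}

	bin := filepath.Join(strings.TrimSpace(string(out)), "git-upload-pack")

	// Make sure it actually exists before using it.
	if _, err := exec.LookPath(bin); err != nil {
		panic(err)
	}
	fmt.Println(bin)
}
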
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go
deleted file mode 100644
index 61dd42d048..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package file
-
-import (
- "fmt"
- "os"
-
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/server"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// ServeUploadPack serves a git-upload-pack request using standard output, input
-// and error. This is meant to be used when implementing a git-upload-pack
-// command.
-func ServeUploadPack(path string) error {
- ep, err := transport.NewEndpoint(path)
- if err != nil {
- return err
- }
-
- // TODO: define and implement a server-side AuthMethod
- s, err := server.DefaultServer.NewUploadPackSession(ep, nil)
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
-
- return common.ServeUploadPack(srvCmd, s)
-}
-
-// ServeReceivePack serves a git-receive-pack request using standard output,
-// input and error. This is meant to be used when implementing a
-// git-receive-pack command.
-func ServeReceivePack(path string) error {
- ep, err := transport.NewEndpoint(path)
- if err != nil {
- return err
- }
-
- // TODO: define and implement a server-side AuthMethod
- s, err := server.DefaultServer.NewReceivePackSession(ep, nil)
- if err != nil {
- return fmt.Errorf("error creating session: %s", err)
- }
-
- return common.ServeReceivePack(srvCmd, s)
-}
-
-var srvCmd = common.ServerCommand{
- Stdin: os.Stdin,
- Stdout: ioutil.WriteNopCloser(os.Stdout),
- Stderr: os.Stderr,
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go
deleted file mode 100644
index 78aaa3b067..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Package git implements the git transport protocol.
-package git
-
-import (
- "fmt"
- "io"
- "net"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// DefaultClient is the default git client.
-var DefaultClient = common.NewClient(&runner{})
-
-const DefaultPort = 9418
-
-type runner struct{}
-
-// Command returns a new Command for the given cmd in the given Endpoint
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
- // auth not allowed since git protocol doesn't support authentication
- if auth != nil {
- return nil, transport.ErrInvalidAuthMethod
- }
- c := &command{command: cmd, endpoint: ep}
- if err := c.connect(); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type command struct {
- conn net.Conn
- connected bool
- command string
- endpoint *transport.Endpoint
-}
-
-// Start executes the command, sending the required message over the TCP connection
-func (c *command) Start() error {
- cmd := endpointToCommand(c.command, c.endpoint)
-
- e := pktline.NewEncoder(c.conn)
- return e.Encode([]byte(cmd))
-}
-
-func (c *command) connect() error {
- if c.connected {
- return transport.ErrAlreadyConnected
- }
-
- var err error
- c.conn, err = net.Dial("tcp", c.getHostWithPort())
- if err != nil {
- return err
- }
-
- c.connected = true
- return nil
-}
-
-func (c *command) getHostWithPort() string {
- host := c.endpoint.Host
- port := c.endpoint.Port
- if port <= 0 {
- port = DefaultPort
- }
-
- return fmt.Sprintf("%s:%d", host, port)
-}
-
-// StderrPipe returns nil since the git protocol doesn't have a dedicated error channel
-func (c *command) StderrPipe() (io.Reader, error) {
- return nil, nil
-}
-
-// StdinPipe returns the underlying connection as a WriteCloser, wrapped to
-// prevent calls to Close on the connection; a command execution in the git
-// protocol can't be closed or killed
-func (c *command) StdinPipe() (io.WriteCloser, error) {
- return ioutil.WriteNopCloser(c.conn), nil
-}
-
-// StdoutPipe returns the underlying connection as a Reader
-func (c *command) StdoutPipe() (io.Reader, error) {
- return c.conn, nil
-}
-
-func endpointToCommand(cmd string, ep *transport.Endpoint) string {
- host := ep.Host
- if ep.Port != DefaultPort {
- host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
- }
-
- return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
-}
-
-// Close closes the TCP connection.
-func (c *command) Close() error {
- if !c.connected {
- return nil
- }
-
- c.connected = false
- return c.conn.Close()
-}
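
The request that Start sends is the classic git daemon handshake: service, path, and a host= parameter, NUL-separated inside a single pkt-line. A sketch showing the bytes endpointToCommand produces for a hypothetical git://example.com/repo.git endpoint:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)

func main() {
	// Equivalent of endpointToCommand for git://example.com/repo.git.
	payload := fmt.Sprintf("%s %s%chost=%s%c",
		"git-upload-pack", "/repo.git", 0, "example.com", 0)

	var buf bytes.Buffer
	if err := pktline.NewEncoder(&buf).Encode([]byte(payload)); err != nil {
		panic(err)
	}

	// A single length-prefixed pkt-line, e.g.
	// "002fgit-upload-pack /repo.git\x00host=example.com\x00".
	fmt.Printf("%q\n", buf.String())
}
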
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go
deleted file mode 100644
index 38e903d456..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Package http implements the HTTP transport protocol.
-package http
-
-import (
- "bytes"
- "fmt"
- "net"
- "net/http"
- "strconv"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// It requires a bytes.Buffer because we need to know the content length in advance
-func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
- req.Header.Add("User-Agent", "git/1.0")
- req.Header.Add("Host", host) // host:port
-
- if content == nil {
- req.Header.Add("Accept", "*/*")
- return
- }
-
- req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType))
- req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType))
- req.Header.Add("Content-Length", strconv.Itoa(content.Len()))
-}
-
-const infoRefsPath = "/info/refs"
-
-func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) {
- url := fmt.Sprintf(
- "%s%s?service=%s",
- s.endpoint.String(), infoRefsPath, serviceName,
- )
-
- req, err := http.NewRequest(http.MethodGet, url, nil)
- if err != nil {
- return nil, err
- }
-
- s.ApplyAuthToRequest(req)
- applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
- res, err := s.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- s.ModifyEndpointIfRedirect(res)
- defer ioutil.CheckClose(res.Body, &err)
-
- if err = NewErr(res); err != nil {
- return nil, err
- }
-
- ar := packp.NewAdvRefs()
- if err = ar.Decode(res.Body); err != nil {
- if err == packp.ErrEmptyAdvRefs {
- err = transport.ErrEmptyRemoteRepository
- }
-
- return nil, err
- }
-
- transport.FilterUnsupportedCapabilities(ar.Capabilities)
- s.advRefs = ar
-
- return ar, nil
-}
-
-type client struct {
- c *http.Client
-}
-
-// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
-var DefaultClient = NewClient(nil)
-
-// NewClient creates a new client with a custom net/http client.
-// See `InstallProtocol` to install and override the default http client.
-// Unless a properly initialized client is given, it will fall back to
-// `http.DefaultClient`.
-//
-// Note that the HTTP client cannot distinguish between private repositories
-// and nonexistent repositories on GitHub, so it returns
-// `ErrAuthorizationRequired` for both.
-func NewClient(c *http.Client) transport.Transport {
- if c == nil {
- return &client{http.DefaultClient}
- }
-
- return &client{
- c: c,
- }
-}
-
-func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.UploadPackSession, error) {
-
- return newUploadPackSession(c.c, ep, auth)
-}
-
-func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.ReceivePackSession, error) {
-
- return newReceivePackSession(c.c, ep, auth)
-}
-
-type session struct {
- auth AuthMethod
- client *http.Client
- endpoint *transport.Endpoint
- advRefs *packp.AdvRefs
-}
-
-func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
- s := &session{
- auth: basicAuthFromEndpoint(ep),
- client: c,
- endpoint: ep,
- }
- if auth != nil {
- a, ok := auth.(AuthMethod)
- if !ok {
- return nil, transport.ErrInvalidAuthMethod
- }
-
- s.auth = a
- }
-
- return s, nil
-}
-
-func (s *session) ApplyAuthToRequest(req *http.Request) {
- if s.auth == nil {
- return
- }
-
- s.auth.SetAuth(req)
-}
-
-func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
- if res.Request == nil {
- return
- }
-
- r := res.Request
- if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
- return
- }
-
- h, p, err := net.SplitHostPort(r.URL.Host)
- if err != nil {
- h = r.URL.Host
- }
- if p != "" {
- port, err := strconv.Atoi(p)
- if err == nil {
- s.endpoint.Port = port
- }
- }
- s.endpoint.Host = h
-
- s.endpoint.Protocol = r.URL.Scheme
- s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
-}
-
-func (*session) Close() error {
- return nil
-}
-
-// AuthMethod is the interface all auth methods for HTTP services must implement
-type AuthMethod interface {
- transport.AuthMethod
- SetAuth(r *http.Request)
-}
-
-func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
- u := ep.User
- if u == "" {
- return nil
- }
-
- return &BasicAuth{u, ep.Password}
-}
-
-// BasicAuth represents HTTP basic authentication credentials
-type BasicAuth struct {
- Username, Password string
-}
-
-func (a *BasicAuth) SetAuth(r *http.Request) {
- if a == nil {
- return
- }
-
- r.SetBasicAuth(a.Username, a.Password)
-}
-
-// Name returns the name of the auth method
-func (a *BasicAuth) Name() string {
- return "http-basic-auth"
-}
-
-func (a *BasicAuth) String() string {
- masked := "*******"
- if a.Password == "" {
- masked = "<empty>"
- }
-
- return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
-}
-
-// TokenAuth implements an http.AuthMethod that can be used with http transport
-// to authenticate with HTTP token authentication (also known as bearer
-// authentication).
-//
-// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
-// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
-// use basic HTTP authentication, with the OAuth token as user or password.
-// Check the documentation of your git server for details.
-type TokenAuth struct {
- Token string
-}
-
-func (a *TokenAuth) SetAuth(r *http.Request) {
- if a == nil {
- return
- }
- r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token))
-}
-
-// Name returns the name of the auth method
-func (a *TokenAuth) Name() string {
- return "http-token-auth"
-}
-
-func (a *TokenAuth) String() string {
- masked := "*******"
- if a.Token == "" {
- masked = "<empty>"
- }
- return fmt.Sprintf("%s - %s", a.Name(), masked)
-}
-
-// Err is a dedicated error type for HTTP responses with unexpected status codes
-type Err struct {
- Response *http.Response
-}
-
-// NewErr returns a new Err based on an HTTP response
-func NewErr(r *http.Response) error {
- if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
- return nil
- }
-
- switch r.StatusCode {
- case http.StatusUnauthorized:
- return transport.ErrAuthenticationRequired
- case http.StatusForbidden:
- return transport.ErrAuthorizationFailed
- case http.StatusNotFound:
- return transport.ErrRepositoryNotFound
- }
-
- return plumbing.NewUnexpectedError(&Err{r})
-}
-
-// StatusCode returns the status code of the response
-func (e *Err) StatusCode() int {
- return e.Response.StatusCode
-}
-
-func (e *Err) Error() string {
- return fmt.Sprintf("unexpected requesting %q status code: %d",
- e.Response.Request.URL, e.Response.StatusCode,
- )
-}
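
As the TokenAuth comment above stresses, OAuth tokens for GitHub-style servers travel over BasicAuth, not bearer authentication. A minimal clone sketch; the repository URL and environment variable are hypothetical:

package main

import (
	"os"

	git "gopkg.in/src-d/go-git.v4"
	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

func main() {
	// The token goes in the password slot; the username can be anything
	// non-empty for most providers.
	_, err := git.PlainClone("/tmp/repo", false, &git.CloneOptions{
		URL: "https://github.com/example/private.git",
		Auth: &githttp.BasicAuth{
			Username: "git",
			Password: os.Getenv("GITHUB_TOKEN"),
		},
		Progress: os.Stdout,
	})
	if err != nil {
		panic(err)
	}
}
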
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go
deleted file mode 100644
index 72ba0ec532..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "net/http"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-type rpSession struct {
- *session
-}
-
-func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
- s, err := newSession(c, ep, auth)
- return &rpSession{s}, err
-}
-
-func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return advertisedReferences(s.session, transport.ReceivePackServiceName)
-}
-
-func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (
- *packp.ReportStatus, error) {
- url := fmt.Sprintf(
- "%s/%s",
- s.endpoint.String(), transport.ReceivePackServiceName,
- )
-
- buf := bytes.NewBuffer(nil)
- if err := req.Encode(buf); err != nil {
- return nil, err
- }
-
- res, err := s.doRequest(ctx, http.MethodPost, url, buf)
- if err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(res.Body)
- if err == ioutil.ErrEmptyReader {
- return nil, nil
- }
-
- if err != nil {
- return nil, err
- }
-
- var d *sideband.Demuxer
- if req.Capabilities.Supports(capability.Sideband64k) {
- d = sideband.NewDemuxer(sideband.Sideband64k, r)
- } else if req.Capabilities.Supports(capability.Sideband) {
- d = sideband.NewDemuxer(sideband.Sideband, r)
- }
- if d != nil {
- d.Progress = req.Progress
- r = d
- }
-
- rc := ioutil.NewReadCloser(r, res.Body)
-
- report := packp.NewReportStatus()
- if err := report.Decode(rc); err != nil {
- return nil, err
- }
-
- return report, report.Error()
-}
-
-func (s *rpSession) doRequest(
- ctx context.Context, method, url string, content *bytes.Buffer,
-) (*http.Response, error) {
-
- var body io.Reader
- if content != nil {
- body = content
- }
-
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, plumbing.NewPermanentError(err)
- }
-
- applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName)
- s.ApplyAuthToRequest(req)
-
- res, err := s.client.Do(req.WithContext(ctx))
- if err != nil {
- return nil, plumbing.NewUnexpectedError(err)
- }
-
- if err := NewErr(res); err != nil {
- _ = res.Body.Close()
- return nil, err
- }
-
- return res, nil
-}
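
At the porcelain level this session is driven by Repository.Push, which encodes the ReferenceUpdateRequest, POSTs it to <url>/git-receive-pack as shown above, and decodes the report-status. A sketch, with hypothetical path and token variable:

package main

import (
	"os"

	git "gopkg.in/src-d/go-git.v4"
	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

func main() {
	repo, err := git.PlainOpen("/tmp/repo") // hypothetical working copy
	if err != nil {
		panic(err)
	}

	err = repo.Push(&git.PushOptions{
		Auth: &githttp.BasicAuth{
			Username: "git",
			Password: os.Getenv("GIT_TOKEN"),
		},
		Progress: os.Stdout,
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		panic(err)
	}
}
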
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go
deleted file mode 100644
index fb5ac361c1..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "net/http"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-type upSession struct {
- *session
-}
-
-func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
- s, err := newSession(c, ep, auth)
- return &upSession{s}, err
-}
-
-func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- return advertisedReferences(s.session, transport.UploadPackServiceName)
-}
-
-func (s *upSession) UploadPack(
- ctx context.Context, req *packp.UploadPackRequest,
-) (*packp.UploadPackResponse, error) {
-
- if req.IsEmpty() {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- url := fmt.Sprintf(
- "%s/%s",
- s.endpoint.String(), transport.UploadPackServiceName,
- )
-
- content, err := uploadPackRequestToReader(req)
- if err != nil {
- return nil, err
- }
-
- res, err := s.doRequest(ctx, http.MethodPost, url, content)
- if err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(res.Body)
- if err != nil {
- if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- return nil, err
- }
-
- rc := ioutil.NewReadCloser(r, res.Body)
- return common.DecodeUploadPackResponse(rc, req)
-}
-
-// Close does nothing.
-func (s *upSession) Close() error {
- return nil
-}
-
-func (s *upSession) doRequest(
- ctx context.Context, method, url string, content *bytes.Buffer,
-) (*http.Response, error) {
-
- var body io.Reader
- if content != nil {
- body = content
- }
-
- req, err := http.NewRequest(method, url, body)
- if err != nil {
- return nil, plumbing.NewPermanentError(err)
- }
-
- applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName)
- s.ApplyAuthToRequest(req)
-
- res, err := s.client.Do(req.WithContext(ctx))
- if err != nil {
- return nil, plumbing.NewUnexpectedError(err)
- }
-
- if err := NewErr(res); err != nil {
- _ = res.Body.Close()
- return nil, err
- }
-
- return res, nil
-}
-
-func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) {
- buf := bytes.NewBuffer(nil)
- e := pktline.NewEncoder(buf)
-
- if err := req.UploadRequest.Encode(buf); err != nil {
- return nil, fmt.Errorf("sending upload-req message: %s", err)
- }
-
- if err := req.UploadHaves.Encode(buf, false); err != nil {
- return nil, fmt.Errorf("sending haves message: %s", err)
- }
-
- if err := e.EncodeString("done\n"); err != nil {
- return nil, err
- }
-
- return buf, nil
-}
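
The NewClient doc comment in common.go points at InstallProtocol for swapping the HTTP stack. A sketch that trusts a self-signed certificate by registering a transport built from a custom *http.Client; the TLS setting is an illustration, not a recommendation:

package main

import (
	"crypto/tls"
	"net/http"

	"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

func main() {
	customClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}

	// Every later https:// clone/fetch/push in this process now goes
	// through customClient.
	client.InstallProtocol("https", githttp.NewClient(customClient))
}
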
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go
deleted file mode 100644
index 00497f3c11..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Package common implements the git pack protocol with a pluggable transport.
-// This is a low-level package to implement new transports. Use a concrete
-// implementation instead (e.g. http, file, ssh).
-//
-// A simple example of usage can be found in the file package.
-package common
-
-import (
- "bufio"
- "context"
- "errors"
- "fmt"
- "io"
- stdioutil "io/ioutil"
- "strings"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-const (
- readErrorSecondsTimeout = 10
-)
-
-var (
- ErrTimeoutExceeded = errors.New("timeout exceeded")
-)
-
-// Commander creates Command instances. This is the main entry point for
-// transport implementations.
-type Commander interface {
- // Command creates a new Command for the given git command and
- // endpoint. cmd can be git-upload-pack or git-receive-pack. An
- // error should be returned if the endpoint is not supported or the
- // command cannot be created (e.g. binary does not exist, connection
- // cannot be established).
- Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error)
-}
-
-// Command is used for a single command execution.
-// This interface is modeled after exec.Cmd and ssh.Session in the standard
-// library.
-type Command interface {
- // StderrPipe returns a pipe that will be connected to the command's
- // standard error when the command starts. It should not be called after
- // Start.
- StderrPipe() (io.Reader, error)
- // StdinPipe returns a pipe that will be connected to the command's
- // standard input when the command starts. It should not be called after
- // Start. The pipe should be closed when no more input is expected.
- StdinPipe() (io.WriteCloser, error)
- // StdoutPipe returns a pipe that will be connected to the command's
- // standard output when the command starts. It should not be called after
- // Start.
- StdoutPipe() (io.Reader, error)
- // Start starts the specified command. It does not wait for it to
- // complete.
- Start() error
- // Close closes the command and releases any resources used by it. It
- // will block until the command exits.
- Close() error
-}
-
-// CommandKiller expands the Command interface, enabling it to be killed.
-type CommandKiller interface {
- // Kill and close the session regardless of its state. It will block until
- // the command is terminated.
- Kill() error
-}
-
-type client struct {
- cmdr Commander
-}
-
-// NewClient creates a new client using the given Commander.
-func NewClient(runner Commander) transport.Transport {
- return &client{runner}
-}
-
-// NewUploadPackSession creates a new UploadPackSession.
-func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.UploadPackSession, error) {
-
- return c.newSession(transport.UploadPackServiceName, ep, auth)
-}
-
-// NewReceivePackSession creates a new ReceivePackSession.
-func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
- transport.ReceivePackSession, error) {
-
- return c.newSession(transport.ReceivePackServiceName, ep, auth)
-}
-
-type session struct {
- Stdin io.WriteCloser
- Stdout io.Reader
- Command Command
-
- isReceivePack bool
- advRefs *packp.AdvRefs
- packRun bool
- finished bool
- firstErrLine chan string
-}
-
-func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
- cmd, err := c.cmdr.Command(s, ep, auth)
- if err != nil {
- return nil, err
- }
-
- stdin, err := cmd.StdinPipe()
- if err != nil {
- return nil, err
- }
-
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- stderr, err := cmd.StderrPipe()
- if err != nil {
- return nil, err
- }
-
- if err := cmd.Start(); err != nil {
- return nil, err
- }
-
- return &session{
- Stdin: stdin,
- Stdout: stdout,
- Command: cmd,
- firstErrLine: c.listenFirstError(stderr),
- isReceivePack: s == transport.ReceivePackServiceName,
- }, nil
-}
-
-func (c *client) listenFirstError(r io.Reader) chan string {
- if r == nil {
- return nil
- }
-
- errLine := make(chan string, 1)
- go func() {
- s := bufio.NewScanner(r)
- if s.Scan() {
- errLine <- s.Text()
- } else {
- close(errLine)
- }
-
- _, _ = io.Copy(stdioutil.Discard, r)
- }()
-
- return errLine
-}
-
-// AdvertisedReferences retrieves the advertised references from the server.
-func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) {
- if s.advRefs != nil {
- return s.advRefs, nil
- }
-
- ar := packp.NewAdvRefs()
- if err := ar.Decode(s.Stdout); err != nil {
- if err := s.handleAdvRefDecodeError(err); err != nil {
- return nil, err
- }
- }
-
- transport.FilterUnsupportedCapabilities(ar.Capabilities)
- s.advRefs = ar
- return ar, nil
-}
-
-func (s *session) handleAdvRefDecodeError(err error) error {
- // If the repository is not found, we get empty stdout and the server
- // writes an error to stderr.
- if err == packp.ErrEmptyInput {
- s.finished = true
- if err := s.checkNotFoundError(); err != nil {
- return err
- }
-
- return io.ErrUnexpectedEOF
- }
-
- // For empty (but existing) repositories, we get an empty advertised-references
- // message that is still valid; it includes at least a flush.
- if err == packp.ErrEmptyAdvRefs {
- // Empty repositories are valid for git-receive-pack.
- if s.isReceivePack {
- return nil
- }
-
- if err := s.finish(); err != nil {
- return err
- }
-
- return transport.ErrEmptyRemoteRepository
- }
-
- // Some servers send errors as normal content (git protocol), so when we
- // try to decode it, decoding fails; we need to inspect the content to
- // detect not-found errors
- if uerr, ok := err.(*packp.ErrUnexpectedData); ok {
- if isRepoNotFoundError(string(uerr.Data)) {
- return transport.ErrRepositoryNotFound
- }
- }
-
- return err
-}
-
-// UploadPack performs a request to the server to fetch a packfile. A reader is
-// returned with the packfile content. The reader must be closed after reading.
-func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
- if req.IsEmpty() {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- if _, err := s.AdvertisedReferences(); err != nil {
- return nil, err
- }
-
- s.packRun = true
-
- in := s.StdinContext(ctx)
- out := s.StdoutContext(ctx)
-
- if err := uploadPack(in, out, req); err != nil {
- return nil, err
- }
-
- r, err := ioutil.NonEmptyReader(out)
- if err == ioutil.ErrEmptyReader {
- if c, ok := s.Stdout.(io.Closer); ok {
- _ = c.Close()
- }
-
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err != nil {
- return nil, err
- }
-
- rc := ioutil.NewReadCloser(r, s)
- return DecodeUploadPackResponse(rc, req)
-}
-
-func (s *session) StdinContext(ctx context.Context) io.WriteCloser {
- return ioutil.NewWriteCloserOnError(
- ioutil.NewContextWriteCloser(ctx, s.Stdin),
- s.onError,
- )
-}
-
-func (s *session) StdoutContext(ctx context.Context) io.Reader {
- return ioutil.NewReaderOnError(
- ioutil.NewContextReader(ctx, s.Stdout),
- s.onError,
- )
-}
-
-func (s *session) onError(err error) {
- if k, ok := s.Command.(CommandKiller); ok {
- _ = k.Kill()
- }
-
- _ = s.Close()
-}
-
-func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) {
- if _, err := s.AdvertisedReferences(); err != nil {
- return nil, err
- }
-
- s.packRun = true
-
- w := s.StdinContext(ctx)
- if err := req.Encode(w); err != nil {
- return nil, err
- }
-
- if err := w.Close(); err != nil {
- return nil, err
- }
-
- if !req.Capabilities.Supports(capability.ReportStatus) {
- // If we don't have report-status, we can only
- // check the error returned when closing the command.
- return nil, s.Command.Close()
- }
-
- r := s.StdoutContext(ctx)
-
- var d *sideband.Demuxer
- if req.Capabilities.Supports(capability.Sideband64k) {
- d = sideband.NewDemuxer(sideband.Sideband64k, r)
- } else if req.Capabilities.Supports(capability.Sideband) {
- d = sideband.NewDemuxer(sideband.Sideband, r)
- }
- if d != nil {
- d.Progress = req.Progress
- r = d
- }
-
- report := packp.NewReportStatus()
- if err := report.Decode(r); err != nil {
- return nil, err
- }
-
- if err := report.Error(); err != nil {
- defer s.Close()
- return report, err
- }
-
- return report, s.Command.Close()
-}
-
-func (s *session) finish() error {
- if s.finished {
- return nil
- }
-
- s.finished = true
-
- // If we did not run an upload/receive-pack, we close the connection
- // gracefully by sending a flush packet to the server. If the server
- // operates correctly, it will exit with status 0.
- if !s.packRun {
- _, err := s.Stdin.Write(pktline.FlushPkt)
- return err
- }
-
- return nil
-}
-
-func (s *session) Close() (err error) {
- err = s.finish()
-
- defer ioutil.CheckClose(s.Command, &err)
- return
-}
-
-func (s *session) checkNotFoundError() error {
- t := time.NewTicker(time.Second * readErrorSecondsTimeout)
- defer t.Stop()
-
- select {
- case <-t.C:
- return ErrTimeoutExceeded
- case line, ok := <-s.firstErrLine:
- if !ok {
- return nil
- }
-
- if isRepoNotFoundError(line) {
- return transport.ErrRepositoryNotFound
- }
-
- return fmt.Errorf("unknown error: %s", line)
- }
-}
-
-var (
- githubRepoNotFoundErr = "ERROR: Repository not found."
- bitbucketRepoNotFoundErr = "conq: repository does not exist."
- localRepoNotFoundErr = "does not appear to be a git repository"
- gitProtocolNotFoundErr = "ERR \n Repository not found."
- gitProtocolNoSuchErr = "ERR no such repository"
- gitProtocolAccessDeniedErr = "ERR access denied"
- gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access"
-)
-
-func isRepoNotFoundError(s string) bool {
- if strings.HasPrefix(s, githubRepoNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, bitbucketRepoNotFoundErr) {
- return true
- }
-
- if strings.HasSuffix(s, localRepoNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolNotFoundErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolNoSuchErr) {
- return true
- }
-
- if strings.HasPrefix(s, gitProtocolAccessDeniedErr) {
- return true
- }
-
- if strings.HasPrefix(s, gogsAccessDeniedErr) {
- return true
- }
-
- return false
-}
-
-var (
- nak = []byte("NAK")
- eol = []byte("\n")
-)
-
-// uploadPack implements the git-upload-pack protocol.
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
- // TODO support multi_ack mode
- // TODO support multi_ack_detailed mode
- // TODO support acks for common objects
- // TODO build a proper state machine for all these processing options
-
- if err := req.UploadRequest.Encode(w); err != nil {
- return fmt.Errorf("sending upload-req message: %s", err)
- }
-
- if err := req.UploadHaves.Encode(w, true); err != nil {
- return fmt.Errorf("sending haves message: %s", err)
- }
-
- if err := sendDone(w); err != nil {
- return fmt.Errorf("sending done message: %s", err)
- }
-
- if err := w.Close(); err != nil {
- return fmt.Errorf("closing input: %s", err)
- }
-
- return nil
-}
-
-func sendDone(w io.Writer) error {
- e := pktline.NewEncoder(w)
-
- return e.Encodef("done\n")
-}
-
-// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse
-func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) (
- *packp.UploadPackResponse, error,
-) {
- res := packp.NewUploadPackResponse(req)
- if err := res.Decode(r); err != nil {
- return nil, fmt.Errorf("error decoding upload-pack response: %s", err)
- }
-
- return res, nil
-}
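
The negotiation that uploadPack writes is easy to inspect offline by encoding an UploadRequest with one want into a buffer. A sketch; the hash is arbitrary:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	req := packp.NewUploadPackRequest()
	req.Wants = append(req.Wants,
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))

	var buf bytes.Buffer
	if err := req.UploadRequest.Encode(&buf); err != nil {
		panic(err)
	}

	// Pkt-lines such as "0032want 6ecf0ef2..." followed by a flush-pkt.
	fmt.Printf("%q\n", buf.String())
}
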
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go
deleted file mode 100644
index f4ca6924e1..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package common
-
-import (
- "context"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// ServerCommand is used for a single server command execution.
-type ServerCommand struct {
- Stderr io.Writer
- Stdout io.WriteCloser
- Stdin io.Reader
-}
-
-func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) {
- ioutil.CheckClose(cmd.Stdout, &err)
-
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return err
- }
-
- if err := ar.Encode(cmd.Stdout); err != nil {
- return err
- }
-
- req := packp.NewUploadPackRequest()
- if err := req.Decode(cmd.Stdin); err != nil {
- return err
- }
-
- var resp *packp.UploadPackResponse
- resp, err = s.UploadPack(context.TODO(), req)
- if err != nil {
- return err
- }
-
- return resp.Encode(cmd.Stdout)
-}
-
-func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error {
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return fmt.Errorf("internal error in advertised references: %s", err)
- }
-
- if err := ar.Encode(cmd.Stdout); err != nil {
- return fmt.Errorf("error in advertised references encoding: %s", err)
- }
-
- req := packp.NewReferenceUpdateRequest()
- if err := req.Decode(cmd.Stdin); err != nil {
- return fmt.Errorf("error decoding: %s", err)
- }
-
- rs, err := s.ReceivePack(context.TODO(), req)
- if rs != nil {
- if err := rs.Encode(cmd.Stdout); err != nil {
- return fmt.Errorf("error in encoding report status %s", err)
- }
- }
-
- if err != nil {
- return fmt.Errorf("error in receive pack: %s", err)
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go
deleted file mode 100644
index 13b35262de..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package server
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/storage/filesystem"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-billy.v4/osfs"
-)
-
-// DefaultLoader is a filesystem loader ignoring host and resolving paths to /.
-var DefaultLoader = NewFilesystemLoader(osfs.New(""))
-
-// Loader loads a repository's storer.Storer based on an optional host and a path.
-type Loader interface {
- // Load loads a storer.Storer given a transport.Endpoint.
- // Returns transport.ErrRepositoryNotFound if the repository does not
- // exist.
- Load(ep *transport.Endpoint) (storer.Storer, error)
-}
-
-type fsLoader struct {
- base billy.Filesystem
-}
-
-// NewFilesystemLoader creates a Loader that ignores host and resolves paths
-// with a given base filesystem.
-func NewFilesystemLoader(base billy.Filesystem) Loader {
- return &fsLoader{base}
-}
-
-// Load looks up the endpoint's path in the base file system and returns a
-// storer for it. Returns transport.ErrRepositoryNotFound if a repository does
-// not exist in the given path.
-func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
- fs, err := l.base.Chroot(ep.Path)
- if err != nil {
- return nil, err
- }
-
- if _, err := fs.Stat("config"); err != nil {
- return nil, transport.ErrRepositoryNotFound
- }
-
- return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil
-}
-
-// MapLoader is a Loader that uses a lookup map of storer.Storer by
-// transport.Endpoint.
-type MapLoader map[string]storer.Storer
-
-// Load returns a storer.Storer for a given transport.Endpoint by looking it up
-// in the map. Returns transport.ErrRepositoryNotFound if the endpoint does not
-// exist.
-func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
- s, ok := l[ep.String()]
- if !ok {
- return nil, transport.ErrRepositoryNotFound
- }
-
- return s, nil
-}
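
Both loaders plug into NewServer from server.go below. A sketch serving bare repositories from a hypothetical /srv/git directory and opening an upload-pack session by hand:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing/transport"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/server"
)

func main() {
	loader := server.NewFilesystemLoader(osfs.New("/srv/git"))
	srv := server.NewServer(loader)

	ep, err := transport.NewEndpoint("/project.git")
	if err != nil {
		panic(err)
	}

	// Fails with transport.ErrRepositoryNotFound when the chrooted path
	// holds no config file.
	sess, err := srv.NewUploadPackSession(ep, nil)
	if err != nil {
		panic(err)
	}

	refs, err := sess.AdvertisedReferences()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(refs.References), "references advertised")
}
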
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go
deleted file mode 100644
index 8e0dcc1192..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go
+++ /dev/null
@@ -1,422 +0,0 @@
-// Package server implements the git server protocol. For most use cases, the
-// transport-specific implementations should be used.
-package server
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/revlist"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-var DefaultServer = NewServer(DefaultLoader)
-
-type server struct {
- loader Loader
- handler *handler
-}
-
-// NewServer returns a transport.Transport implementing a git server,
-// independent of transport. Each transport must wrap this.
-func NewServer(loader Loader) transport.Transport {
- return &server{
- loader,
- &handler{asClient: false},
- }
-}
-
-// NewClient returns a transport.Transport implementing a client with an
-// embedded server.
-func NewClient(loader Loader) transport.Transport {
- return &server{
- loader,
- &handler{asClient: true},
- }
-}
-
-func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
- sto, err := s.loader.Load(ep)
- if err != nil {
- return nil, err
- }
-
- return s.handler.NewUploadPackSession(sto)
-}
-
-func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
- sto, err := s.loader.Load(ep)
- if err != nil {
- return nil, err
- }
-
- return s.handler.NewReceivePackSession(sto)
-}
-
-type handler struct {
- asClient bool
-}
-
-func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) {
- return &upSession{
- session: session{storer: s, asClient: h.asClient},
- }, nil
-}
-
-func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) {
- return &rpSession{
- session: session{storer: s, asClient: h.asClient},
- cmdStatus: map[plumbing.ReferenceName]error{},
- }, nil
-}
-
-type session struct {
- storer storer.Storer
- caps *capability.List
- asClient bool
-}
-
-func (s *session) Close() error {
- return nil
-}
-
-func (s *session) SetAuth(transport.AuthMethod) error {
- //TODO: deprecate
- return nil
-}
-
-func (s *session) checkSupportedCapabilities(cl *capability.List) error {
- for _, c := range cl.All() {
- if !s.caps.Supports(c) {
- return fmt.Errorf("unsupported capability: %s", c)
- }
- }
-
- return nil
-}
-
-type upSession struct {
- session
-}
-
-func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- ar := packp.NewAdvRefs()
-
- if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = ar.Capabilities
-
- if err := setReferences(s.storer, ar); err != nil {
- return nil, err
- }
-
- if err := setHEAD(s.storer, ar); err != nil {
- return nil, err
- }
-
- if s.asClient && len(ar.References) == 0 {
- return nil, transport.ErrEmptyRemoteRepository
- }
-
- return ar, nil
-}
-
-func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
- if req.IsEmpty() {
- return nil, transport.ErrEmptyUploadPackRequest
- }
-
- if err := req.Validate(); err != nil {
- return nil, err
- }
-
- if s.caps == nil {
- s.caps = capability.NewList()
- if err := s.setSupportedCapabilities(s.caps); err != nil {
- return nil, err
- }
- }
-
- if err := s.checkSupportedCapabilities(req.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = req.Capabilities
-
- if len(req.Shallows) > 0 {
- return nil, fmt.Errorf("shallow not supported")
- }
-
- objs, err := s.objectsToUpload(req)
- if err != nil {
- return nil, err
- }
-
- pr, pw := io.Pipe()
- e := packfile.NewEncoder(pw, s.storer, false)
- go func() {
- // TODO: plumb through a pack window.
- _, err := e.Encode(objs, 10)
- pw.CloseWithError(err)
- }()
-
- return packp.NewUploadPackResponseWithPackfile(req,
- ioutil.NewContextReadCloser(ctx, pr),
- ), nil
-}
-
-func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) {
- haves, err := revlist.Objects(s.storer, req.Haves, nil)
- if err != nil {
- return nil, err
- }
-
- return revlist.Objects(s.storer, req.Wants, haves)
-}
-
-func (*upSession) setSupportedCapabilities(c *capability.List) error {
- if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
- return err
- }
-
- if err := c.Set(capability.OFSDelta); err != nil {
- return err
- }
-
- return nil
-}
-
-type rpSession struct {
- session
- cmdStatus map[plumbing.ReferenceName]error
- firstErr error
- unpackErr error
-}
-
-func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
- ar := packp.NewAdvRefs()
-
- if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = ar.Capabilities
-
- if err := setReferences(s.storer, ar); err != nil {
- return nil, err
- }
-
- if err := setHEAD(s.storer, ar); err != nil {
- return nil, err
- }
-
- return ar, nil
-}
-
-var (
- ErrUpdateReference = errors.New("failed to update ref")
-)
-
-func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) {
- if s.caps == nil {
- s.caps = capability.NewList()
- if err := s.setSupportedCapabilities(s.caps); err != nil {
- return nil, err
- }
- }
-
- if err := s.checkSupportedCapabilities(req.Capabilities); err != nil {
- return nil, err
- }
-
- s.caps = req.Capabilities
-
- //TODO: Implement 'atomic' update of references.
-
- r := ioutil.NewContextReadCloser(ctx, req.Packfile)
- if err := s.writePackfile(r); err != nil {
- s.unpackErr = err
- s.firstErr = err
- return s.reportStatus(), err
- }
-
- s.updateReferences(req)
- return s.reportStatus(), s.firstErr
-}
-
-func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
- for _, cmd := range req.Commands {
- exists, err := referenceExists(s.storer, cmd.Name)
- if err != nil {
- s.setStatus(cmd.Name, err)
- continue
- }
-
- switch cmd.Action() {
- case packp.Create:
- if exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- ref := plumbing.NewHashReference(cmd.Name, cmd.New)
- err := s.storer.SetReference(ref)
- s.setStatus(cmd.Name, err)
- case packp.Delete:
- if !exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- err := s.storer.RemoveReference(cmd.Name)
- s.setStatus(cmd.Name, err)
- case packp.Update:
- if !exists {
- s.setStatus(cmd.Name, ErrUpdateReference)
- continue
- }
-
- ref := plumbing.NewHashReference(cmd.Name, cmd.New)
- err := s.storer.SetReference(ref)
- s.setStatus(cmd.Name, err)
- }
- }
-}
-
-func (s *rpSession) writePackfile(r io.ReadCloser) error {
- if r == nil {
- return nil
- }
-
- if err := packfile.UpdateObjectStorage(s.storer, r); err != nil {
- _ = r.Close()
- return err
- }
-
- return r.Close()
-}
-
-func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) {
- s.cmdStatus[ref] = err
- if s.firstErr == nil && err != nil {
- s.firstErr = err
- }
-}
-
-func (s *rpSession) reportStatus() *packp.ReportStatus {
- if !s.caps.Supports(capability.ReportStatus) {
- return nil
- }
-
- rs := packp.NewReportStatus()
- rs.UnpackStatus = "ok"
-
- if s.unpackErr != nil {
- rs.UnpackStatus = s.unpackErr.Error()
- }
-
- if s.cmdStatus == nil {
- return rs
- }
-
- for ref, err := range s.cmdStatus {
- msg := "ok"
- if err != nil {
- msg = err.Error()
- }
- status := &packp.CommandStatus{
- ReferenceName: ref,
- Status: msg,
- }
- rs.CommandStatuses = append(rs.CommandStatuses, status)
- }
-
- return rs
-}
-
-func (*rpSession) setSupportedCapabilities(c *capability.List) error {
- if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
- return err
- }
-
- if err := c.Set(capability.OFSDelta); err != nil {
- return err
- }
-
- if err := c.Set(capability.DeleteRefs); err != nil {
- return err
- }
-
- return c.Set(capability.ReportStatus)
-}
-
-func setHEAD(s storer.Storer, ar *packp.AdvRefs) error {
- ref, err := s.Reference(plumbing.HEAD)
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- if ref.Type() == plumbing.SymbolicReference {
- if err := ar.AddReference(ref); err != nil {
- return nil
- }
-
- ref, err = storer.ResolveReference(s, ref.Target())
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
- }
-
- if ref.Type() != plumbing.HashReference {
- return plumbing.ErrInvalidType
- }
-
- h := ref.Hash()
- ar.Head = &h
-
- return nil
-}
-
-func setReferences(s storer.Storer, ar *packp.AdvRefs) error {
- //TODO: add peeled references.
- iter, err := s.IterReferences()
- if err != nil {
- return err
- }
-
- return iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- ar.References[ref.Name().String()] = ref.Hash()
- return nil
- })
-}
-
-func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) {
- _, err := s.Reference(n)
- if err == plumbing.ErrReferenceNotFound {
- return false, nil
- }
-
- return err == nil, err
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go
deleted file mode 100644
index 1e5c38375e..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go
+++ /dev/null
@@ -1,322 +0,0 @@
-package ssh
-
-import (
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "os/user"
- "path/filepath"
-
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
-
- "github.com/mitchellh/go-homedir"
- "github.com/xanzy/ssh-agent"
- "golang.org/x/crypto/ssh"
- "golang.org/x/crypto/ssh/knownhosts"
-)
-
-const DefaultUsername = "git"
-
-// AuthMethod is the interface all auth methods for the ssh client
-// must implement. The clientConfig method returns the ssh client
-// configuration needed to establish an ssh connection.
-type AuthMethod interface {
- transport.AuthMethod
- // ClientConfig should return a valid ssh.ClientConfig to be used to create
- // a connection to the SSH server.
- ClientConfig() (*ssh.ClientConfig, error)
-}
-
-// The names of the AuthMethod implementations. To be returned by the
-// Name() method. Most git servers only allow PublicKeysName and
-// PublicKeysCallbackName.
-const (
- KeyboardInteractiveName = "ssh-keyboard-interactive"
- PasswordName = "ssh-password"
- PasswordCallbackName = "ssh-password-callback"
- PublicKeysName = "ssh-public-keys"
- PublicKeysCallbackName = "ssh-public-key-callback"
-)
-
-// KeyboardInteractive implements AuthMethod by using a
-// prompt/response sequence controlled by the server.
-type KeyboardInteractive struct {
- User string
- Challenge ssh.KeyboardInteractiveChallenge
- HostKeyCallbackHelper
-}
-
-func (a *KeyboardInteractive) Name() string {
- return KeyboardInteractiveName
-}
-
-func (a *KeyboardInteractive) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{
- a.Challenge,
- },
- })
-}
-
-// Password implements AuthMethod by using the given password.
-type Password struct {
- User string
- Password string
- HostKeyCallbackHelper
-}
-
-func (a *Password) Name() string {
- return PasswordName
-}
-
-func (a *Password) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.Password(a.Password)},
- })
-}
-
-// PasswordCallback implements AuthMethod by using a callback
-// to fetch the password.
-type PasswordCallback struct {
- User string
- Callback func() (pass string, err error)
- HostKeyCallbackHelper
-}
-
-func (a *PasswordCallback) Name() string {
- return PasswordCallbackName
-}
-
-func (a *PasswordCallback) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)},
- })
-}
-
-// PublicKeys implements AuthMethod by using the given key pairs.
-type PublicKeys struct {
- User string
- Signer ssh.Signer
- HostKeyCallbackHelper
-}
-
-// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An
-// encryption password should be given if the pemBytes contains a
-// password-encrypted PEM block; otherwise, password should be empty. It
-// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
-func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) {
- block, _ := pem.Decode(pemBytes)
- if block == nil {
- return nil, errors.New("invalid PEM data")
- }
- if x509.IsEncryptedPEMBlock(block) {
- key, err := x509.DecryptPEMBlock(block, []byte(password))
- if err != nil {
- return nil, err
- }
-
- block = &pem.Block{Type: block.Type, Bytes: key}
- pemBytes = pem.EncodeToMemory(block)
- }
-
- signer, err := ssh.ParsePrivateKey(pemBytes)
- if err != nil {
- return nil, err
- }
-
- return &PublicKeys{User: user, Signer: signer}, nil
-}
-
-// NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM
-// encoded private key. An encryption password should be given if the file
-// contains a password-encrypted PEM block; otherwise, password should be empty.
-func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) {
- bytes, err := ioutil.ReadFile(pemFile)
- if err != nil {
- return nil, err
- }
-
- return NewPublicKeys(user, bytes, password)
-}
-
-func (a *PublicKeys) Name() string {
- return PublicKeysName
-}
-
-func (a *PublicKeys) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)},
- })
-}
-
-func username() (string, error) {
- var username string
- if user, err := user.Current(); err == nil {
- username = user.Username
- } else {
- username = os.Getenv("USER")
- }
-
- if username == "" {
- return "", errors.New("failed to get username")
- }
-
- return username, nil
-}
-
-// PublicKeysCallback implements AuthMethod by asking an
-// ssh.agent.Agent to act as a signer.
-type PublicKeysCallback struct {
- User string
- Callback func() (signers []ssh.Signer, err error)
- HostKeyCallbackHelper
-}
-
-// NewSSHAgentAuth returns a PublicKeysCallback based on an SSH agent. It opens
-// a pipe to the SSH agent and uses the pipe as the implementer of the public
-// key callback function.
-func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) {
- var err error
- if u == "" {
- u, err = username()
- if err != nil {
- return nil, err
- }
- }
-
- a, _, err := sshagent.New()
- if err != nil {
- return nil, fmt.Errorf("error creating SSH agent: %q", err)
- }
-
- return &PublicKeysCallback{
- User: u,
- Callback: a.Signers,
- }, nil
-}
-
-func (a *PublicKeysCallback) Name() string {
- return PublicKeysCallbackName
-}
-
-func (a *PublicKeysCallback) String() string {
- return fmt.Sprintf("user: %s, name: %s", a.User, a.Name())
-}
-
-func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
- return a.SetHostKeyCallback(&ssh.ClientConfig{
- User: a.User,
- Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)},
- })
-}
-
-// NewKnownHostsCallback returns an ssh.HostKeyCallback based on a
-// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT
-//
-// If the list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
-// environment variable, example:
-// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
-//
-// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
-// ~/.ssh/known_hosts
-// /etc/ssh/ssh_known_hosts
-func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
- var err error
-
- if len(files) == 0 {
- if files, err = getDefaultKnownHostsFiles(); err != nil {
- return nil, err
- }
- }
-
- if files, err = filterKnownHostsFiles(files...); err != nil {
- return nil, err
- }
-
- return knownhosts.New(files...)
-}
-
-func getDefaultKnownHostsFiles() ([]string, error) {
- files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS"))
- if len(files) != 0 {
- return files, nil
- }
-
- homeDirPath, err := homedir.Dir()
- if err != nil {
- return nil, err
- }
-
- return []string{
- filepath.Join(homeDirPath, "/.ssh/known_hosts"),
- "/etc/ssh/ssh_known_hosts",
- }, nil
-}
-
-func filterKnownHostsFiles(files ...string) ([]string, error) {
- var out []string
- for _, file := range files {
- _, err := os.Stat(file)
- if err == nil {
- out = append(out, file)
- continue
- }
-
- if !os.IsNotExist(err) {
- return nil, err
- }
- }
-
- if len(out) == 0 {
- return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable")
- }
-
- return out, nil
-}
-
-// HostKeyCallbackHelper is a helper that provides common functionality to
-// configure HostKeyCallback into a ssh.ClientConfig.
-type HostKeyCallbackHelper struct {
- // HostKeyCallback is the function type used for verifying server keys.
- // If nil, a default callback will be created using NewKnownHostsCallback
- // without arguments.
- HostKeyCallback ssh.HostKeyCallback
-}
-
-// SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If
-// HostKeyCallback is nil, a default callback is created using
-// NewKnownHostsCallback.
-func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) {
- var err error
- if m.HostKeyCallback == nil {
- if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil {
- return cfg, err
- }
- }
-
- cfg.HostKeyCallback = m.HostKeyCallback
- return cfg, nil
-}
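
Tying the pieces together: a clone over SSH with a key file and an explicit known_hosts callback wired through the embedded HostKeyCallbackHelper. Paths and URL are hypothetical; the password is empty because the key is assumed unencrypted:

package main

import (
	"os"

	git "gopkg.in/src-d/go-git.v4"
	gitssh "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
)

func main() {
	auth, err := gitssh.NewPublicKeysFromFile("git", "/home/me/.ssh/id_rsa", "")
	if err != nil {
		panic(err)
	}

	// Optional: pin host key verification to a specific known_hosts file.
	callback, err := gitssh.NewKnownHostsCallback("/home/me/.ssh/known_hosts")
	if err != nil {
		panic(err)
	}
	auth.HostKeyCallback = callback

	_, err = git.PlainClone("/tmp/repo", false, &git.CloneOptions{
		URL:      "git@github.com:example/repo.git",
		Auth:     auth,
		Progress: os.Stdout,
	})
	if err != nil {
		panic(err)
	}
}
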
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go
deleted file mode 100644
index d320d43386..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Package ssh implements the SSH transport protocol.
-package ssh
-
-import (
- "context"
- "fmt"
- "reflect"
- "strconv"
-
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
-
- "github.com/kevinburke/ssh_config"
- "golang.org/x/crypto/ssh"
- "golang.org/x/net/proxy"
-)
-
-// DefaultClient is the default SSH client.
-var DefaultClient = NewClient(nil)
-
-// DefaultSSHConfig is the reader used to access parameters stored in the
-// system's ssh_config files. If nil, all ssh_config files are ignored.
-var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings
-
-type sshConfig interface {
- Get(alias, key string) string
-}
-
-// NewClient creates a new SSH client with an optional *ssh.ClientConfig.
-func NewClient(config *ssh.ClientConfig) transport.Transport {
- return common.NewClient(&runner{config: config})
-}
-
-// DefaultAuthBuilder is the function used to create a default AuthMethod, when
-// the user doesn't provide any.
-var DefaultAuthBuilder = func(user string) (AuthMethod, error) {
- return NewSSHAgentAuth(user)
-}
-
-const DefaultPort = 22
-
-type runner struct {
- config *ssh.ClientConfig
-}
-
-func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
- c := &command{command: cmd, endpoint: ep, config: r.config}
- if auth != nil {
- c.setAuth(auth)
- }
-
- if err := c.connect(); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type command struct {
- *ssh.Session
- connected bool
- command string
- endpoint *transport.Endpoint
- client *ssh.Client
- auth AuthMethod
- config *ssh.ClientConfig
-}
-
-func (c *command) setAuth(auth transport.AuthMethod) error {
- a, ok := auth.(AuthMethod)
- if !ok {
- return transport.ErrInvalidAuthMethod
- }
-
- c.auth = a
- return nil
-}
-
-func (c *command) Start() error {
- return c.Session.Start(endpointToCommand(c.command, c.endpoint))
-}
-
-// Close closes the SSH session and connection.
-func (c *command) Close() error {
- if !c.connected {
- return nil
- }
-
- c.connected = false
-
- //XXX: If we read the full packfile, the session might already be
- // closed.
- _ = c.Session.Close()
-
- return c.client.Close()
-}
-
-// connect connects to the SSH server. Unless an AuthMethod was set with the
-// setAuth method, it defaults to an auth method based on PublicKeysCallback,
-// which connects to an SSH agent using the address stored in the SSH_AUTH_SOCK
-// environment variable.
-func (c *command) connect() error {
- if c.connected {
- return transport.ErrAlreadyConnected
- }
-
- if c.auth == nil {
- if err := c.setAuthFromEndpoint(); err != nil {
- return err
- }
- }
-
- var err error
- config, err := c.auth.ClientConfig()
- if err != nil {
- return err
- }
-
- overrideConfig(c.config, config)
-
- c.client, err = dial("tcp", c.getHostWithPort(), config)
- if err != nil {
- return err
- }
-
- c.Session, err = c.client.NewSession()
- if err != nil {
- _ = c.client.Close()
- return err
- }
-
- c.connected = true
- return nil
-}
-
-func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
- var (
- ctx = context.Background()
- cancel context.CancelFunc
- )
- if config.Timeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, config.Timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer cancel()
-
- conn, err := proxy.Dial(ctx, network, addr)
- if err != nil {
- return nil, err
- }
- c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
- if err != nil {
- return nil, err
- }
- return ssh.NewClient(c, chans, reqs), nil
-}
-
-func (c *command) getHostWithPort() string {
- if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
- return addr
- }
-
- host := c.endpoint.Host
- port := c.endpoint.Port
- if port <= 0 {
- port = DefaultPort
- }
-
- return fmt.Sprintf("%s:%d", host, port)
-}
-
-func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) {
- if DefaultSSHConfig == nil {
- return
- }
-
- host := c.endpoint.Host
- port := c.endpoint.Port
-
- configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname")
- if configHost != "" {
- host = configHost
- found = true
- }
-
- if !found {
- return
- }
-
- configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port")
- if configPort != "" {
- if i, err := strconv.Atoi(configPort); err == nil {
- port = i
- }
- }
-
- addr = fmt.Sprintf("%s:%d", host, port)
- return
-}
-
-func (c *command) setAuthFromEndpoint() error {
- var err error
- c.auth, err = DefaultAuthBuilder(c.endpoint.User)
- return err
-}
-
-func endpointToCommand(cmd string, ep *transport.Endpoint) string {
- return fmt.Sprintf("%s '%s'", cmd, ep.Path)
-}
-
-func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) {
- if overrides == nil {
- return
- }
-
- t := reflect.TypeOf(*c)
- vc := reflect.ValueOf(c).Elem()
- vo := reflect.ValueOf(overrides).Elem()
-
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- vcf := vc.FieldByName(f.Name)
- vof := vo.FieldByName(f.Name)
- vcf.Set(vof)
- }
-
- *c = vc.Interface().(ssh.ClientConfig)
-}
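
When connect finds no AuthMethod, it falls back to DefaultAuthBuilder, which defaults to the SSH agent. A sketch swapping in password auth instead; the environment variable is hypothetical:

package main

import (
	"os"

	gitssh "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
)

func main() {
	// Used whenever a clone/fetch/push over ssh:// supplies no Auth.
	gitssh.DefaultAuthBuilder = func(user string) (gitssh.AuthMethod, error) {
		return &gitssh.Password{
			User:     user,
			Password: os.Getenv("GIT_SSH_PASSWORD"),
		}, nil
	}
}
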
diff --git a/vendor/gopkg.in/src-d/go-git.v4/prune.go b/vendor/gopkg.in/src-d/go-git.v4/prune.go
deleted file mode 100644
index c840325f13..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/prune.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package git
-
-import (
- "errors"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-type PruneHandler func(unreferencedObjectHash plumbing.Hash) error
-type PruneOptions struct {
- // OnlyObjectsOlderThan, if set to a non-zero value,
- // selects only objects older than the time provided.
- OnlyObjectsOlderThan time.Time
- // Handler is called on matching objects
- Handler PruneHandler
-}
-
-var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported")
-
-// DeleteObject deletes an object from a repository.
-// The type conveniently matches PruneHandler.
-func (r *Repository) DeleteObject(hash plumbing.Hash) error {
- los, ok := r.Storer.(storer.LooseObjectStorer)
- if !ok {
- return ErrLooseObjectsNotSupported
- }
-
- return los.DeleteLooseObject(hash)
-}
-
-func (r *Repository) Prune(opt PruneOptions) error {
- los, ok := r.Storer.(storer.LooseObjectStorer)
- if !ok {
- return ErrLooseObjectsNotSupported
- }
-
- pw := newObjectWalker(r.Storer)
- err := pw.walkAllRefs()
- if err != nil {
- return err
- }
- // Now walk all (loose) objects in storage.
- return los.ForEachObjectHash(func(hash plumbing.Hash) error {
- // Get out if we have seen this object.
- if pw.isSeen(hash) {
- return nil
- }
- // Otherwise it is a candidate for pruning.
- // Check for too-new objects next.
- if !opt.OnlyObjectsOlderThan.IsZero() {
- // Errors here are non-fatal. The object may, for example, be packed
- // or concurrently deleted. Skip such objects.
- t, err := los.LooseObjectTime(hash)
- if err != nil {
- return nil
- }
- // Skip too new objects.
- if !t.Before(opt.OnlyObjectsOlderThan) {
- return nil
- }
- }
- return opt.Handler(hash)
- })
-}
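-
-// A minimal usage sketch: DeleteObject matches PruneHandler, so it can be
-// passed directly; the one-hour cutoff is illustrative only.
-//
-// err := r.Prune(git.PruneOptions{
-// OnlyObjectsOlderThan: time.Now().Add(-1 * time.Hour),
-// Handler: r.DeleteObject,
-// })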
diff --git a/vendor/gopkg.in/src-d/go-git.v4/references.go b/vendor/gopkg.in/src-d/go-git.v4/references.go
deleted file mode 100644
index 5673ac1356..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/references.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package git
-
-import (
- "io"
- "sort"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/utils/diff"
-
- "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-// References returns a slice of Commits for the file at "path", starting from
-// the commit provided that contains the file from the provided path. The last
-// commit in the returned slice is the commit where the file was created.
-// If the provided commit does not contain the specified path, a nil slice is
-// returned. The commits are sorted in commit order, newer to older.
-//
-// Caveats:
-//
-// - Moves and copies are not currently supported.
-//
-// - Cherry-picks are not detected unless there are no commits between them and
-// therefore can appear repeated in the list. (See git patch-id for hints on how
-// to fix this.)
-func references(c *object.Commit, path string) ([]*object.Commit, error) {
- var result []*object.Commit
- seen := make(map[plumbing.Hash]struct{})
- if err := walkGraph(&result, &seen, c, path); err != nil {
- return nil, err
- }
-
- // TODO result should be returned without ordering
- sortCommits(result)
-
- // for merges of identical cherry-picks
- return removeComp(path, result, equivalent)
-}
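-
-// Since references is unexported, this sketch only applies inside this
-// package; "head" stands for a hypothetical *object.Commit:
-//
-// commits, err := references(head, "docs/README.md")
-// // commits holds the file's history, one commit per content change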
-
-type commitSorterer struct {
- l []*object.Commit
-}
-
-func (s commitSorterer) Len() int {
- return len(s.l)
-}
-
-func (s commitSorterer) Less(i, j int) bool {
- return s.l[i].Committer.When.Before(s.l[j].Committer.When) ||
- s.l[i].Committer.When.Equal(s.l[j].Committer.When) &&
- s.l[i].Author.When.Before(s.l[j].Author.When)
-}
-
-func (s commitSorterer) Swap(i, j int) {
- s.l[i], s.l[j] = s.l[j], s.l[i]
-}
-
-// sortCommits sorts a commit list by commit date, from older to newer.
-func sortCommits(l []*object.Commit) {
- s := &commitSorterer{l}
- sort.Sort(s)
-}
-
-// walkGraph performs a recursive traversal of the commit graph, generating a
-// linear history of the path.
-func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error {
- // check and update seen
- if _, ok := (*seen)[current.Hash]; ok {
- return nil
- }
- (*seen)[current.Hash] = struct{}{}
-
- // if the path is not in the current commit, stop searching.
- if _, err := current.File(path); err != nil {
- return nil
- }
-
- // optimization: don't traverse branches that do not
- // contain the path.
- parents, err := parentsContainingPath(path, current)
- if err != nil {
- return err
- }
- switch len(parents) {
- // if the path is not found in any of its parents, the path was
- // created by this commit; we must add it to the revisions list and
- // stop searching. This includes the case when current is the
- // initial commit.
- case 0:
- *result = append(*result, current)
- return nil
- case 1: // only one parent contains the path
- // if the file contents have changed, add the current commit
- different, err := differentContents(path, current, parents)
- if err != nil {
- return err
- }
- if len(different) == 1 {
- *result = append(*result, current)
- }
- // in any case, walk the parent
- return walkGraph(result, seen, parents[0], path)
- default: // more than one parent contains the path
- // TODO: detect merges that had a conflict, because they must be
- // included in the result here.
- for _, p := range parents {
- err := walkGraph(result, seen, p, path)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) {
- // TODO: benchmark this method making git.object.Commit.parent public instead of using
- // an iterator
- var result []*object.Commit
- iter := c.Parents()
- for {
- parent, err := iter.Next()
- if err == io.EOF {
- return result, nil
- }
- if err != nil {
- return nil, err
- }
- if _, err := parent.File(path); err == nil {
- result = append(result, parent)
- }
- }
-}
-
-// Returns a slice of the commits in "cs" that have the file "path", but with different
-// contents than what can be found in "c".
-func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) {
- result := make([]*object.Commit, 0, len(cs))
- h, found := blobHash(path, c)
- if !found {
- return nil, object.ErrFileNotFound
- }
- for _, cx := range cs {
- if hx, found := blobHash(path, cx); found && h != hx {
- result = append(result, cx)
- }
- }
- return result, nil
-}
-
-// blobHash returns the hash of a path in a commit
-func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) {
- file, err := commit.File(path)
- if err != nil {
- var empty plumbing.Hash
- return empty, found
- }
- return file.Hash, true
-}
-
-type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error)
-
-// Returns a new slice of commits, with duplicates removed. Expects a
-// sorted commit list. Duplication is defined according to "comp". It
-// will always keep the first commit of a series of duplicated commits.
-func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) {
- result := make([]*object.Commit, 0, len(cs))
- if len(cs) == 0 {
- return result, nil
- }
- result = append(result, cs[0])
- for i := 1; i < len(cs); i++ {
- equals, err := comp(path, cs[i], cs[i-1])
- if err != nil {
- return nil, err
- }
- if !equals {
- result = append(result, cs[i])
- }
- }
- return result, nil
-}
-
-// Equivalent commits are commits whose patch is the same.
-func equivalent(path string, a, b *object.Commit) (bool, error) {
- numParentsA := a.NumParents()
- numParentsB := b.NumParents()
-
- // the first commit is not equivalent to any other commit,
- // and merges are presumed not to be equivalent to anything
- if numParentsA != 1 || numParentsB != 1 {
- return false, nil
- }
-
- diffsA, err := patch(a, path)
- if err != nil {
- return false, err
- }
- diffsB, err := patch(b, path)
- if err != nil {
- return false, err
- }
-
- return sameDiffs(diffsA, diffsB), nil
-}
-
-func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) {
- // get contents of the file in the commit
- file, err := c.File(path)
- if err != nil {
- return nil, err
- }
- content, err := file.Contents()
- if err != nil {
- return nil, err
- }
-
- // get contents of the file in the first parent of the commit
- var contentParent string
- iter := c.Parents()
- parent, err := iter.Next()
- if err != nil {
- return nil, err
- }
- file, err = parent.File(path)
- if err != nil {
- contentParent = ""
- } else {
- contentParent, err = file.Contents()
- if err != nil {
- return nil, err
- }
- }
-
- // compare the contents of parent and child
- return diff.Do(content, contentParent), nil
-}
-
-func sameDiffs(a, b []diffmatchpatch.Diff) bool {
- if len(a) != len(b) {
- return false
- }
- for i := range a {
- if !sameDiff(a[i], b[i]) {
- return false
- }
- }
- return true
-}
-
-func sameDiff(a, b diffmatchpatch.Diff) bool {
- if a.Type != b.Type {
- return false
- }
- switch a.Type {
- case 0:
- return countLines(a.Text) == countLines(b.Text)
- case 1, -1:
- return a.Text == b.Text
- default:
- panic("unreachable")
- }
-}
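-
-// The Type values above correspond to the diffmatchpatch constants
-// DiffEqual (0), DiffInsert (1) and DiffDelete (-1): equal chunks are
-// compared by line count only, while insertions and deletions must match
-// exactly.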
diff --git a/vendor/gopkg.in/src-d/go-git.v4/remote.go b/vendor/gopkg.in/src-d/go-git.v4/remote.go
deleted file mode 100644
index baee7a0826..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/remote.go
+++ /dev/null
@@ -1,1114 +0,0 @@
-package git
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-billy.v4/osfs"
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
- "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
- "gopkg.in/src-d/go-git.v4/plumbing/revlist"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/plumbing/transport"
- "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
- "gopkg.in/src-d/go-git.v4/storage"
- "gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/storage/memory"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-var (
- NoErrAlreadyUpToDate = errors.New("already up-to-date")
- ErrDeleteRefNotSupported = errors.New("server does not support delete-refs")
- ErrForceNeeded = errors.New("some refs were not updated")
-)
-
-const (
- // This describes the maximum number of commits to walk when
- // computing the haves to send to a server, for each ref in the
- // repo containing this remote, when not using the multi-ack
- // protocol. Setting this to 0 means there is no limit.
- maxHavesToVisitPerRef = 100
-)
-
-// Remote represents a connection to a remote repository.
-type Remote struct {
- c *config.RemoteConfig
- s storage.Storer
-}
-
-// NewRemote creates a new Remote.
-// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote).
-// Otherwise, Remotes should be created via a Repository.
-func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
- return &Remote{s: s, c: c}
-}
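-
-// A hedged ls-remote style sketch using in-memory storage; the URL is
-// illustrative only:
-//
-// rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
-// Name: "origin",
-// URLs: []string{"https://github.com/src-d/go-git"},
-// })
-// refs, err := rem.List(&git.ListOptions{})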
-
-// Config returns the RemoteConfig object used to instantiate this Remote.
-func (r *Remote) Config() *config.RemoteConfig {
- return r.c
-}
-
-func (r *Remote) String() string {
- var fetch, push string
- if len(r.c.URLs) > 0 {
- fetch = r.c.URLs[0]
- push = r.c.URLs[0]
- }
-
- return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push)
-}
-
-// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the
-// remote was already up-to-date.
-func (r *Remote) Push(o *PushOptions) error {
- return r.PushContext(context.Background(), o)
-}
-
-// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if
-// the remote was already up-to-date.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
- if err := o.Validate(); err != nil {
- return err
- }
-
- if o.RemoteName != r.c.Name {
- return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
- }
-
- s, err := newSendPackSession(r.c.URLs[0], o.Auth)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return err
- }
-
- remoteRefs, err := ar.AllReferences()
- if err != nil {
- return err
- }
-
- isDelete := false
- allDelete := true
- for _, rs := range o.RefSpecs {
- if rs.IsDelete() {
- isDelete = true
- } else {
- allDelete = false
- }
- if isDelete && !allDelete {
- break
- }
- }
-
- if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) {
- return ErrDeleteRefNotSupported
- }
-
- localRefs, err := r.references()
- if err != nil {
- return err
- }
-
- req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar)
- if err != nil {
- return err
- }
-
- if len(req.Commands) == 0 {
- return NoErrAlreadyUpToDate
- }
-
- objects := objectsToPush(req.Commands)
-
- haves, err := referencesToHashes(remoteRefs)
- if err != nil {
- return err
- }
-
- stop, err := r.s.Shallow()
- if err != nil {
- return err
- }
-
- // If we have shallow references, we should include them as part of the
- // objects that we are aware of.
- haves = append(haves, stop...)
-
- var hashesToPush []plumbing.Hash
- // Avoid the expensive revlist operation if we're only doing deletes.
- if !allDelete {
- if r.c.IsFirstURLLocal() {
- // If we are pushing to a local repo, it might be much
- // faster to use a local storage layer to get the commits
- // to ignore, when calculating the object revlist.
- localStorer := filesystem.NewStorage(
- osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
- hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
- r.s, localStorer, objects, haves)
- } else {
- hashesToPush, err = revlist.Objects(r.s, objects, haves)
- }
- if err != nil {
- return err
- }
- }
-
- if len(hashesToPush) == 0 {
- allDelete = true
- for _, command := range req.Commands {
- if command.Action() != packp.Delete {
- allDelete = false
- break
- }
- }
- }
-
- rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete)
- if err != nil {
- return err
- }
-
- if err = rs.Error(); err != nil {
- return err
- }
-
- return r.updateRemoteReferenceStorage(req, rs)
-}
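-
-// A minimal usage sketch; treating NoErrAlreadyUpToDate as a non-error is
-// the expected pattern:
-//
-// err := remote.Push(&git.PushOptions{RemoteName: "origin"})
-// if err != nil && err != git.NoErrAlreadyUpToDate {
-// return err
-// }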
-
-func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
- return !ar.Capabilities.Supports(capability.OFSDelta)
-}
-
-func (r *Remote) newReferenceUpdateRequest(
- o *PushOptions,
- localRefs []*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- ar *packp.AdvRefs,
-) (*packp.ReferenceUpdateRequest, error) {
- req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities)
-
- if o.Progress != nil {
- req.Progress = o.Progress
- if ar.Capabilities.Supports(capability.Sideband64k) {
- req.Capabilities.Set(capability.Sideband64k)
- } else if ar.Capabilities.Supports(capability.Sideband) {
- req.Capabilities.Set(capability.Sideband)
- }
- }
-
- if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
- return nil, err
- }
-
- return req, nil
-}
-
-func (r *Remote) updateRemoteReferenceStorage(
- req *packp.ReferenceUpdateRequest,
- result *packp.ReportStatus,
-) error {
-
- for _, spec := range r.c.Fetch {
- for _, c := range req.Commands {
- if !spec.Match(c.Name) {
- continue
- }
-
- local := spec.Dst(c.Name)
- ref := plumbing.NewHashReference(local, c.New)
- switch c.Action() {
- case packp.Create, packp.Update:
- if err := r.s.SetReference(ref); err != nil {
- return err
- }
- case packp.Delete:
- if err := r.s.RemoveReference(local); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-// FetchContext fetches references along with the objects necessary to complete
-// their histories.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error {
- _, err := r.fetch(ctx, o)
- return err
-}
-
-// Fetch fetches references along with the objects necessary to complete their
-// histories.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-func (r *Remote) Fetch(o *FetchOptions) error {
- return r.FetchContext(context.Background(), o)
-}
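-
-// The same pattern applies to fetching; a sketch with an illustrative
-// refspec:
-//
-// err := remote.Fetch(&git.FetchOptions{
-// RefSpecs: []config.RefSpec{"+refs/heads/*:refs/remotes/origin/*"},
-// })
-// if err != nil && err != git.NoErrAlreadyUpToDate {
-// return err
-// }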
-
-func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) {
- if o.RemoteName == "" {
- o.RemoteName = r.c.Name
- }
-
- if err = o.Validate(); err != nil {
- return nil, err
- }
-
- if len(o.RefSpecs) == 0 {
- o.RefSpecs = r.c.Fetch
- }
-
- s, err := newUploadPackSession(r.c.URLs[0], o.Auth)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return nil, err
- }
-
- req, err := r.newUploadPackRequest(o, ar)
- if err != nil {
- return nil, err
- }
-
- remoteRefs, err := ar.AllReferences()
- if err != nil {
- return nil, err
- }
-
- localRefs, err := r.references()
- if err != nil {
- return nil, err
- }
-
- refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
- if err != nil {
- return nil, err
- }
-
- req.Wants, err = getWants(r.s, refs)
- if len(req.Wants) > 0 {
- req.Haves, err = getHaves(localRefs, remoteRefs, r.s)
- if err != nil {
- return nil, err
- }
-
- if err = r.fetchPack(ctx, o, s, req); err != nil {
- return nil, err
- }
- }
-
- updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force)
- if err != nil {
- return nil, err
- }
-
- if !updated {
- return remoteRefs, NoErrAlreadyUpToDate
- }
-
- return remoteRefs, nil
-}
-
-func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) {
- c, ep, err := newClient(url)
- if err != nil {
- return nil, err
- }
-
- return c.NewUploadPackSession(ep, auth)
-}
-
-func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
- c, ep, err := newClient(url)
- if err != nil {
- return nil, err
- }
-
- return c.NewReceivePackSession(ep, auth)
-}
-
-func newClient(url string) (transport.Transport, *transport.Endpoint, error) {
- ep, err := transport.NewEndpoint(url)
- if err != nil {
- return nil, nil, err
- }
-
- c, err := client.NewClient(ep)
- if err != nil {
- return nil, nil, err
- }
-
- return c, ep, err
-}
-
-func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession,
- req *packp.UploadPackRequest) (err error) {
-
- reader, err := s.UploadPack(ctx, req)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(reader, &err)
-
- if err = r.updateShallow(o, reader); err != nil {
- return err
- }
-
- if err = packfile.UpdateObjectStorage(r.s,
- buildSidebandIfSupported(req.Capabilities, reader, o.Progress),
- ); err != nil {
- return err
- }
-
- return err
-}
-
-func (r *Remote) addReferencesToUpdate(
- refspecs []config.RefSpec,
- localRefs []*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- req *packp.ReferenceUpdateRequest,
- prune bool,
-) error {
- // This references dictionary will be used to search references by name.
- refsDict := make(map[string]*plumbing.Reference)
- for _, ref := range localRefs {
- refsDict[ref.Name().String()] = ref
- }
-
- for _, rs := range refspecs {
- if rs.IsDelete() {
- if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil {
- return err
- }
- } else {
- err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
- if err != nil {
- return err
- }
-
- if prune {
- if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (r *Remote) addOrUpdateReferences(
- rs config.RefSpec,
- localRefs []*plumbing.Reference,
- refsDict map[string]*plumbing.Reference,
- remoteRefs storer.ReferenceStorer,
- req *packp.ReferenceUpdateRequest,
-) error {
- // If it is not a wildcard refspec, we can directly search for the reference
- // in the references dictionary.
- if !rs.IsWildcard() {
- ref, ok := refsDict[rs.Src()]
- if !ok {
- return nil
- }
-
- return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
- }
-
- for _, ref := range localRefs {
- err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *Remote) deleteReferences(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- refsDict map[string]*plumbing.Reference,
- req *packp.ReferenceUpdateRequest,
- prune bool) error {
- iter, err := remoteRefs.IterReferences()
- if err != nil {
- return err
- }
-
- return iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- if prune {
- rs := rs.Reverse()
- if !rs.Match(ref.Name()) {
- return nil
- }
-
- if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
- return nil
- }
- } else {
- if rs.Dst("") != ref.Name() {
- return nil
- }
- }
-
- cmd := &packp.Command{
- Name: ref.Name(),
- Old: ref.Hash(),
- New: plumbing.ZeroHash,
- }
- req.Commands = append(req.Commands, cmd)
- return nil
- })
-}
-
-func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
- remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
- req *packp.ReferenceUpdateRequest) error {
-
- if localRef.Type() != plumbing.HashReference {
- return nil
- }
-
- if !rs.Match(localRef.Name()) {
- return nil
- }
-
- cmd := &packp.Command{
- Name: rs.Dst(localRef.Name()),
- Old: plumbing.ZeroHash,
- New: localRef.Hash(),
- }
-
- remoteRef, err := remoteRefs.Reference(cmd.Name)
- if err == nil {
- if remoteRef.Type() != plumbing.HashReference {
- //TODO: check actual git behavior here
- return nil
- }
-
- cmd.Old = remoteRef.Hash()
- } else if err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if cmd.Old == cmd.New {
- return nil
- }
-
- if !rs.IsForceUpdate() {
- if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
- return err
- }
- }
-
- req.Commands = append(req.Commands, cmd)
- return nil
-}
-
-func (r *Remote) references() ([]*plumbing.Reference, error) {
- var localRefs []*plumbing.Reference
- iter, err := r.s.IterReferences()
- if err != nil {
- return nil, err
- }
-
- for {
- ref, err := iter.Next()
- if err == io.EOF {
- break
- }
-
- if err != nil {
- return nil, err
- }
-
- localRefs = append(localRefs, ref)
- }
-
- return localRefs, nil
-}
-
-func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) (
- map[plumbing.Hash]bool, error) {
- remoteRefs := map[plumbing.Hash]bool{}
- iter, err := remoteRefStorer.IterReferences()
- if err != nil {
- return nil, err
- }
- err = iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
- remoteRefs[ref.Hash()] = true
- return nil
- })
- if err != nil {
- return nil, err
- }
- return remoteRefs, nil
-}
-
-// getHavesFromRef populates the given `haves` map with the given
-// reference, and up to `maxHavesToVisitPerRef` ancestor commits.
-func getHavesFromRef(
- ref *plumbing.Reference,
- remoteRefs map[plumbing.Hash]bool,
- s storage.Storer,
- haves map[plumbing.Hash]bool,
-) error {
- h := ref.Hash()
- if haves[h] {
- return nil
- }
-
- // No need to load the commit if we know the remote already
- // has this hash.
- if remoteRefs[h] {
- haves[h] = true
- return nil
- }
-
- commit, err := object.GetCommit(s, h)
- if err != nil {
- // Ignore the error if this isn't a commit.
- haves[ref.Hash()] = true
- return nil
- }
-
- // Until go-git supports proper commit negotiation during an
- // upload pack request, include up to `maxHavesToVisitPerRef`
- // commits from the history of each ref.
- walker := object.NewCommitPreorderIter(commit, haves, nil)
- toVisit := maxHavesToVisitPerRef
- return walker.ForEach(func(c *object.Commit) error {
- haves[c.Hash] = true
- toVisit--
- // If toVisit starts out at 0 (indicating there is no
- // max), then it will be negative here and we won't stop
- // early.
- if toVisit == 0 || remoteRefs[c.Hash] {
- return storer.ErrStop
- }
- return nil
- })
-}
-
-func getHaves(
- localRefs []*plumbing.Reference,
- remoteRefStorer storer.ReferenceStorer,
- s storage.Storer,
-) ([]plumbing.Hash, error) {
- haves := map[plumbing.Hash]bool{}
-
- // Build a map of all the remote references, to avoid loading too
- // many parent commits for references we know don't need to be
- // transferred.
- remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer)
- if err != nil {
- return nil, err
- }
-
- for _, ref := range localRefs {
- if haves[ref.Hash()] {
- continue
- }
-
- if ref.Type() != plumbing.HashReference {
- continue
- }
-
- err = getHavesFromRef(ref, remoteRefs, s, haves)
- if err != nil {
- return nil, err
- }
- }
-
- var result []plumbing.Hash
- for h := range haves {
- result = append(result, h)
- }
-
- return result, nil
-}
-
-const refspecAllTags = "+refs/tags/*:refs/tags/*"
-
-func calculateRefs(
- spec []config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- tagMode TagMode,
-) (memory.ReferenceStorage, error) {
- if tagMode == AllTags {
- spec = append(spec, refspecAllTags)
- }
-
- refs := make(memory.ReferenceStorage)
- for _, s := range spec {
- if err := doCalculateRefs(s, remoteRefs, refs); err != nil {
- return nil, err
- }
- }
-
- return refs, nil
-}
-
-func doCalculateRefs(
- s config.RefSpec,
- remoteRefs storer.ReferenceStorer,
- refs memory.ReferenceStorage,
-) error {
- iter, err := remoteRefs.IterReferences()
- if err != nil {
- return err
- }
-
- var matched bool
- err = iter.ForEach(func(ref *plumbing.Reference) error {
- if !s.Match(ref.Name()) {
- return nil
- }
-
- if ref.Type() == plumbing.SymbolicReference {
- target, err := storer.ResolveReference(remoteRefs, ref.Name())
- if err != nil {
- return err
- }
-
- ref = plumbing.NewHashReference(ref.Name(), target.Hash())
- }
-
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- matched = true
- if err := refs.SetReference(ref); err != nil {
- return err
- }
-
- if !s.IsWildcard() {
- return storer.ErrStop
- }
-
- return nil
- })
-
- if !matched && !s.IsWildcard() {
- return fmt.Errorf("couldn't find remote ref %q", s.Src())
- }
-
- return err
-}
-
-func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) {
- wants := map[plumbing.Hash]bool{}
- for _, ref := range refs {
- hash := ref.Hash()
- exists, err := objectExists(localStorer, ref.Hash())
- if err != nil {
- return nil, err
- }
-
- if !exists {
- wants[hash] = true
- }
- }
-
- var result []plumbing.Hash
- for h := range wants {
- result = append(result, h)
- }
-
- return result, nil
-}
-
-func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) {
- _, err := s.EncodedObject(plumbing.AnyObject, h)
- if err == plumbing.ErrObjectNotFound {
- return false, nil
- }
-
- return true, err
-}
-
-func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error {
- if cmd.Old == plumbing.ZeroHash {
- _, err := remoteRefs.Reference(cmd.Name)
- if err == plumbing.ErrReferenceNotFound {
- return nil
- }
-
- if err != nil {
- return err
- }
-
- return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
- }
-
- ff, err := isFastForward(s, cmd.Old, cmd.New)
- if err != nil {
- return err
- }
-
- if !ff {
- return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
- }
-
- return nil
-}
-
-func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) {
- c, err := object.GetCommit(s, new)
- if err != nil {
- return false, err
- }
-
- found := false
- iter := object.NewCommitPreorderIter(c, nil, nil)
- err = iter.ForEach(func(c *object.Commit) error {
- if c.Hash != old {
- return nil
- }
-
- found = true
- return storer.ErrStop
- })
- return found, err
-}
-
-func (r *Remote) newUploadPackRequest(o *FetchOptions,
- ar *packp.AdvRefs) (*packp.UploadPackRequest, error) {
-
- req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities)
-
- if o.Depth != 0 {
- req.Depth = packp.DepthCommits(o.Depth)
- if err := req.Capabilities.Set(capability.Shallow); err != nil {
- return nil, err
- }
- }
-
- if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) {
- if err := req.Capabilities.Set(capability.NoProgress); err != nil {
- return nil, err
- }
- }
-
- isWildcard := true
- for _, s := range o.RefSpecs {
- if !s.IsWildcard() {
- isWildcard = false
- break
- }
- }
-
- if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) {
- if err := req.Capabilities.Set(capability.IncludeTag); err != nil {
- return nil, err
- }
- }
-
- return req, nil
-}
-
-func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader {
- var t sideband.Type
-
- switch {
- case l.Supports(capability.Sideband):
- t = sideband.Sideband
- case l.Supports(capability.Sideband64k):
- t = sideband.Sideband64k
- default:
- return reader
- }
-
- d := sideband.NewDemuxer(t, reader)
- d.Progress = p
-
- return d
-}
-
-func (r *Remote) updateLocalReferenceStorage(
- specs []config.RefSpec,
- fetchedRefs, remoteRefs memory.ReferenceStorage,
- tagMode TagMode,
- force bool,
-) (updated bool, err error) {
- isWildcard := true
- forceNeeded := false
-
- for _, spec := range specs {
- if !spec.IsWildcard() {
- isWildcard = false
- }
-
- for _, ref := range fetchedRefs {
- if !spec.Match(ref.Name()) {
- continue
- }
-
- if ref.Type() != plumbing.HashReference {
- continue
- }
-
- localName := spec.Dst(ref.Name())
- old, _ := storer.ResolveReference(r.s, localName)
- new := plumbing.NewHashReference(localName, ref.Hash())
-
- // If the ref exists locally as a branch and force is not specified,
- // only update if the new ref is an ancestor of the old
- if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() {
- ff, err := isFastForward(r.s, old.Hash(), new.Hash())
- if err != nil {
- return updated, err
- }
-
- if !ff {
- forceNeeded = true
- continue
- }
- }
-
- refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old)
- if err != nil {
- return updated, err
- }
-
- if refUpdated {
- updated = true
- }
- }
- }
-
- if tagMode == NoTags {
- return updated, nil
- }
-
- tags := fetchedRefs
- if isWildcard {
- tags = remoteRefs
- }
- tagUpdated, err := r.buildFetchedTags(tags)
- if err != nil {
- return updated, err
- }
-
- if tagUpdated {
- updated = true
- }
-
- if forceNeeded {
- err = ErrForceNeeded
- }
-
- return
-}
-
-func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) {
- for _, ref := range refs {
- if !ref.Name().IsTag() {
- continue
- }
-
- _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash())
- if err == plumbing.ErrObjectNotFound {
- continue
- }
-
- if err != nil {
- return false, err
- }
-
- refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref)
- if err != nil {
- return updated, err
- }
-
- if refUpdated {
- updated = true
- }
- }
-
- return
-}
-
-// List the references on the remote repository.
-func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
- s, err := newUploadPackSession(r.c.URLs[0], o.Auth)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(s, &err)
-
- ar, err := s.AdvertisedReferences()
- if err != nil {
- return nil, err
- }
-
- allRefs, err := ar.AllReferences()
- if err != nil {
- return nil, err
- }
-
- refs, err := allRefs.IterReferences()
- if err != nil {
- return nil, err
- }
-
- var resultRefs []*plumbing.Reference
- refs.ForEach(func(ref *plumbing.Reference) error {
- resultRefs = append(resultRefs, ref)
- return nil
- })
-
- return resultRefs, nil
-}
-
-func objectsToPush(commands []*packp.Command) []plumbing.Hash {
- var objects []plumbing.Hash
- for _, cmd := range commands {
- if cmd.New == plumbing.ZeroHash {
- continue
- }
-
- objects = append(objects, cmd.New)
- }
- return objects
-}
-
-func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) {
- iter, err := refs.IterReferences()
- if err != nil {
- return nil, err
- }
-
- var hs []plumbing.Hash
- err = iter.ForEach(func(ref *plumbing.Reference) error {
- if ref.Type() != plumbing.HashReference {
- return nil
- }
-
- hs = append(hs, ref.Hash())
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return hs, nil
-}
-
-func pushHashes(
- ctx context.Context,
- sess transport.ReceivePackSession,
- s storage.Storer,
- req *packp.ReferenceUpdateRequest,
- hs []plumbing.Hash,
- useRefDeltas bool,
- allDelete bool,
-) (*packp.ReportStatus, error) {
-
- rd, wr := io.Pipe()
-
- config, err := s.Config()
- if err != nil {
- return nil, err
- }
-
- // Set buffer size to 1 so the error message can be written when
- // ReceivePack fails. Otherwise the goroutine will be blocked writing
- // to the channel.
- done := make(chan error, 1)
-
- if !allDelete {
- req.Packfile = rd
- go func() {
- e := packfile.NewEncoder(wr, s, useRefDeltas)
- if _, err := e.Encode(hs, config.Pack.Window); err != nil {
- done <- wr.CloseWithError(err)
- return
- }
-
- done <- wr.Close()
- }()
- } else {
- close(done)
- }
-
- rs, err := sess.ReceivePack(ctx, req)
- if err != nil {
- // close the pipe to unblock the encoder's write
- _ = rd.Close()
- return nil, err
- }
-
- if err := <-done; err != nil {
- return nil, err
- }
-
- return rs, nil
-}
-
-func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error {
- if o.Depth == 0 || len(resp.Shallows) == 0 {
- return nil
- }
-
- shallows, err := r.s.Shallow()
- if err != nil {
- return err
- }
-
-outer:
- for _, s := range resp.Shallows {
- for _, oldS := range shallows {
- if s == oldS {
- continue outer
- }
- }
- shallows = append(shallows, s)
- }
-
- return r.s.SetShallow(shallows)
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/repository.go b/vendor/gopkg.in/src-d/go-git.v4/repository.go
deleted file mode 100644
index 2251d6cfe2..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/repository.go
+++ /dev/null
@@ -1,1545 +0,0 @@
-package git
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- stdioutil "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "strings"
- "time"
-
- "golang.org/x/crypto/openpgp"
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/internal/revision"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage"
- "gopkg.in/src-d/go-git.v4/storage/filesystem"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-billy.v4/osfs"
-)
-
-// GitDirName is the name of the special folder where all the git data is stored.
-const GitDirName = ".git"
-
-var (
- // ErrBranchExists an error stating the specified branch already exists
- ErrBranchExists = errors.New("branch already exists")
- // ErrBranchNotFound an error stating the specified branch does not exist
- ErrBranchNotFound = errors.New("branch not found")
- // ErrTagExists an error stating the specified tag already exists
- ErrTagExists = errors.New("tag already exists")
- // ErrTagNotFound an error stating the specified tag does not exist
- ErrTagNotFound = errors.New("tag not found")
- // ErrFetching is returned when the packfile could not be downloaded
- ErrFetching = errors.New("unable to fetch packfile")
-
- ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
- ErrRepositoryNotExists = errors.New("repository does not exist")
- ErrRepositoryAlreadyExists = errors.New("repository already exists")
- ErrRemoteNotFound = errors.New("remote not found")
- ErrRemoteExists = errors.New("remote already exists")
- ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'")
- ErrWorktreeNotProvided = errors.New("worktree should be provided")
- ErrIsBareRepository = errors.New("worktree not available in a bare repository")
- ErrUnableToResolveCommit = errors.New("unable to resolve commit")
- ErrPackedObjectsNotSupported = errors.New("Packed objects not supported")
-)
-
-// Repository represents a git repository
-type Repository struct {
- Storer storage.Storer
-
- r map[string]*Remote
- wt billy.Filesystem
-}
-
-// Init creates an empty git repository, based on the given Storer and worktree.
-// The worktree Filesystem is optional; if nil, a bare repository is created. If
-// the given storer is not empty, ErrRepositoryAlreadyExists is returned.
-func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
- if err := initStorer(s); err != nil {
- return nil, err
- }
-
- r := newRepository(s, worktree)
- _, err := r.Reference(plumbing.HEAD, false)
- switch err {
- case plumbing.ErrReferenceNotFound:
- case nil:
- return nil, ErrRepositoryAlreadyExists
- default:
- return nil, err
- }
-
- h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master)
- if err := s.SetReference(h); err != nil {
- return nil, err
- }
-
- if worktree == nil {
- r.setIsBare(true)
- return r, nil
- }
-
- return r, setWorktreeAndStoragePaths(r, worktree)
-}
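-
-// A minimal sketch: memory.NewStorage gives a throwaway repository, and a
-// nil worktree makes it bare.
-//
-// r, err := git.Init(memory.NewStorage(), nil)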
-
-func initStorer(s storer.Storer) error {
- i, ok := s.(storer.Initializer)
- if !ok {
- return nil
- }
-
- return i.Init()
-}
-
-func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error {
- type fsBased interface {
- Filesystem() billy.Filesystem
- }
-
- // .git file is only created if the storage is file based and the file
- // system is osfs.OS
- fs, isFSBased := r.Storer.(fsBased)
- if !isFSBased {
- return nil
- }
-
- if err := createDotGitFile(worktree, fs.Filesystem()); err != nil {
- return err
- }
-
- return setConfigWorktree(r, worktree, fs.Filesystem())
-}
-
-func createDotGitFile(worktree, storage billy.Filesystem) error {
- path, err := filepath.Rel(worktree.Root(), storage.Root())
- if err != nil {
- path = storage.Root()
- }
-
- if path == GitDirName {
- // not needed, since the folder is the default place
- return nil
- }
-
- f, err := worktree.Create(GitDirName)
- if err != nil {
- return err
- }
-
- defer f.Close()
- _, err = fmt.Fprintf(f, "gitdir: %s\n", path)
- return err
-}
-
-func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error {
- path, err := filepath.Rel(storage.Root(), worktree.Root())
- if err != nil {
- path = worktree.Root()
- }
-
- if path == ".." {
- // not needed, since the folder is the default place
- return nil
- }
-
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- cfg.Core.Worktree = path
- return r.Storer.SetConfig(cfg)
-}
-
-// Open opens a git repository using the given Storer and worktree filesystem.
-// If the given storer is completely empty, ErrRepositoryNotExists is returned.
-// The worktree can be nil when the repository being opened is bare; if the
-// repository is a normal one (not bare) and worktree is nil, the error
-// ErrWorktreeNotProvided is returned.
-func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
- _, err := s.Reference(plumbing.HEAD)
- if err == plumbing.ErrReferenceNotFound {
- return nil, ErrRepositoryNotExists
- }
-
- if err != nil {
- return nil, err
- }
-
- return newRepository(s, worktree), nil
-}
-
-// Clone clones a repository into the given Storer and worktree Filesystem with
-// the given options. If worktree is nil, a bare repository is created. If the
-// given storer is not empty, ErrRepositoryAlreadyExists is returned.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) {
- return CloneContext(context.Background(), s, worktree, o)
-}
-
-// CloneContext clones a repository into the given Storer and worktree Filesystem
-// with the given options. If worktree is nil, a bare repository is created. If
-// the given storer is not empty, ErrRepositoryAlreadyExists is returned.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func CloneContext(
- ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions,
-) (*Repository, error) {
- r, err := Init(s, worktree)
- if err != nil {
- return nil, err
- }
-
- return r, r.clone(ctx, o)
-}
-
-// PlainInit creates an empty git repository at the given path. isBare defines
-// whether the repository will have a worktree (non-bare) or not (bare). If the
-// path is not empty, ErrRepositoryAlreadyExists is returned.
-func PlainInit(path string, isBare bool) (*Repository, error) {
- var wt, dot billy.Filesystem
-
- if isBare {
- dot = osfs.New(path)
- } else {
- wt = osfs.New(path)
- dot, _ = wt.Chroot(GitDirName)
- }
-
- s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
-
- return Init(s, wt)
-}
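-
-// A minimal sketch; the path is illustrative only:
-//
-// r, err := git.PlainInit("/tmp/example", false) // non-bare, creates .git/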
-
-// PlainOpen opens a git repository from the given path. It detects if the
-// repository is bare or a normal one. If the path doesn't contain a valid
-// repository, ErrRepositoryNotExists is returned.
-func PlainOpen(path string) (*Repository, error) {
- return PlainOpenWithOptions(path, &PlainOpenOptions{})
-}
-
-// PlainOpenWithOptions opens a git repository from the given path with specific
-// options. See PlainOpen for more info.
-func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) {
- dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit)
- if err != nil {
- return nil, err
- }
-
- if _, err := dot.Stat(""); err != nil {
- if os.IsNotExist(err) {
- return nil, ErrRepositoryNotExists
- }
-
- return nil, err
- }
-
- s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
-
- return Open(s, wt)
-}
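-
-// With DetectDotGit set, the open walks parent directories until it finds a
-// .git entry, mirroring plain git. A sketch with an illustrative path:
-//
-// r, err := git.PlainOpenWithOptions("/tmp/example/sub/dir",
-// &git.PlainOpenOptions{DetectDotGit: true})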
-
-func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) {
- if path, err = filepath.Abs(path); err != nil {
- return nil, nil, err
- }
- var fs billy.Filesystem
- var fi os.FileInfo
- for {
- fs = osfs.New(path)
- fi, err = fs.Stat(GitDirName)
- if err == nil {
- // no error; stop
- break
- }
- if !os.IsNotExist(err) {
- // unknown error; stop
- return nil, nil, err
- }
- if detect {
- // try its parent as long as we haven't reached
- // the root dir
- if dir := filepath.Dir(path); dir != path {
- path = dir
- continue
- }
- }
- // not detecting via parent dirs and the dir does not exist;
- // stop
- return fs, nil, nil
- }
-
- if fi.IsDir() {
- dot, err = fs.Chroot(GitDirName)
- return dot, fs, err
- }
-
- dot, err = dotGitFileToOSFilesystem(path, fs)
- if err != nil {
- return nil, nil, err
- }
-
- return dot, fs, nil
-}
-
-func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) {
- f, err := fs.Open(GitDirName)
- if err != nil {
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- b, err := stdioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- line := string(b)
- const prefix = "gitdir: "
- if !strings.HasPrefix(line, prefix) {
- return nil, fmt.Errorf(".git file has no %s prefix", prefix)
- }
-
- gitdir := strings.Split(line[len(prefix):], "\n")[0]
- gitdir = strings.TrimSpace(gitdir)
- if filepath.IsAbs(gitdir) {
- return osfs.New(gitdir), nil
- }
-
- return osfs.New(fs.Join(path, gitdir)), nil
-}
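-
-// The .git file parsed above is the standard gitdir indirection: a single
-// line such as
-//
-// gitdir: /path/to/real/.git
-//
-// Relative paths are resolved against the worktree root, as with submodules.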
-
-// PlainClone clones a repository into the path with the given options. isBare
-// defines whether the new repository will be bare or normal. If the path is not
-// empty, ErrRepositoryAlreadyExists is returned.
-//
-// TODO(mcuadros): move isBare to CloneOptions in v5
-func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) {
- return PlainCloneContext(context.Background(), path, isBare, o)
-}
-
-// PlainCloneContext clones a repository into the path with the given options.
-// isBare defines whether the new repository will be bare or normal. If the path
-// is not empty, ErrRepositoryAlreadyExists is returned.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-//
-// TODO(mcuadros): move isBare to CloneOptions in v5
-// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027
-func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) {
- cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path)
- if err != nil {
- return nil, err
- }
-
- r, err := PlainInit(path, isBare)
- if err != nil {
- return nil, err
- }
-
- err = r.clone(ctx, o)
- if err != nil && err != ErrRepositoryAlreadyExists {
- if cleanup {
- cleanUpDir(path, cleanupParent)
- }
- }
-
- return r, err
-}
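-
-// The canonical clone sketch; URL and path are illustrative only:
-//
-// r, err := git.PlainClone("/tmp/example", false, &git.CloneOptions{
-// URL: "https://github.com/src-d/go-git",
-// Progress: os.Stdout,
-// })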
-
-func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
- return &Repository{
- Storer: s,
- wt: worktree,
- r: make(map[string]*Remote),
- }
-}
-
-func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) {
- fi, err := os.Stat(path)
- if err != nil {
- if os.IsNotExist(err) {
- return true, true, nil
- }
-
- return false, false, err
- }
-
- if !fi.IsDir() {
- return false, false, fmt.Errorf("path is not a directory: %s", path)
- }
-
- f, err := os.Open(path)
- if err != nil {
- return false, false, err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- _, err = f.Readdirnames(1)
- if err == io.EOF {
- return true, false, nil
- }
-
- if err != nil {
- return false, false, err
- }
-
- return false, false, nil
-}
-
-func cleanUpDir(path string, all bool) error {
- if all {
- return os.RemoveAll(path)
- }
-
- f, err := os.Open(path)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- names, err := f.Readdirnames(-1)
- if err != nil {
- return err
- }
-
- for _, name := range names {
- if err := os.RemoveAll(filepath.Join(path, name)); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Config returns the repository config.
-func (r *Repository) Config() (*config.Config, error) {
- return r.Storer.Config()
-}
-
-// Remote returns a remote if it exists.
-func (r *Repository) Remote(name string) (*Remote, error) {
- cfg, err := r.Storer.Config()
- if err != nil {
- return nil, err
- }
-
- c, ok := cfg.Remotes[name]
- if !ok {
- return nil, ErrRemoteNotFound
- }
-
- return NewRemote(r.Storer, c), nil
-}
-
-// Remotes returns a list with all the remotes
-func (r *Repository) Remotes() ([]*Remote, error) {
- cfg, err := r.Storer.Config()
- if err != nil {
- return nil, err
- }
-
- remotes := make([]*Remote, len(cfg.Remotes))
-
- var i int
- for _, c := range cfg.Remotes {
- remotes[i] = NewRemote(r.Storer, c)
- i++
- }
-
- return remotes, nil
-}
-
-// CreateRemote creates a new remote
-func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
-
- remote := NewRemote(r.Storer, c)
-
- cfg, err := r.Storer.Config()
- if err != nil {
- return nil, err
- }
-
- if _, ok := cfg.Remotes[c.Name]; ok {
- return nil, ErrRemoteExists
- }
-
- cfg.Remotes[c.Name] = c
- return remote, r.Storer.SetConfig(cfg)
-}
-
-// CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous".
-// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'.
-func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) {
- if err := c.Validate(); err != nil {
- return nil, err
- }
-
- if c.Name != "anonymous" {
- return nil, ErrAnonymousRemoteName
- }
-
- remote := NewRemote(r.Storer, c)
-
- return remote, nil
-}
-
-// DeleteRemote deletes a remote from the repository and removes it from the config.
-func (r *Repository) DeleteRemote(name string) error {
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Remotes[name]; !ok {
- return ErrRemoteNotFound
- }
-
- delete(cfg.Remotes, name)
- return r.Storer.SetConfig(cfg)
-}
-
-// Branch returns a Branch if it exists.
-func (r *Repository) Branch(name string) (*config.Branch, error) {
- cfg, err := r.Storer.Config()
- if err != nil {
- return nil, err
- }
-
- b, ok := cfg.Branches[name]
- if !ok {
- return nil, ErrBranchNotFound
- }
-
- return b, nil
-}
-
-// CreateBranch creates a new Branch
-func (r *Repository) CreateBranch(c *config.Branch) error {
- if err := c.Validate(); err != nil {
- return err
- }
-
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Branches[c.Name]; ok {
- return ErrBranchExists
- }
-
- cfg.Branches[c.Name] = c
- return r.Storer.SetConfig(cfg)
-}
-
-// DeleteBranch deletes a Branch from the repository and removes it from the config.
-func (r *Repository) DeleteBranch(name string) error {
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- if _, ok := cfg.Branches[name]; !ok {
- return ErrBranchNotFound
- }
-
- delete(cfg.Branches, name)
- return r.Storer.SetConfig(cfg)
-}
-
-// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
-// otherwise a lightweight tag is created.
-func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
- rname := plumbing.ReferenceName(path.Join("refs", "tags", name))
-
- _, err := r.Storer.Reference(rname)
- switch err {
- case nil:
- // Tag exists, this is an error
- return nil, ErrTagExists
- case plumbing.ErrReferenceNotFound:
- // Tag missing, available for creation, pass this
- default:
- // Some other error
- return nil, err
- }
-
- var target plumbing.Hash
- if opts != nil {
- target, err = r.createTagObject(name, hash, opts)
- if err != nil {
- return nil, err
- }
- } else {
- target = hash
- }
-
- ref := plumbing.NewHashReference(rname, target)
- if err = r.Storer.SetReference(ref); err != nil {
- return nil, err
- }
-
- return ref, nil
-}
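-
-// Hedged sketches of both tag flavors; "head" is assumed to be the result
-// of r.Head():
-//
-// // lightweight tag: nil opts
-// _, err := r.CreateTag("v1.0.0", head.Hash(), nil)
-//
-// // annotated tag: non-nil opts creates a tag object
-// _, err = r.CreateTag("v1.0.1", head.Hash(), &git.CreateTagOptions{
-// Message: "release v1.0.1",
-// })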
-
-func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) {
- if err := opts.Validate(r, hash); err != nil {
- return plumbing.ZeroHash, err
- }
-
- rawobj, err := object.GetObject(r.Storer, hash)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- tag := &object.Tag{
- Name: name,
- Tagger: *opts.Tagger,
- Message: opts.Message,
- TargetType: rawobj.Type(),
- Target: hash,
- }
-
- if opts.SignKey != nil {
- sig, err := r.buildTagSignature(tag, opts.SignKey)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- tag.PGPSignature = sig
- }
-
- obj := r.Storer.NewEncodedObject()
- if err := tag.Encode(obj); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return r.Storer.SetEncodedObject(obj)
-}
-
-func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) {
- encoded := &plumbing.MemoryObject{}
- if err := tag.Encode(encoded); err != nil {
- return "", err
- }
-
- rdr, err := encoded.Reader()
- if err != nil {
- return "", err
- }
-
- var b bytes.Buffer
- if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil {
- return "", err
- }
-
- return b.String(), nil
-}
-
-// Tag returns a tag from the repository.
-//
-// If you want to check to see if the tag is an annotated tag, you can call
-// TagObject on the hash of the reference in ForEach:
-//
-// ref, err := r.Tag("v0.1.0")
-// if err != nil {
-// // Handle error
-// }
-//
-// obj, err := r.TagObject(ref.Hash())
-// switch err {
-// case nil:
-// // Tag object present
-// case plumbing.ErrObjectNotFound:
-// // Not a tag object
-// default:
-// // Some other error
-// }
-//
-func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
- ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
- if err != nil {
- if err == plumbing.ErrReferenceNotFound {
- // Return a friendly error for this one, versus just ReferenceNotFound.
- return nil, ErrTagNotFound
- }
-
- return nil, err
- }
-
- return ref, nil
-}
-
-// DeleteTag deletes a tag from the repository.
-func (r *Repository) DeleteTag(name string) error {
- _, err := r.Tag(name)
- if err != nil {
- return err
- }
-
- return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name)))
-}
-
-func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) {
- obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- switch obj.Type() {
- case plumbing.TagObject:
- t, err := object.DecodeTag(r.Storer, obj)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- return r.resolveToCommitHash(t.Target)
- case plumbing.CommitObject:
- return h, nil
- default:
- return plumbing.ZeroHash, ErrUnableToResolveCommit
- }
-}
-
-// clone clones a remote repository into this repository.
-func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- c := &config.RemoteConfig{
- Name: o.RemoteName,
- URLs: []string{o.URL},
- Fetch: r.cloneRefSpec(o),
- }
-
- if _, err := r.CreateRemote(c); err != nil {
- return err
- }
-
- ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
- RefSpecs: c.Fetch,
- Depth: o.Depth,
- Auth: o.Auth,
- Progress: o.Progress,
- Tags: o.Tags,
- RemoteName: o.RemoteName,
- }, o.ReferenceName)
- if err != nil {
- return err
- }
-
- if r.wt != nil && !o.NoCheckout {
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- head, err := r.Head()
- if err != nil {
- return err
- }
-
- if err := w.Reset(&ResetOptions{
- Mode: MergeReset,
- Commit: head.Hash(),
- }); err != nil {
- return err
- }
-
- if o.RecurseSubmodules != NoRecurseSubmodules {
- if err := w.updateSubmodules(&SubmoduleUpdateOptions{
- RecurseSubmodules: o.RecurseSubmodules,
- Auth: o.Auth,
- }); err != nil {
- return err
- }
- }
- }
-
- if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil {
- return err
- }
-
- if ref.Name().IsBranch() {
- branchRef := ref.Name()
- branchName := strings.Split(string(branchRef), "refs/heads/")[1]
-
- b := &config.Branch{
- Name: branchName,
- Merge: branchRef,
- }
- if o.RemoteName == "" {
- b.Remote = "origin"
- } else {
- b.Remote = o.RemoteName
- }
- if err := r.CreateBranch(b); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-const (
- refspecTag = "+refs/tags/%s:refs/tags/%[1]s"
- refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s"
- refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
-)
-
-func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
- switch {
- case o.ReferenceName.IsTag():
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
- }
- case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
- config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
- }
- case o.SingleBranch:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)),
- }
- default:
- return []config.RefSpec{
- config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)),
- }
- }
-}
-
-func (r *Repository) setIsBare(isBare bool) error {
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- cfg.Core.IsBare = isBare
- return r.Storer.SetConfig(cfg)
-}
-
-func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error {
- if !o.SingleBranch {
- return nil
- }
-
- c.Fetch = r.cloneRefSpec(o)
-
- cfg, err := r.Storer.Config()
- if err != nil {
- return err
- }
-
- cfg.Remotes[c.Name] = c
- return r.Storer.SetConfig(cfg)
-}
-
-func (r *Repository) fetchAndUpdateReferences(
- ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName,
-) (*plumbing.Reference, error) {
-
- if err := o.Validate(); err != nil {
- return nil, err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return nil, err
- }
-
- objsUpdated := true
- remoteRefs, err := remote.fetch(ctx, o)
- if err == NoErrAlreadyUpToDate {
- objsUpdated = false
- } else if err == packfile.ErrEmptyPackfile {
- return nil, ErrFetching
- } else if err != nil {
- return nil, err
- }
-
- resolvedRef, err := storer.ResolveReference(remoteRefs, ref)
- if err != nil {
- return nil, err
- }
-
- refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef)
- if err != nil {
- return nil, err
- }
-
- if !objsUpdated && !refsUpdated {
- return nil, NoErrAlreadyUpToDate
- }
-
- return resolvedRef, nil
-}
-
-func (r *Repository) updateReferences(spec []config.RefSpec,
- resolvedRef *plumbing.Reference) (updated bool, err error) {
-
- if !resolvedRef.Name().IsBranch() {
- // Detached HEAD mode
- h, err := r.resolveToCommitHash(resolvedRef.Hash())
- if err != nil {
- return false, err
- }
- head := plumbing.NewHashReference(plumbing.HEAD, h)
- return updateReferenceStorerIfNeeded(r.Storer, head)
- }
-
- refs := []*plumbing.Reference{
- // Create local reference for the resolved ref
- resolvedRef,
- // Create local symbolic HEAD
- plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()),
- }
-
- refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...)
-
- for _, ref := range refs {
- u, err := updateReferenceStorerIfNeeded(r.Storer, ref)
- if err != nil {
- return updated, err
- }
-
- if u {
- updated = true
- }
- }
-
- return
-}
-
-func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec,
- resolvedHead *plumbing.Reference) []*plumbing.Reference {
-
- var refs []*plumbing.Reference
-
- // Create resolved HEAD reference with remote prefix if it does not
- // exist. This is needed when using single branch and HEAD.
- for _, rs := range spec {
- name := resolvedHead.Name()
- if !rs.Match(name) {
- continue
- }
-
- name = rs.Dst(name)
- _, err := r.Storer.Reference(name)
- if err == plumbing.ErrReferenceNotFound {
- refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash()))
- }
- }
-
- return refs
-}
-
-func checkAndUpdateReferenceStorerIfNeeded(
- s storer.ReferenceStorer, r, old *plumbing.Reference) (
- updated bool, err error) {
- p, err := s.Reference(r.Name())
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return false, err
- }
-
-	// We compare references using their String method, which is the easiest way.
- if err == plumbing.ErrReferenceNotFound || r.String() != p.String() {
- if err := s.CheckAndSetReference(r, old); err != nil {
- return false, err
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func updateReferenceStorerIfNeeded(
- s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) {
- return checkAndUpdateReferenceStorerIfNeeded(s, r, nil)
-}
-
-// Fetch fetches references along with the objects necessary to complete
-// their histories, from the remote named as FetchOptions.RemoteName.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-func (r *Repository) Fetch(o *FetchOptions) error {
- return r.FetchContext(context.Background(), o)
-}
-
-// FetchContext fetches references along with the objects necessary to complete
-// their histories, from the remote named as FetchOptions.RemoteName.
-//
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
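-//
-// An illustrative sketch (not from the original docs), assuming an open
-// *Repository r and a ctx context.Context in scope:
-//
-//   err := r.FetchContext(ctx, &git.FetchOptions{RemoteName: "origin"})
-//   if err != nil && err != git.NoErrAlreadyUpToDate {
-//     // handle the fetch failure
-//   }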
-func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- return remote.FetchContext(ctx, o)
-}
-
-// Push performs a push to the remote named in PushOptions.RemoteName. It
-// returns NoErrAlreadyUpToDate if the remote was already up-to-date.
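-//
-// An illustrative sketch (not from the original docs):
-//
-//   err := r.Push(&git.PushOptions{RemoteName: "origin"})
-//   if err == git.NoErrAlreadyUpToDate {
-//     // nothing was pushed; the remote was already up-to-date
-//   }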
-func (r *Repository) Push(o *PushOptions) error {
- return r.PushContext(context.Background(), o)
-}
-
-// PushContext performs a push to the remote named in PushOptions.RemoteName.
-// It returns NoErrAlreadyUpToDate if the remote was already up-to-date.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- return remote.PushContext(ctx, o)
-}
-
-// Log returns the commit history from the given LogOptions.
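-//
-// An illustrative sketch (not from the original docs); with a zero From,
-// iteration starts at HEAD:
-//
-//   iter, err := r.Log(&git.LogOptions{Order: git.LogOrderCommitterTime})
-//   if err == nil {
-//     _ = iter.ForEach(func(c *object.Commit) error {
-//       fmt.Println(c.Hash, c.Author.When)
-//       return nil
-//     })
-//   }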
-func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
- fn := commitIterFunc(o.Order)
- if fn == nil {
- return nil, fmt.Errorf("invalid Order=%v", o.Order)
- }
-
- var (
- it object.CommitIter
- err error
- )
- if o.All {
- it, err = r.logAll(fn)
- } else {
- it, err = r.log(o.From, fn)
- }
-
- if err != nil {
- return nil, err
- }
-
- if o.FileName != nil {
-		// For `git log --all`, also check the parent (whether the next commit comes from the real parent).
- it = r.logWithFile(*o.FileName, it, o.All)
- }
-
- return it, nil
-}
-
-func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
- h := from
- if from == plumbing.ZeroHash {
- head, err := r.Head()
- if err != nil {
- return nil, err
- }
-
- h = head.Hash()
- }
-
- commit, err := r.CommitObject(h)
- if err != nil {
- return nil, err
- }
- return commitIterFunc(commit), nil
-}
-
-func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
- return object.NewCommitAllIter(r.Storer, commitIterFunc)
-}
-
-func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
- return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent)
-}
-
-func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
- switch order {
- case LogOrderDefault:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPreorderIter(c, nil, nil)
- }
- case LogOrderDFS:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPreorderIter(c, nil, nil)
- }
- case LogOrderDFSPost:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitPostorderIter(c, nil)
- }
- case LogOrderBSF:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitIterBSF(c, nil, nil)
- }
- case LogOrderCommitterTime:
- return func(c *object.Commit) object.CommitIter {
- return object.NewCommitIterCTime(c, nil, nil)
- }
- }
- return nil
-}
-
-// Tags returns all the tag References in a repository.
-//
-// If you want to check whether the tag is an annotated tag, you can call
-// TagObject on the hash of the Reference passed in through ForEach:
-//
-//   iter, err := r.Tags()
-//   if err != nil {
-//     // Handle error
-//   }
-//
-//   if err := iter.ForEach(func(ref *plumbing.Reference) error {
-//     obj, err := r.TagObject(ref.Hash())
-//     switch err {
-//     case nil:
-//       // Tag object present; use obj here
-//       _ = obj
-//     case plumbing.ErrObjectNotFound:
-//       // Not a tag object
-//     default:
-//       // Some other error
-//       return err
-//     }
-//     return nil
-//   }); err != nil {
-//     // Handle outer iterator error
-//   }
-//
-func (r *Repository) Tags() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsTag()
- }, refIter), nil
-}
-
-// Branches returns all the References that are Branches.
-func (r *Repository) Branches() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsBranch()
- }, refIter), nil
-}
-
-// Notes returns all the References that are notes. For more information:
-// https://git-scm.com/docs/git-notes
-func (r *Repository) Notes() (storer.ReferenceIter, error) {
- refIter, err := r.Storer.IterReferences()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceFilteredIter(
- func(r *plumbing.Reference) bool {
- return r.Name().IsNote()
- }, refIter), nil
-}
-
-// TreeObject returns a Tree with the given hash. If not found,
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) {
- return object.GetTree(r.Storer, h)
-}
-
-// TreeObjects returns an unsorted TreeIter with all the trees in the repository
-func (r *Repository) TreeObjects() (*object.TreeIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewTreeIter(r.Storer, iter), nil
-}
-
-// CommitObject returns a Commit with the given hash. If not found,
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) {
- return object.GetCommit(r.Storer, h)
-}
-
-// CommitObjects returns an unsorted CommitIter with all the commits in the repository.
-func (r *Repository) CommitObjects() (object.CommitIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewCommitIter(r.Storer, iter), nil
-}
-
-// BlobObject returns a Blob with the given hash. If not found,
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) {
- return object.GetBlob(r.Storer, h)
-}
-
-// BlobObjects returns an unsorted BlobIter with all the blobs in the repository.
-func (r *Repository) BlobObjects() (*object.BlobIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewBlobIter(r.Storer, iter), nil
-}
-
-// TagObject returns a Tag with the given hash. If not found,
-// plumbing.ErrObjectNotFound is returned. This method only returns
-// annotated Tags, not lightweight Tags.
-func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) {
- return object.GetTag(r.Storer, h)
-}
-
-// TagObjects returns an unsorted TagIter that can step through all of the annotated
-// tags in the repository.
-func (r *Repository) TagObjects() (*object.TagIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewTagIter(r.Storer, iter), nil
-}
-
-// Object returns an Object with the given hash. If not found,
-// plumbing.ErrObjectNotFound is returned.
-func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) {
- obj, err := r.Storer.EncodedObject(t, h)
- if err != nil {
- return nil, err
- }
-
- return object.DecodeObject(r.Storer, obj)
-}
-
-// Objects returns an unsorted ObjectIter with all the objects in the repository.
-func (r *Repository) Objects() (*object.ObjectIter, error) {
- iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject)
- if err != nil {
- return nil, err
- }
-
- return object.NewObjectIter(r.Storer, iter), nil
-}
-
-// Head returns the reference that HEAD points to.
-func (r *Repository) Head() (*plumbing.Reference, error) {
- return storer.ResolveReference(r.Storer, plumbing.HEAD)
-}
-
-// Reference returns the reference for a given reference name. If resolved is
-// true, any symbolic reference will be resolved.
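-//
-// For example (an illustrative sketch, not from the original docs):
-//
-//   ref, err := r.Reference(plumbing.ReferenceName("refs/heads/master"), true)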
-func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) (
- *plumbing.Reference, error) {
-
- if resolved {
- return storer.ResolveReference(r.Storer, name)
- }
-
- return r.Storer.Reference(name)
-}
-
-// References returns an unsorted ReferenceIter for all references.
-func (r *Repository) References() (storer.ReferenceIter, error) {
- return r.Storer.IterReferences()
-}
-
-// Worktree returns the worktree of the repository. If the repository is
-// bare, ErrIsBareRepository is returned.
-func (r *Repository) Worktree() (*Worktree, error) {
- if r.wt == nil {
- return nil, ErrIsBareRepository
- }
-
- return &Worktree{r: r, Filesystem: r.wt}, nil
-}
-
-// ResolveRevision resolves the given revision to the corresponding hash. It
-// will always resolve to a commit hash, not a tree or annotated tag.
-//
-// Implemented resolvers: HEAD, branch, tag, heads/branch, refs/heads/branch,
-// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD,
-// tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...) and
-// selection by text (HEAD^{/fix nasty bug}).
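-//
-// An illustrative sketch (not from the original docs):
-//
-//   hash, err := r.ResolveRevision(plumbing.Revision("HEAD~1"))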
-func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) {
- p := revision.NewParserFromString(string(rev))
-
- items, err := p.Parse()
-
- if err != nil {
- return nil, err
- }
-
- var commit *object.Commit
-
- for _, item := range items {
- switch item.(type) {
- case revision.Ref:
- revisionRef := item.(revision.Ref)
-
- var tryHashes []plumbing.Hash
-
- maybeHash := plumbing.NewHash(string(revisionRef))
-
- if !maybeHash.IsZero() {
- tryHashes = append(tryHashes, maybeHash)
- }
-
- for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
- ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
-
- if err == nil {
- tryHashes = append(tryHashes, ref.Hash())
- break
- }
- }
-
- // in ambiguous cases, `git rev-parse` will emit a warning, but
- // will always return the oid in preference to a ref; we don't have
- // the ability to emit a warning here, so (for speed purposes)
- // don't bother to detect the ambiguity either, just return in the
- // priority that git would.
- gotOne := false
- for _, hash := range tryHashes {
- commitObj, err := r.CommitObject(hash)
- if err == nil {
- commit = commitObj
- gotOne = true
- break
- }
-
- tagObj, err := r.TagObject(hash)
- if err == nil {
- // If the tag target lookup fails here, this most likely
- // represents some sort of repo corruption, so let the
- // error bubble up.
- tagCommit, err := tagObj.Commit()
- if err != nil {
- return &plumbing.ZeroHash, err
- }
- commit = tagCommit
- gotOne = true
- break
- }
- }
-
- if !gotOne {
- return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
- }
-
- case revision.CaretPath:
- depth := item.(revision.CaretPath).Depth
-
- if depth == 0 {
- break
- }
-
- iter := commit.Parents()
-
- c, err := iter.Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- if depth == 1 {
- commit = c
-
- break
- }
-
- c, err = iter.Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- commit = c
- case revision.TildePath:
- for i := 0; i < item.(revision.TildePath).Depth; i++ {
- c, err := commit.Parents().Next()
-
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- commit = c
- }
- case revision.CaretReg:
- history := object.NewCommitPreorderIter(commit, nil, nil)
-
- re := item.(revision.CaretReg).Regexp
- negate := item.(revision.CaretReg).Negate
-
- var c *object.Commit
-
- err := history.ForEach(func(hc *object.Commit) error {
- if !negate && re.MatchString(hc.Message) {
- c = hc
- return storer.ErrStop
- }
-
- if negate && !re.MatchString(hc.Message) {
- c = hc
- return storer.ErrStop
- }
-
- return nil
- })
- if err != nil {
- return &plumbing.ZeroHash, err
- }
-
- if c == nil {
-			return &plumbing.ZeroHash, fmt.Errorf(`no commit message matches regexp: "%s"`, re.String())
- }
-
- commit = c
- }
- }
-
- return &commit.Hash, nil
-}
-
-type RepackConfig struct {
-	// UseRefDeltas configures whether the packfile encoder will use reference
-	// deltas. By default OFSDeltaObject is used.
-	UseRefDeltas bool
-	// OnlyDeletePacksOlderThan, if set to a non-zero value, restricts deletion
-	// to packs older than the time provided.
-	OnlyDeletePacksOlderThan time.Time
-}
-
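-// RepackObjects writes all the objects reachable from the repository's
-// references into a single new packfile and deletes the old packfiles.
-//
-// An illustrative sketch (not from the original docs), deleting only packs
-// older than one hour:
-//
-//   err := r.RepackObjects(&git.RepackConfig{
-//     OnlyDeletePacksOlderThan: time.Now().Add(-time.Hour),
-//   })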
-func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) {
- pos, ok := r.Storer.(storer.PackedObjectStorer)
- if !ok {
- return ErrPackedObjectsNotSupported
- }
-
- // Get the existing object packs.
- hs, err := pos.ObjectPacks()
- if err != nil {
- return err
- }
-
- // Create a new pack.
- nh, err := r.createNewObjectPack(cfg)
- if err != nil {
- return err
- }
-
- // Delete old packs.
- for _, h := range hs {
- // Skip if new hash is the same as an old one.
- if h == nh {
- continue
- }
- err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// createNewObjectPack is a helper for RepackObjects taking care
-// of creating a new pack. It is used so that the PackfileWriter's
-// deferred close has the right scope.
-func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) {
- ow := newObjectWalker(r.Storer)
- err = ow.walkAllRefs()
- if err != nil {
- return h, err
- }
- objs := make([]plumbing.Hash, 0, len(ow.seen))
- for h := range ow.seen {
- objs = append(objs, h)
- }
- pfw, ok := r.Storer.(storer.PackfileWriter)
- if !ok {
-		return h, fmt.Errorf("repository storer is not a storer.PackfileWriter")
- }
- wc, err := pfw.PackfileWriter()
- if err != nil {
- return h, err
- }
- defer ioutil.CheckClose(wc, &err)
- scfg, err := r.Storer.Config()
- if err != nil {
- return h, err
- }
- enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas)
- h, err = enc.Encode(objs, scfg.Pack.Window)
- if err != nil {
- return h, err
- }
-
- // Delete the packed, loose objects.
- if los, ok := r.Storer.(storer.LooseObjectStorer); ok {
- err = los.ForEachObjectHash(func(hash plumbing.Hash) error {
- if ow.isSeen(hash) {
- err = los.DeleteLooseObject(hash)
- if err != nil {
- return err
- }
- }
- return nil
- })
- if err != nil {
- return h, err
- }
- }
-
- return h, err
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/status.go b/vendor/gopkg.in/src-d/go-git.v4/status.go
deleted file mode 100644
index 7f18e02278..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/status.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package git
-
-import (
- "bytes"
- "fmt"
- "path/filepath"
-)
-
-// Status represents the current status of a Worktree.
-// The key of the map is the path of the file.
-type Status map[string]*FileStatus
-
-// File returns the FileStatus for a given path. If the FileStatus doesn't
-// exist, a new FileStatus is added to the map using the path as the key.
-func (s Status) File(path string) *FileStatus {
- if _, ok := (s)[path]; !ok {
- s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked}
- }
-
- return s[path]
-}
-
-// IsUntracked checks if the file at the given path is 'Untracked'.
-func (s Status) IsUntracked(path string) bool {
- stat, ok := (s)[filepath.ToSlash(path)]
- return ok && stat.Worktree == Untracked
-}
-
-// IsClean returns true if all the files are in Unmodified status.
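-//
-// An illustrative sketch (not from the original docs), assuming a *Worktree
-// wt obtained from Repository.Worktree:
-//
-//   status, err := wt.Status()
-//   if err == nil && !status.IsClean() {
-//     fmt.Print(status) // short-format listing of the changed files
-//   }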
-func (s Status) IsClean() bool {
- for _, status := range s {
- if status.Worktree != Unmodified || status.Staging != Unmodified {
- return false
- }
- }
-
- return true
-}
-
-func (s Status) String() string {
- buf := bytes.NewBuffer(nil)
- for path, status := range s {
- if status.Staging == Unmodified && status.Worktree == Unmodified {
- continue
- }
-
- if status.Staging == Renamed {
- path = fmt.Sprintf("%s -> %s", path, status.Extra)
- }
-
- fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path)
- }
-
- return buf.String()
-}
-
-// FileStatus contains the status of a file in the worktree
-type FileStatus struct {
- // Staging is the status of a file in the staging area
- Staging StatusCode
- // Worktree is the status of a file in the worktree
- Worktree StatusCode
- // Extra contains extra information, such as the previous name in a rename
- Extra string
-}
-
-// StatusCode is the status code of a file in the Worktree.
-type StatusCode byte
-
-const (
- Unmodified StatusCode = ' '
- Untracked StatusCode = '?'
- Modified StatusCode = 'M'
- Added StatusCode = 'A'
- Deleted StatusCode = 'D'
- Renamed StatusCode = 'R'
- Copied StatusCode = 'C'
- UpdatedButUnmerged StatusCode = 'U'
-)
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go
deleted file mode 100644
index be812e424f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package filesystem
-
-import (
- stdioutil "io/ioutil"
- "os"
-
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-type ConfigStorage struct {
- dir *dotgit.DotGit
-}
-
-func (c *ConfigStorage) Config() (conf *config.Config, err error) {
- cfg := config.NewConfig()
-
- f, err := c.dir.Config()
- if err != nil {
- if os.IsNotExist(err) {
- return cfg, nil
- }
-
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- b, err := stdioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- if err = cfg.Unmarshal(b); err != nil {
- return nil, err
- }
-
- return cfg, err
-}
-
-func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
- if err = cfg.Validate(); err != nil {
- return err
- }
-
- f, err := c.dir.ConfigWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- b, err := cfg.Marshal()
- if err != nil {
- return err
- }
-
- _, err = f.Write(b)
- return err
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/deltaobject.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/deltaobject.go
deleted file mode 100644
index 66cfb71657..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/deltaobject.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package filesystem
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-type deltaObject struct {
- plumbing.EncodedObject
- base plumbing.Hash
- hash plumbing.Hash
- size int64
-}
-
-func newDeltaObject(
- obj plumbing.EncodedObject,
- hash plumbing.Hash,
- base plumbing.Hash,
- size int64) plumbing.DeltaObject {
- return &deltaObject{
- EncodedObject: obj,
- hash: hash,
- base: base,
- size: size,
- }
-}
-
-func (o *deltaObject) BaseHash() plumbing.Hash {
- return o.base
-}
-
-func (o *deltaObject) ActualSize() int64 {
- return o.size
-}
-
-func (o *deltaObject) ActualHash() plumbing.Hash {
- return o.hash
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go
deleted file mode 100644
index 111769bf21..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go
+++ /dev/null
@@ -1,1099 +0,0 @@
-// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
-package dotgit
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- stdioutil "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "gopkg.in/src-d/go-billy.v4/osfs"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/storage"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-const (
- suffix = ".git"
- packedRefsPath = "packed-refs"
- configPath = "config"
- indexPath = "index"
- shallowPath = "shallow"
- modulePath = "modules"
- objectsPath = "objects"
- packPath = "pack"
- refsPath = "refs"
-
- tmpPackedRefsPrefix = "._packed-refs"
-
- packExt = ".pack"
- idxExt = ".idx"
-)
-
-var (
- // ErrNotFound is returned by New when the path is not found.
- ErrNotFound = errors.New("path not found")
- // ErrIdxNotFound is returned by Idxfile when the idx file is not found
- ErrIdxNotFound = errors.New("idx file not found")
- // ErrPackfileNotFound is returned by Packfile when the packfile is not found
- ErrPackfileNotFound = errors.New("packfile not found")
- // ErrConfigNotFound is returned by Config when the config is not found
- ErrConfigNotFound = errors.New("config file not found")
-	// ErrPackedRefsDuplicatedRef is returned when a duplicated reference is
-	// found in the packed-refs file. This is usually the case for corrupted git
-	// repositories.
-	ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file")
-	// ErrPackedRefsBadFormat is returned when the packed-refs file is corrupt.
-	ErrPackedRefsBadFormat = errors.New("malformed packed-ref")
- // ErrSymRefTargetNotFound is returned when a symbolic reference is
- // targeting a non-existing object. This usually means the repository
- // is corrupt.
- ErrSymRefTargetNotFound = errors.New("symbolic reference target not found")
-)
-
-// Options holds configuration for the storage.
-type Options struct {
- // ExclusiveAccess means that the filesystem is not modified externally
- // while the repo is open.
- ExclusiveAccess bool
-	// KeepDescriptors makes the file descriptors be reused, but they will
-	// need to be manually closed by calling Close().
-	KeepDescriptors bool
-}
-
-// The DotGit type represents a local git repository on disk. This
-// type is not zero-value-safe, use the New function to initialize it.
-type DotGit struct {
- options Options
- fs billy.Filesystem
-
- // incoming object directory information
- incomingChecked bool
- incomingDirName string
-
- objectList []plumbing.Hash
- objectMap map[plumbing.Hash]struct{}
- packList []plumbing.Hash
- packMap map[plumbing.Hash]struct{}
-
- files map[plumbing.Hash]billy.File
-}
-
-// New returns a DotGit value ready to be used. The fs argument must be a
-// filesystem rooted at the git repository directory (e.g.
-// "/foo/bar/.git").
-func New(fs billy.Filesystem) *DotGit {
- return NewWithOptions(fs, Options{})
-}
-
-// NewWithOptions is like New, but allows non-default configuration options
-// to be set. See New for complete help.
-func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
- return &DotGit{
- options: o,
- fs: fs,
- }
-}
-
-// Initialize creates all the folder scaffolding.
-func (d *DotGit) Initialize() error {
- mustExists := []string{
- d.fs.Join("objects", "info"),
- d.fs.Join("objects", "pack"),
- d.fs.Join("refs", "heads"),
- d.fs.Join("refs", "tags"),
- }
-
- for _, path := range mustExists {
- _, err := d.fs.Stat(path)
- if err == nil {
- continue
- }
-
- if !os.IsNotExist(err) {
- return err
- }
-
- if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Close closes all opened files.
-func (d *DotGit) Close() error {
- var firstError error
- if d.files != nil {
- for _, f := range d.files {
- err := f.Close()
- if err != nil && firstError == nil {
- firstError = err
- continue
- }
- }
-
- d.files = nil
- }
-
- if firstError != nil {
- return firstError
- }
-
- return nil
-}
-
-// ConfigWriter returns a file pointer for writing to the config file.
-func (d *DotGit) ConfigWriter() (billy.File, error) {
- return d.fs.Create(configPath)
-}
-
-// Config returns a file pointer for reading the config file.
-func (d *DotGit) Config() (billy.File, error) {
- return d.fs.Open(configPath)
-}
-
-// IndexWriter returns a file pointer for writing to the index file.
-func (d *DotGit) IndexWriter() (billy.File, error) {
- return d.fs.Create(indexPath)
-}
-
-// Index returns a file pointer for reading the index file.
-func (d *DotGit) Index() (billy.File, error) {
- return d.fs.Open(indexPath)
-}
-
-// ShallowWriter returns a file pointer for writing to the shallow file.
-func (d *DotGit) ShallowWriter() (billy.File, error) {
- return d.fs.Create(shallowPath)
-}
-
-// Shallow returns a file pointer for reading the shallow file.
-func (d *DotGit) Shallow() (billy.File, error) {
- f, err := d.fs.Open(shallowPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- return f, nil
-}
-
-// NewObjectPack returns a writer for a new packfile. It saves the packfile
-// to disk and also generates and saves the index for the given packfile.
-func (d *DotGit) NewObjectPack() (*PackWriter, error) {
- d.cleanPackList()
- return newPackWrite(d.fs)
-}
-
-// ObjectPacks returns the list of available packfiles.
-func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
- if !d.options.ExclusiveAccess {
- return d.objectPacks()
- }
-
- err := d.genPackList()
- if err != nil {
- return nil, err
- }
-
- return d.packList, nil
-}
-
-func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
- packDir := d.fs.Join(objectsPath, packPath)
- files, err := d.fs.ReadDir(packDir)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- var packs []plumbing.Hash
- for _, f := range files {
- if !strings.HasSuffix(f.Name(), packExt) {
- continue
- }
-
- n := f.Name()
- h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack
- if h.IsZero() {
- // Ignore files with badly-formatted names.
- continue
- }
- packs = append(packs, h)
- }
-
- return packs, nil
-}
-
-func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
- return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension))
-}
-
-func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
- if d.options.KeepDescriptors && extension == "pack" {
- if d.files == nil {
- d.files = make(map[plumbing.Hash]billy.File)
- }
-
- f, ok := d.files[hash]
- if ok {
- return f, nil
- }
- }
-
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- path := d.objectPackPath(hash, extension)
- pack, err := d.fs.Open(path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, ErrPackfileNotFound
- }
-
- return nil, err
- }
-
- if d.options.KeepDescriptors && extension == "pack" {
- d.files[hash] = pack
- }
-
- return pack, nil
-}
-
-// ObjectPack returns a billy.File of the given packfile.
-func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) {
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- return d.objectPackOpen(hash, `pack`)
-}
-
-// ObjectPackIdx returns a billy.File of the index file for the given packfile.
-func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) {
- err := d.hasPack(hash)
- if err != nil {
- return nil, err
- }
-
- return d.objectPackOpen(hash, `idx`)
-}
-
-func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error {
- d.cleanPackList()
-
- path := d.objectPackPath(hash, `pack`)
- if !t.IsZero() {
- fi, err := d.fs.Stat(path)
- if err != nil {
- return err
- }
- // too new, skip deletion.
- if !fi.ModTime().Before(t) {
- return nil
- }
- }
- err := d.fs.Remove(path)
- if err != nil {
- return err
- }
- return d.fs.Remove(d.objectPackPath(hash, `idx`))
-}
-
-// NewObject returns a writer for a new object file.
-func (d *DotGit) NewObject() (*ObjectWriter, error) {
- d.cleanObjectList()
-
- return newObjectWriter(d.fs)
-}
-
-// Objects returns a slice with the hashes of objects found under the
-// .git/objects/ directory.
-func (d *DotGit) Objects() ([]plumbing.Hash, error) {
- if d.options.ExclusiveAccess {
- err := d.genObjectList()
- if err != nil {
- return nil, err
- }
-
- return d.objectList, nil
- }
-
- var objects []plumbing.Hash
- err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
- objects = append(objects, hash)
- return nil
- })
- if err != nil {
- return nil, err
- }
- return objects, nil
-}
-
-// ForEachObjectHash iterates over the hashes of objects found under the
-// .git/objects/ directory and executes the provided function.
-func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- if !d.options.ExclusiveAccess {
- return d.forEachObjectHash(fun)
- }
-
- err := d.genObjectList()
- if err != nil {
- return err
- }
-
- for _, h := range d.objectList {
- err := fun(h)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error {
- files, err := d.fs.ReadDir(objectsPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
- }
-
- for _, f := range files {
- if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) {
- base := f.Name()
- d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base))
- if err != nil {
- return err
- }
-
- for _, o := range d {
- h := plumbing.NewHash(base + o.Name())
- if h.IsZero() {
- // Ignore files with badly-formatted names.
- continue
- }
- err = fun(h)
- if err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (d *DotGit) cleanObjectList() {
- d.objectMap = nil
- d.objectList = nil
-}
-
-func (d *DotGit) genObjectList() error {
- if d.objectMap != nil {
- return nil
- }
-
- d.objectMap = make(map[plumbing.Hash]struct{})
- return d.forEachObjectHash(func(h plumbing.Hash) error {
- d.objectList = append(d.objectList, h)
- d.objectMap[h] = struct{}{}
-
- return nil
- })
-}
-
-func (d *DotGit) hasObject(h plumbing.Hash) error {
- if !d.options.ExclusiveAccess {
- return nil
- }
-
- err := d.genObjectList()
- if err != nil {
- return err
- }
-
- _, ok := d.objectMap[h]
- if !ok {
- return plumbing.ErrObjectNotFound
- }
-
- return nil
-}
-
-func (d *DotGit) cleanPackList() {
- d.packMap = nil
- d.packList = nil
-}
-
-func (d *DotGit) genPackList() error {
- if d.packMap != nil {
- return nil
- }
-
- op, err := d.objectPacks()
- if err != nil {
- return err
- }
-
- d.packMap = make(map[plumbing.Hash]struct{})
- d.packList = nil
-
- for _, h := range op {
- d.packList = append(d.packList, h)
- d.packMap[h] = struct{}{}
- }
-
- return nil
-}
-
-func (d *DotGit) hasPack(h plumbing.Hash) error {
- if !d.options.ExclusiveAccess {
- return nil
- }
-
- err := d.genPackList()
- if err != nil {
- return err
- }
-
- _, ok := d.packMap[h]
- if !ok {
- return ErrPackfileNotFound
- }
-
- return nil
-}
-
-func (d *DotGit) objectPath(h plumbing.Hash) string {
- hash := h.String()
- return d.fs.Join(objectsPath, hash[0:2], hash[2:40])
-}
-
-// incomingObjectPath is intended to add support for writing a git
-// pre-receive hook: it lets go-git find objects in an "incoming" directory,
-// so that the library can be used to write a pre-receive hook that deals
-// with the incoming objects.
-//
-// More on git hooks: https://git-scm.com/docs/githooks
-// More on the 'quarantine'/incoming directory:
-// https://git-scm.com/docs/git-receive-pack
-func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
- hString := h.String()
-
- if d.incomingDirName == "" {
- return d.fs.Join(objectsPath, hString[0:2], hString[2:40])
- }
-
- return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40])
-}
-
-// hasIncomingObjects searches for an incoming directory and keeps its name
-// so it doesn't have to be found each time an object is accessed.
-func (d *DotGit) hasIncomingObjects() bool {
- if !d.incomingChecked {
- directoryContents, err := d.fs.ReadDir(objectsPath)
- if err == nil {
- for _, file := range directoryContents {
- if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() {
- d.incomingDirName = file.Name()
- }
- }
- }
-
- d.incomingChecked = true
- }
-
- return d.incomingDirName != ""
-}
-
-// Object returns a billy.File pointing to the object file, if it exists.
-func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
- err := d.hasObject(h)
- if err != nil {
- return nil, err
- }
-
- obj1, err1 := d.fs.Open(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
- if err2 != nil {
- return obj1, err1
- }
- return obj2, err2
- }
- return obj1, err1
-}
-
-// ObjectStat returns an os.FileInfo for the object file, if it exists.
-func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
- err := d.hasObject(h)
- if err != nil {
- return nil, err
- }
-
- obj1, err1 := d.fs.Stat(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
- if err2 != nil {
- return obj1, err1
- }
- return obj2, err2
- }
- return obj1, err1
-}
-
-// ObjectDelete removes the object file, if it exists.
-func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
- d.cleanObjectList()
-
- err1 := d.fs.Remove(d.objectPath(h))
- if os.IsNotExist(err1) && d.hasIncomingObjects() {
- err2 := d.fs.Remove(d.incomingObjectPath(h))
- if err2 != nil {
- return err1
- }
- return err2
- }
- return err1
-}
-
-func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
- b, err := stdioutil.ReadAll(rd)
- if err != nil {
- return nil, err
- }
-
- line := strings.TrimSpace(string(b))
- return plumbing.NewReferenceFromStrings(name, line), nil
-}
-
-func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error {
- if old == nil {
- return nil
- }
- ref, err := d.readReferenceFrom(f, old.Name().String())
- if err != nil {
- return err
- }
- if ref.Hash() != old.Hash() {
- return storage.ErrReferenceHasChanged
- }
- _, err = f.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- return f.Truncate(0)
-}
-
-func (d *DotGit) SetRef(r, old *plumbing.Reference) error {
- var content string
- switch r.Type() {
- case plumbing.SymbolicReference:
- content = fmt.Sprintf("ref: %s\n", r.Target())
- case plumbing.HashReference:
- content = fmt.Sprintln(r.Hash().String())
- }
-
- fileName := r.Name().String()
-
- return d.setRef(fileName, content, old)
-}
-
-// Refs scans the git directory collecting references, which it returns.
-// Symbolic references are resolved and included in the output.
-func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
- var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
- return nil, err
- }
-
- if err := d.addRefsFromPackedRefs(&refs, seen); err != nil {
- return nil, err
- }
-
- if err := d.addRefFromHEAD(&refs); err != nil {
- return nil, err
- }
-
- return refs, nil
-}
-
-// Ref returns the reference for a given reference name.
-func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) {
- ref, err := d.readReferenceFile(".", name.String())
- if err == nil {
- return ref, nil
- }
-
- return d.packedRef(name)
-}
-
-func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) {
- s := bufio.NewScanner(f)
- var refs []*plumbing.Reference
- for s.Scan() {
- ref, err := d.processLine(s.Text())
- if err != nil {
- return nil, err
- }
-
- if ref != nil {
- refs = append(refs, ref)
- }
- }
-
- return refs, s.Err()
-}
-
-func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) {
- f, err := d.fs.Open(packedRefsPath)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
- return d.findPackedRefsInFile(f)
-}
-
-func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) {
- refs, err := d.findPackedRefs()
- if err != nil {
- return nil, err
- }
-
- for _, ref := range refs {
- if ref.Name() == name {
- return ref, nil
- }
- }
-
- return nil, plumbing.ErrReferenceNotFound
-}
-
-// RemoveRef removes a reference by name.
-func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error {
- path := d.fs.Join(".", name.String())
- _, err := d.fs.Stat(path)
- if err == nil {
- err = d.fs.Remove(path)
- // Drop down to remove it from the packed refs file, too.
- }
-
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- return d.rewritePackedRefsWithoutRef(name)
-}
-
-func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) {
- packedRefs, err := d.findPackedRefs()
- if err != nil {
- return err
- }
-
- for _, ref := range packedRefs {
- if !seen[ref.Name()] {
- *refs = append(*refs, ref)
- seen[ref.Name()] = true
- }
- }
- return nil
-}
-
-func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) {
- packedRefs, err := d.findPackedRefsInFile(f)
- if err != nil {
- return err
- }
-
- for _, ref := range packedRefs {
- if !seen[ref.Name()] {
- *refs = append(*refs, ref)
- seen[ref.Name()] = true
- }
- }
- return nil
-}
-
-func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
- pr billy.File, err error) {
- var f billy.File
- defer func() {
- if err != nil && f != nil {
- ioutil.CheckClose(f, &err)
- }
- }()
-
- // File mode is retrieved from a constant defined in the target specific
- // files (dotgit_rewrite_packed_refs_*). Some modes are not available
- // in all filesystems.
- openFlags := d.openAndLockPackedRefsMode()
- if doCreate {
- openFlags |= os.O_CREATE
- }
-
- // Keep trying to open and lock the file until we're sure the file
- // didn't change between the open and the lock.
- for {
- f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600)
- if err != nil {
- if os.IsNotExist(err) && !doCreate {
- return nil, nil
- }
-
- return nil, err
- }
- fi, err := d.fs.Stat(packedRefsPath)
- if err != nil {
- return nil, err
- }
- mtime := fi.ModTime()
-
- err = f.Lock()
- if err != nil {
- return nil, err
- }
-
- fi, err = d.fs.Stat(packedRefsPath)
- if err != nil {
- return nil, err
- }
- if mtime.Equal(fi.ModTime()) {
- break
- }
- // The file has changed since we opened it. Close and retry.
- err = f.Close()
- if err != nil {
- return nil, err
- }
- }
- return f, nil
-}
-
-func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) {
- pr, err := d.openAndLockPackedRefs(false)
- if err != nil {
- return err
- }
- if pr == nil {
- return nil
- }
- defer ioutil.CheckClose(pr, &err)
-
-	// Creating the temp file in the same directory as the target file
-	// improves our chances for the rename operation to be atomic.
- tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
- if err != nil {
- return err
- }
- tmpName := tmp.Name()
- defer func() {
- ioutil.CheckClose(tmp, &err)
- _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
- }()
-
- s := bufio.NewScanner(pr)
- found := false
- for s.Scan() {
- line := s.Text()
- ref, err := d.processLine(line)
- if err != nil {
- return err
- }
-
- if ref != nil && ref.Name() == name {
- found = true
- continue
- }
-
- if _, err := fmt.Fprintln(tmp, line); err != nil {
- return err
- }
- }
-
- if err := s.Err(); err != nil {
- return err
- }
-
- if !found {
- return nil
- }
-
- return d.rewritePackedRefsWhileLocked(tmp, pr)
-}
-
-// process lines from a packed-refs file
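-// Each payload line has the form "<hash> <refname>", e.g.
-//
-//   6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master
-//
-// Lines starting with '#' are comments and lines starting with '^' carry
-// the peeled hash of the preceding annotated tag; both are skipped here.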
-func (d *DotGit) processLine(line string) (*plumbing.Reference, error) {
- if len(line) == 0 {
- return nil, nil
- }
-
- switch line[0] {
- case '#': // comment - ignore
- return nil, nil
- case '^': // annotated tag commit of the previous line - ignore
- return nil, nil
- default:
- ws := strings.Split(line, " ") // hash then ref
- if len(ws) != 2 {
- return nil, ErrPackedRefsBadFormat
- }
-
- return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil
- }
-}
-
-func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error {
- return d.walkReferencesTree(refs, []string{refsPath}, seen)
-}
-
-func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error {
- files, err := d.fs.ReadDir(d.fs.Join(relPath...))
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
- }
-
- for _, f := range files {
- newRelPath := append(append([]string(nil), relPath...), f.Name())
- if f.IsDir() {
- if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil {
- return err
- }
-
- continue
- }
-
- ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/"))
- if err != nil {
- return err
- }
-
- if ref != nil && !seen[ref.Name()] {
- *refs = append(*refs, ref)
- seen[ref.Name()] = true
- }
- }
-
- return nil
-}
-
-func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error {
- ref, err := d.readReferenceFile(".", "HEAD")
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
- }
-
- *refs = append(*refs, ref)
- return nil
-}
-
-func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) {
- path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...))
- f, err := d.fs.Open(path)
- if err != nil {
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- return d.readReferenceFrom(f, name)
-}
-
-func (d *DotGit) CountLooseRefs() (int, error) {
- var refs []*plumbing.Reference
- var seen = make(map[plumbing.ReferenceName]bool)
- if err := d.addRefsFromRefDir(&refs, seen); err != nil {
- return 0, err
- }
-
- return len(refs), nil
-}
-
-// PackRefs packs all loose refs into the packed-refs file.
-//
-// This implementation only works under the assumption that the view
-// of the file system won't be updated during this operation. This
-// strategy would not work on a general file system though, without
-// locking each loose reference and checking it again before deleting
-// the file, because otherwise an updated reference could sneak in and
-// then be deleted by the packed-refs process. Alternatively, every
-// ref update could also lock packed-refs, so only one lock is
-// required during ref-packing. But that would worsen performance in
-// the common case.
-//
-// TODO: add an "all" boolean like the `git pack-refs --all` flag.
-// When `all` is false, it would only pack refs that have already been
-// packed, plus all tags.
-func (d *DotGit) PackRefs() (err error) {
- // Lock packed-refs, and create it if it doesn't exist yet.
- f, err := d.openAndLockPackedRefs(true)
- if err != nil {
- return err
- }
- defer ioutil.CheckClose(f, &err)
-
- // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs.
- var refs []*plumbing.Reference
- seen := make(map[plumbing.ReferenceName]bool)
- if err = d.addRefsFromRefDir(&refs, seen); err != nil {
- return err
- }
- if len(refs) == 0 {
- // Nothing to do!
- return nil
- }
- numLooseRefs := len(refs)
- if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil {
- return err
- }
-
- // Write them all to a new temp packed-refs file.
- tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
- if err != nil {
- return err
- }
- tmpName := tmp.Name()
- defer func() {
- ioutil.CheckClose(tmp, &err)
- _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
- }()
-
- w := bufio.NewWriter(tmp)
- for _, ref := range refs {
- _, err = w.WriteString(ref.String() + "\n")
- if err != nil {
- return err
- }
- }
- err = w.Flush()
- if err != nil {
- return err
- }
-
- // Rename the temp packed-refs file.
- err = d.rewritePackedRefsWhileLocked(tmp, f)
- if err != nil {
- return err
- }
-
- // Delete all the loose refs, while still holding the packed-refs
- // lock.
- for _, ref := range refs[:numLooseRefs] {
- path := d.fs.Join(".", ref.Name().String())
- err = d.fs.Remove(path)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- }
-
- return nil
-}
-
-// Module returns a billy.Filesystem pointing to the module folder.
-func (d *DotGit) Module(name string) (billy.Filesystem, error) {
- return d.fs.Chroot(d.fs.Join(modulePath, name))
-}
-
-// Alternates returns DotGit(s) based on the paths in objects/info/alternates,
-// if available. This can be used to check whether it's a shared repository.
-func (d *DotGit) Alternates() ([]*DotGit, error) {
- altpath := d.fs.Join("objects", "info", "alternates")
- f, err := d.fs.Open(altpath)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- var alternates []*DotGit
-
- // Read alternate paths line-by-line and create DotGit objects.
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- path := scanner.Text()
- if !filepath.IsAbs(path) {
- // For relative paths, we can perform an internal conversion to
- // slash so that they work cross-platform.
- slashPath := filepath.ToSlash(path)
- // If the path is not absolute, it must be relative to object
- // database (.git/objects/info).
- // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
- // Hence, derive a path relative to DotGit's root.
- // "../../../reponame/.git/" -> "../../reponame/.git"
- // Remove the first ../
- relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...)
- normalPath := filepath.FromSlash(relpath)
- path = filepath.Join(d.fs.Root(), normalPath)
- }
- fs := osfs.New(filepath.Dir(path))
- alternates = append(alternates, New(fs))
- }
-
- if err = scanner.Err(); err != nil {
- return nil, err
- }
-
- return alternates, nil
-}
-
-// Fs returns the underlying filesystem of the DotGit folder.
-func (d *DotGit) Fs() billy.Filesystem {
- return d.fs
-}
-
-func isHex(s string) bool {
- for _, b := range []byte(s) {
- if isNum(b) {
- continue
- }
- if isHexAlpha(b) {
- continue
- }
-
- return false
- }
-
- return true
-}
-
-func isNum(b byte) bool {
- return b >= '0' && b <= '9'
-}
-
-func isHexAlpha(b byte) bool {
- return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F'
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
deleted file mode 100644
index 7f1c02c15b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package dotgit
-
-import (
- "io"
- "os"
- "runtime"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-func (d *DotGit) openAndLockPackedRefsMode() int {
- if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
- return os.O_RDWR
- }
-
- return os.O_RDONLY
-}
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
- tmp billy.File, pr billy.File) error {
- // Try plain rename. If we aren't using the bare Windows filesystem as the
- // storage layer, we might be able to get away with a rename over a locked
- // file.
- err := d.fs.Rename(tmp.Name(), pr.Name())
- if err == nil {
- return nil
- }
-
- // If we are in a filesystem that does not support rename (e.g. sivafs)
- // a full copy is done.
- if err == billy.ErrNotSupported {
- return d.copyNewFile(tmp, pr)
- }
-
- if runtime.GOOS != "windows" {
- return err
- }
-
- // Otherwise, Windows doesn't let us rename over a locked file, so
- // we have to do a straight copy. Unfortunately this could result
- // in a partially-written file if the process fails before the
- // copy completes.
- return d.copyToExistingFile(tmp, pr)
-}
-
-func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
- _, err := pr.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- err = pr.Truncate(0)
- if err != nil {
- return err
- }
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- _, err = io.Copy(pr, tmp)
-
- return err
-}
-
-func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
- prWrite, err := d.fs.Create(pr.Name())
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(prWrite, &err)
-
- _, err = tmp.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
-
- _, err = io.Copy(prWrite, tmp)
-
- return err
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go
deleted file mode 100644
index 9da2f31e89..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package dotgit
-
-import (
- "fmt"
- "os"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
- if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
- return d.setRefRwfs(fileName, content, old)
- }
-
- return d.setRefNorwfs(fileName, content, old)
-}
-
-func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
- // If we are not checking an old ref, just truncate the file.
- mode := os.O_RDWR | os.O_CREATE
- if old == nil {
- mode |= os.O_TRUNC
- }
-
- f, err := d.fs.OpenFile(fileName, mode, 0666)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- // Lock is unlocked by the deferred Close above. This is because Unlock
- // does not imply a fsync and thus there would be a race between
- // Unlock+Close and other concurrent writers. Adding Sync to go-billy
- // could work, but this is better (and avoids superfluous syncs).
- err = f.Lock()
- if err != nil {
- return err
- }
-
- // this is a no-op to call even when old is nil.
- err = d.checkReferenceAndTruncate(f, old)
- if err != nil {
- return err
- }
-
- _, err = f.Write([]byte(content))
- return err
-}
-
-// There are some filesystems that don't support opening files in RDWR mode.
-// In these filesystems the standard SetRef function cannot be used, as it
-// reads the reference file to check that it was not modified before
-// updating it.
-//
-// This version of the function writes the reference without extra checks,
-// making it compatible with these simple filesystems. This is usually not a
-// problem, as they should be accessed by only one process at a time.
-func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
- _, err := d.fs.Stat(fileName)
- if err == nil && old != nil {
- fRead, err := d.fs.Open(fileName)
- if err != nil {
- return err
- }
-
- ref, err := d.readReferenceFrom(fRead, old.Name().String())
- fRead.Close()
-
- if err != nil {
- return err
- }
-
- if ref.Hash() != old.Hash() {
- return fmt.Errorf("reference has changed concurrently")
- }
- }
-
- f, err := d.fs.Create(fileName)
- if err != nil {
- return err
- }
-
- defer f.Close()
-
- _, err = f.Write([]byte(content))
- return err
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/writers.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/writers.go
deleted file mode 100644
index 93d2d8cc7a..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/writers.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package dotgit
-
-import (
- "fmt"
- "io"
- "sync/atomic"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// PackWriter is an io.Writer that generates the packfile index
-// simultaneously: a packfile.Parser reads the file being written through a
-// synchronized file reader, in step with the write operations. The packfile
-// is written to a temp file; when Close is called, this file is
-// renamed/moved (depending on the Filesystem implementation) to its final
-// location. If the PackWriter is never written to, nothing is saved.
-type PackWriter struct {
- Notify func(plumbing.Hash, *idxfile.Writer)
-
- fs billy.Filesystem
- fr, fw billy.File
- synced *syncedReader
- checksum plumbing.Hash
- parser *packfile.Parser
- writer *idxfile.Writer
- result chan error
-}
-
-func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
- fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_")
- if err != nil {
- return nil, err
- }
-
- fr, err := fs.Open(fw.Name())
- if err != nil {
- return nil, err
- }
-
- writer := &PackWriter{
- fs: fs,
- fw: fw,
- fr: fr,
- synced: newSyncedReader(fw, fr),
- result: make(chan error),
- }
-
- go writer.buildIndex()
- return writer, nil
-}
-
-func (w *PackWriter) buildIndex() {
- s := packfile.NewScanner(w.synced)
- w.writer = new(idxfile.Writer)
- var err error
- w.parser, err = packfile.NewParser(s, w.writer)
- if err != nil {
- w.result <- err
- return
- }
-
- checksum, err := w.parser.Parse()
- if err != nil {
- w.result <- err
- return
- }
-
- w.checksum = checksum
- w.result <- err
-}
-
-// waitBuildIndex waits until the buildIndex goroutine finishes. It can
-// terminate with packfile.ErrEmptyPackfile, which means that nothing was
-// written, so we ignore that error.
-func (w *PackWriter) waitBuildIndex() error {
- err := <-w.result
- if err == packfile.ErrEmptyPackfile {
- return nil
- }
-
- return err
-}
-
-func (w *PackWriter) Write(p []byte) (int, error) {
- return w.synced.Write(p)
-}
-
-// Close closes all the file descriptors and saves the final packfile; if
-// nothing was written, the temp files are deleted without writing a
-// packfile.
-func (w *PackWriter) Close() error {
- defer func() {
- if w.Notify != nil && w.writer != nil && w.writer.Finished() {
- w.Notify(w.checksum, w.writer)
- }
-
- close(w.result)
- }()
-
- if err := w.synced.Close(); err != nil {
- return err
- }
-
- if err := w.waitBuildIndex(); err != nil {
- return err
- }
-
- if err := w.fr.Close(); err != nil {
- return err
- }
-
- if err := w.fw.Close(); err != nil {
- return err
- }
-
- if w.writer == nil || !w.writer.Finished() {
- return w.clean()
- }
-
- return w.save()
-}
-
-func (w *PackWriter) clean() error {
- return w.fs.Remove(w.fw.Name())
-}
-
-func (w *PackWriter) save() error {
- base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
- idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
- if err != nil {
- return err
- }
-
- if err := w.encodeIdx(idx); err != nil {
- return err
- }
-
- if err := idx.Close(); err != nil {
- return err
- }
-
- return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base))
-}
-
-func (w *PackWriter) encodeIdx(writer io.Writer) error {
- idx, err := w.writer.Index()
- if err != nil {
- return err
- }
-
- e := idxfile.NewEncoder(writer)
- _, err = e.Encode(idx)
- return err
-}
-
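-// syncedReader coordinates a writer and a reader over the same file: Read
-// blocks (via the news channel) until the writer has produced more bytes
-// than have been consumed, so the packfile can be parsed while it is still
-// being written.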
-type syncedReader struct {
- w io.Writer
- r io.ReadSeeker
-
- blocked, done uint32
- written, read uint64
- news chan bool
-}
-
-func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader {
- return &syncedReader{
- w: w,
- r: r,
- news: make(chan bool),
- }
-}
-
-func (s *syncedReader) Write(p []byte) (n int, err error) {
- defer func() {
- written := atomic.AddUint64(&s.written, uint64(n))
- read := atomic.LoadUint64(&s.read)
- if written > read {
- s.wake()
- }
- }()
-
- n, err = s.w.Write(p)
- return
-}
-
-func (s *syncedReader) Read(p []byte) (n int, err error) {
- defer func() { atomic.AddUint64(&s.read, uint64(n)) }()
-
- for {
- s.sleep()
- n, err = s.r.Read(p)
- if err == io.EOF && !s.isDone() && n == 0 {
- continue
- }
-
- break
- }
-
- return
-}
-
-func (s *syncedReader) isDone() bool {
- return atomic.LoadUint32(&s.done) == 1
-}
-
-func (s *syncedReader) isBlocked() bool {
- return atomic.LoadUint32(&s.blocked) == 1
-}
-
-func (s *syncedReader) wake() {
- if s.isBlocked() {
- atomic.StoreUint32(&s.blocked, 0)
- s.news <- true
- }
-}
-
-func (s *syncedReader) sleep() {
- read := atomic.LoadUint64(&s.read)
- written := atomic.LoadUint64(&s.written)
- if read >= written {
- atomic.StoreUint32(&s.blocked, 1)
- <-s.news
- }
-
-}
-
-func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
- if whence == io.SeekCurrent {
- return s.r.Seek(offset, whence)
- }
-
- p, err := s.r.Seek(offset, whence)
- atomic.StoreUint64(&s.read, uint64(p))
-
- return p, err
-}
-
-func (s *syncedReader) Close() error {
- atomic.StoreUint32(&s.done, 1)
- close(s.news)
- return nil
-}
-
-type ObjectWriter struct {
- objfile.Writer
- fs billy.Filesystem
- f billy.File
-}
-
-func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) {
- f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_")
- if err != nil {
- return nil, err
- }
-
- return &ObjectWriter{
- Writer: (*objfile.NewWriter(f)),
- fs: fs,
- f: f,
- }, nil
-}
-
-func (w *ObjectWriter) Close() error {
- if err := w.Writer.Close(); err != nil {
- return err
- }
-
- if err := w.f.Close(); err != nil {
- return err
- }
-
- return w.save()
-}
-
-func (w *ObjectWriter) save() error {
- hash := w.Hash().String()
- file := w.fs.Join(objectsPath, hash[0:2], hash[2:40])
-
- return w.fs.Rename(w.f.Name(), file)
-}
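PackWriter above writes the incoming pack stream to a temporary file while, via syncedReader, a parser indexes the very same bytes in flight. A minimal standard-library sketch of that tee idea (names and sample data are illustrative, not go-git API; a hasher stands in for the parser):

    package main

    import (
        "crypto/sha1"
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        src := strings.NewReader("pretend this is a packfile stream")

        f, err := os.CreateTemp("", "tmp_pack_*")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()

        // Every byte the "parser" (here just a hasher) consumes is also
        // written to the temp file, mirroring how PackWriter feeds the
        // packfile parser and the on-disk file at once.
        h := sha1.New()
        if _, err := io.Copy(h, io.TeeReader(src, f)); err != nil {
            panic(err)
        }

        fmt.Printf("wrote %s, checksum %x\n", f.Name(), h.Sum(nil))
    }

io.TeeReader avoids the wake/sleep bookkeeping of syncedReader at the cost of coupling the reader's pace to the writer's.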
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go
deleted file mode 100644
index be800eff3b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package filesystem
-
-import (
- "bufio"
- "os"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-type IndexStorage struct {
- dir *dotgit.DotGit
-}
-
-func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
- f, err := s.dir.IndexWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
- bw := bufio.NewWriter(f)
- defer func() {
- if e := bw.Flush(); err == nil && e != nil {
- err = e
- }
- }()
-
- e := index.NewEncoder(bw)
- err = e.Encode(idx)
- return err
-}
-
-func (s *IndexStorage) Index() (i *index.Index, err error) {
- idx := &index.Index{
- Version: 2,
- }
-
- f, err := s.dir.Index()
- if err != nil {
- if os.IsNotExist(err) {
- return idx, nil
- }
-
- return nil, err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- d := index.NewDecoder(bufio.NewReader(f))
- err = d.Decode(idx)
- return idx, err
-}
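SetIndex above stacks two deferred error paths: CheckClose for the file and a manual Flush check for the bufio.Writer. A standalone sketch of that named-return pattern, assuming a hypothetical writeLines helper and file name:

    package main

    import (
        "bufio"
        "fmt"
        "os"
    )

    func writeLines(path string, lines []string) (err error) {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        // Runs last: surfaces the Close error if nothing failed earlier.
        defer func() {
            if cerr := f.Close(); err == nil {
                err = cerr
            }
        }()

        bw := bufio.NewWriter(f)
        // Runs first: a failed Flush must not be silently dropped.
        defer func() {
            if ferr := bw.Flush(); err == nil && ferr != nil {
                err = ferr
            }
        }()

        for _, l := range lines {
            if _, err = fmt.Fprintln(bw, l); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        if err := writeLines("demo.txt", []string{"a", "b"}); err != nil {
            panic(err)
        }
    }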
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go
deleted file mode 100644
index 9272206741..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package filesystem
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/storage"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
-)
-
-type ModuleStorage struct {
- dir *dotgit.DotGit
-}
-
-func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
- fs, err := s.dir.Module(name)
- if err != nil {
- return nil, err
- }
-
- return NewStorage(fs, cache.NewObjectLRUDefault()), nil
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go
deleted file mode 100644
index ad5d8d0009..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go
+++ /dev/null
@@ -1,815 +0,0 @@
-package filesystem
-
-import (
- "io"
- "os"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
- "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-type ObjectStorage struct {
- options Options
-
-	// objectCache is an object cache used to cache delta bases and also
-	// recently loaded loose objects.
- objectCache cache.Object
-
- dir *dotgit.DotGit
- index map[plumbing.Hash]idxfile.Index
-
- packList []plumbing.Hash
- packListIdx int
- packfiles map[plumbing.Hash]*packfile.Packfile
-}
-
-// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
-func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
- return NewObjectStorageWithOptions(dir, objectCache, Options{})
-}
-
-// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
-func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
- return &ObjectStorage{
- options: ops,
- objectCache: objectCache,
- dir: dir,
- }
-}
-
-func (s *ObjectStorage) requireIndex() error {
- if s.index != nil {
- return nil
- }
-
- s.index = make(map[plumbing.Hash]idxfile.Index)
- packs, err := s.dir.ObjectPacks()
- if err != nil {
- return err
- }
-
- for _, h := range packs {
- if err := s.loadIdxFile(h); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Reindex re-indexes all packfiles. Useful if git changed packfiles externally.
-func (s *ObjectStorage) Reindex() {
- s.index = nil
-}
-
-func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
- f, err := s.dir.ObjectPackIdx(h)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
-
- idxf := idxfile.NewMemoryIndex()
- d := idxfile.NewDecoder(f)
- if err = d.Decode(idxf); err != nil {
- return err
- }
-
- s.index[h] = idxf
- return err
-}
-
-func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
- return &plumbing.MemoryObject{}
-}
-
-func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- w, err := s.dir.NewObjectPack()
- if err != nil {
- return nil, err
- }
-
- w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
- index, err := writer.Index()
- if err == nil {
- s.index[h] = index
- }
- }
-
- return w, nil
-}
-
-// SetEncodedObject adds a new object to the storage.
-func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
- if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
- return plumbing.ZeroHash, plumbing.ErrInvalidType
- }
-
- ow, err := s.dir.NewObject()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(ow, &err)
-
- or, err := o.Reader()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(or, &err)
-
- if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err = io.Copy(ow, or); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return o.Hash(), err
-}
-
-// HasEncodedObject returns nil if the object exists, without actually
-// reading the object data from storage.
-func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
- // Check unpacked objects
- f, err := s.dir.Object(h)
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- // Fall through to check packed objects.
- } else {
- defer ioutil.CheckClose(f, &err)
- return nil
- }
-
- // Check packed objects.
- if err := s.requireIndex(); err != nil {
- return err
- }
- _, _, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return plumbing.ErrObjectNotFound
- }
- return nil
-}
-
-func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
- size int64, err error) {
- f, err := s.dir.Object(h)
- if err != nil {
- if os.IsNotExist(err) {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return 0, err
- }
-
- r, err := objfile.NewReader(f)
- if err != nil {
- return 0, err
- }
- defer ioutil.CheckClose(r, &err)
-
- _, size, err = r.Header()
- return size, err
-}
-
-func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
- if p := s.packfileFromCache(pack); p != nil {
- return p, nil
- }
-
- f, err := s.dir.ObjectPack(pack)
- if err != nil {
- return nil, err
- }
-
- var p *packfile.Packfile
- if s.objectCache != nil {
- p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
- } else {
- p = packfile.NewPackfile(idx, s.dir.Fs(), f)
- }
-
- return p, s.storePackfileInCache(pack, p)
-}
-
-func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
- if s.packfiles == nil {
- if s.options.KeepDescriptors {
- s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
- } else if s.options.MaxOpenDescriptors > 0 {
- s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
- s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
- }
- }
-
- return s.packfiles[hash]
-}
-
-func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
- if s.options.KeepDescriptors {
- s.packfiles[hash] = p
- return nil
- }
-
- if s.options.MaxOpenDescriptors <= 0 {
- return nil
- }
-
- // start over as the limit of packList is hit
- if s.packListIdx >= len(s.packList) {
- s.packListIdx = 0
- }
-
- // close the existing packfile if open
- if next := s.packList[s.packListIdx]; !next.IsZero() {
- open := s.packfiles[next]
- delete(s.packfiles, next)
- if open != nil {
- if err := open.Close(); err != nil {
- return err
- }
- }
- }
-
- // cache newly open packfile
- s.packList[s.packListIdx] = hash
- s.packfiles[hash] = p
- s.packListIdx++
-
- return nil
-}
-
-func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
- size int64, err error) {
- if err := s.requireIndex(); err != nil {
- return 0, err
- }
-
- pack, _, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return 0, plumbing.ErrObjectNotFound
- }
-
- idx := s.index[pack]
- hash, err := idx.FindHash(offset)
- if err == nil {
- obj, ok := s.objectCache.Get(hash)
- if ok {
- return obj.Size(), nil
- }
-	} else if err != plumbing.ErrObjectNotFound {
- return 0, err
- }
-
- p, err := s.packfile(idx, pack)
- if err != nil {
- return 0, err
- }
-
- if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
- defer ioutil.CheckClose(p, &err)
- }
-
- return p.GetSizeByOffset(offset)
-}
-
-// EncodedObjectSize returns the plaintext size of the given object,
-// without actually reading the full object data from storage.
-func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
- size int64, err error) {
- size, err = s.encodedObjectSizeFromUnpacked(h)
- if err != nil && err != plumbing.ErrObjectNotFound {
- return 0, err
- } else if err == nil {
- return size, nil
- }
-
- return s.encodedObjectSizeFromPackfile(h)
-}
-
-// EncodedObject returns the object with the given hash, by searching for it in
-// the packfile and the git object directories.
-func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- var obj plumbing.EncodedObject
- var err error
-
- if s.index != nil {
- obj, err = s.getFromPackfile(h, false)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromUnpacked(h)
- }
- } else {
- obj, err = s.getFromUnpacked(h)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromPackfile(h, false)
- }
- }
-
- // If the error is still object not found, check if it's a shared object
- // repository.
- if err == plumbing.ErrObjectNotFound {
- dotgits, e := s.dir.Alternates()
- if e == nil {
- // Create a new object storage with the DotGit(s) and check for the
- // required hash object. Skip when not found.
- for _, dg := range dotgits {
- o := NewObjectStorage(dg, s.objectCache)
- enobj, enerr := o.EncodedObject(t, h)
- if enerr != nil {
- continue
- }
- return enobj, nil
- }
- }
- }
-
- if err != nil {
- return nil, err
- }
-
- if plumbing.AnyObject != t && obj.Type() != t {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-// DeltaObject returns the object with the given hash, by searching for
-// it in the packfile and the git object directories.
-func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType,
- h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, err := s.getFromUnpacked(h)
- if err == plumbing.ErrObjectNotFound {
- obj, err = s.getFromPackfile(h, true)
- }
-
- if err != nil {
- return nil, err
- }
-
- if plumbing.AnyObject != t && obj.Type() != t {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
- f, err := s.dir.Object(h)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return nil, err
- }
- defer ioutil.CheckClose(f, &err)
-
- if cacheObj, found := s.objectCache.Get(h); found {
- return cacheObj, nil
- }
-
- obj = s.NewEncodedObject()
- r, err := objfile.NewReader(f)
- if err != nil {
- return nil, err
- }
-
- defer ioutil.CheckClose(r, &err)
-
- t, size, err := r.Header()
- if err != nil {
- return nil, err
- }
-
- obj.SetType(t)
- obj.SetSize(size)
- w, err := obj.Writer()
- if err != nil {
- return nil, err
- }
-
- s.objectCache.Put(obj)
-
- _, err = io.Copy(w, r)
- return obj, err
-}
-
-// getFromPackfile returns the object with the given hash, by searching for it
-// in the packfile.
-func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
- plumbing.EncodedObject, error) {
-
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- pack, hash, offset := s.findObjectInPackfile(h)
- if offset == -1 {
- return nil, plumbing.ErrObjectNotFound
- }
-
- idx := s.index[pack]
- p, err := s.packfile(idx, pack)
- if err != nil {
- return nil, err
- }
-
- if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
- defer ioutil.CheckClose(p, &err)
- }
-
- if canBeDelta {
- return s.decodeDeltaObjectAt(p, offset, hash)
- }
-
- return s.decodeObjectAt(p, offset)
-}
-
-func (s *ObjectStorage) decodeObjectAt(
- p *packfile.Packfile,
- offset int64,
-) (plumbing.EncodedObject, error) {
- hash, err := p.FindHash(offset)
- if err == nil {
- obj, ok := s.objectCache.Get(hash)
- if ok {
- return obj, nil
- }
- }
-
- if err != nil && err != plumbing.ErrObjectNotFound {
- return nil, err
- }
-
- return p.GetByOffset(offset)
-}
-
-func (s *ObjectStorage) decodeDeltaObjectAt(
- p *packfile.Packfile,
- offset int64,
- hash plumbing.Hash,
-) (plumbing.EncodedObject, error) {
- scan := p.Scanner()
- header, err := scan.SeekObjectHeader(offset)
- if err != nil {
- return nil, err
- }
-
- var (
- base plumbing.Hash
- )
-
- switch header.Type {
- case plumbing.REFDeltaObject:
- base = header.Reference
- case plumbing.OFSDeltaObject:
- base, err = p.FindHash(header.OffsetReference)
- if err != nil {
- return nil, err
- }
- default:
- return s.decodeObjectAt(p, offset)
- }
-
- obj := &plumbing.MemoryObject{}
- obj.SetType(header.Type)
- w, err := obj.Writer()
- if err != nil {
- return nil, err
- }
-
- if _, _, err := scan.NextObject(w); err != nil {
- return nil, err
- }
-
- return newDeltaObject(obj, hash, base, header.Length), nil
-}
-
-func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
- for packfile, index := range s.index {
- offset, err := index.FindOffset(h)
- if err == nil {
- return packfile, h, offset
- }
- }
-
- return plumbing.ZeroHash, plumbing.ZeroHash, -1
-}
-
-// IterEncodedObjects returns an iterator for all the objects in the storage
-// (both loose and packed) with the given type.
-func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- objects, err := s.dir.Objects()
- if err != nil {
- return nil, err
- }
-
- seen := make(map[plumbing.Hash]struct{})
- var iters []storer.EncodedObjectIter
- if len(objects) != 0 {
- iters = append(iters, &objectsIter{s: s, t: t, h: objects})
- seen = hashListAsMap(objects)
- }
-
- packi, err := s.buildPackfileIters(t, seen)
- if err != nil {
- return nil, err
- }
-
- iters = append(iters, packi)
- return storer.NewMultiEncodedObjectIter(iters), nil
-}
-
-func (s *ObjectStorage) buildPackfileIters(
- t plumbing.ObjectType,
- seen map[plumbing.Hash]struct{},
-) (storer.EncodedObjectIter, error) {
- if err := s.requireIndex(); err != nil {
- return nil, err
- }
-
- packs, err := s.dir.ObjectPacks()
- if err != nil {
- return nil, err
- }
- return &lazyPackfilesIter{
- hashes: packs,
- open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) {
- pack, err := s.dir.ObjectPack(h)
- if err != nil {
- return nil, err
- }
- return newPackfileIter(
- s.dir.Fs(), pack, t, seen, s.index[h],
- s.objectCache, s.options.KeepDescriptors,
- )
- },
- }, nil
-}
-
-// Close closes all opened files.
-func (s *ObjectStorage) Close() error {
- var firstError error
- if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
- for _, packfile := range s.packfiles {
- err := packfile.Close()
- if firstError == nil && err != nil {
- firstError = err
- }
- }
- }
-
- s.packfiles = nil
- s.dir.Close()
-
- return firstError
-}
-
-type lazyPackfilesIter struct {
- hashes []plumbing.Hash
- open func(h plumbing.Hash) (storer.EncodedObjectIter, error)
- cur storer.EncodedObjectIter
-}
-
-func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) {
- for {
- if it.cur == nil {
- if len(it.hashes) == 0 {
- return nil, io.EOF
- }
- h := it.hashes[0]
- it.hashes = it.hashes[1:]
-
- sub, err := it.open(h)
- if err == io.EOF {
- continue
- } else if err != nil {
- return nil, err
- }
- it.cur = sub
- }
- ob, err := it.cur.Next()
- if err == io.EOF {
- it.cur.Close()
- it.cur = nil
- continue
- } else if err != nil {
- return nil, err
- }
- return ob, nil
- }
-}
-
-func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- return storer.ForEachIterator(it, cb)
-}
-
-func (it *lazyPackfilesIter) Close() {
- if it.cur != nil {
- it.cur.Close()
- it.cur = nil
- }
- it.hashes = nil
-}
-
-type packfileIter struct {
- pack billy.File
- iter storer.EncodedObjectIter
- seen map[plumbing.Hash]struct{}
-
- // tells whether the pack file should be left open after iteration or not
- keepPack bool
-}
-
-// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
-// and object type. The packfile and index file will be closed after they're
-// used. If keepPack is true, the packfile won't be closed after the iteration
-// finishes.
-func NewPackfileIter(
- fs billy.Filesystem,
- f billy.File,
- idxFile billy.File,
- t plumbing.ObjectType,
- keepPack bool,
-) (storer.EncodedObjectIter, error) {
- idx := idxfile.NewMemoryIndex()
- if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
- return nil, err
- }
-
- if err := idxFile.Close(); err != nil {
- return nil, err
- }
-
- seen := make(map[plumbing.Hash]struct{})
- return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
-}
-
-func newPackfileIter(
- fs billy.Filesystem,
- f billy.File,
- t plumbing.ObjectType,
- seen map[plumbing.Hash]struct{},
- index idxfile.Index,
- cache cache.Object,
- keepPack bool,
-) (storer.EncodedObjectIter, error) {
- var p *packfile.Packfile
- if cache != nil {
- p = packfile.NewPackfileWithCache(index, fs, f, cache)
- } else {
- p = packfile.NewPackfile(index, fs, f)
- }
-
- iter, err := p.GetByType(t)
- if err != nil {
- return nil, err
- }
-
- return &packfileIter{
- pack: f,
- iter: iter,
- seen: seen,
- keepPack: keepPack,
- }, nil
-}
-
-func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
- for {
- obj, err := iter.iter.Next()
- if err != nil {
- return nil, err
- }
-
- if _, ok := iter.seen[obj.Hash()]; ok {
- continue
- }
-
- return obj, nil
- }
-}
-
-func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- for {
- o, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- iter.Close()
- return nil
- }
- return err
- }
-
- if err := cb(o); err != nil {
- return err
- }
- }
-}
-
-func (iter *packfileIter) Close() {
- iter.iter.Close()
- if !iter.keepPack {
- _ = iter.pack.Close()
- }
-}
-
-type objectsIter struct {
- s *ObjectStorage
- t plumbing.ObjectType
- h []plumbing.Hash
-}
-
-func (iter *objectsIter) Next() (plumbing.EncodedObject, error) {
- if len(iter.h) == 0 {
- return nil, io.EOF
- }
-
- obj, err := iter.s.getFromUnpacked(iter.h[0])
- iter.h = iter.h[1:]
-
- if err != nil {
- return nil, err
- }
-
- if iter.t != plumbing.AnyObject && iter.t != obj.Type() {
- return iter.Next()
- }
-
- return obj, err
-}
-
-func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
- for {
- o, err := iter.Next()
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
-
- if err := cb(o); err != nil {
- return err
- }
- }
-}
-
-func (iter *objectsIter) Close() {
- iter.h = []plumbing.Hash{}
-}
-
-func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} {
- m := make(map[plumbing.Hash]struct{}, len(l))
- for _, h := range l {
- m[h] = struct{}{}
- }
- return m
-}
-
-func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- err := s.dir.ForEachObjectHash(fun)
- if err == storer.ErrStop {
- return nil
- }
- return err
-}
-
-func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
- fi, err := s.dir.ObjectStat(hash)
- if err != nil {
- return time.Time{}, err
- }
- return fi.ModTime(), nil
-}
-
-func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error {
- return s.dir.ObjectDelete(hash)
-}
-
-func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
- return s.dir.ObjectPacks()
-}
-
-func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error {
- return s.dir.DeleteOldObjectPackAndIndex(h, t)
-}
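A hedged usage sketch for the storage above: open the object database of an existing repository while capping the number of pack descriptors kept open. The calls mirror the deleted v4 code; the repository path is hypothetical:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-billy.v4/osfs"
        "gopkg.in/src-d/go-git.v4/plumbing/cache"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
        "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
    )

    func main() {
        fs := osfs.New("/tmp/example/.git") // hypothetical repository
        dir := dotgit.New(fs)

        s := filesystem.NewObjectStorageWithOptions(
            dir,
            cache.NewObjectLRUDefault(),
            filesystem.Options{MaxOpenDescriptors: 16},
        )
        // Close is required here, since descriptors are kept open.
        defer s.Close()

        packs, err := s.ObjectPacks()
        if err != nil {
            panic(err)
        }
        fmt.Println("packs:", len(packs))
    }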
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go
deleted file mode 100644
index a891b837be..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package filesystem
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
-)
-
-type ReferenceStorage struct {
- dir *dotgit.DotGit
-}
-
-func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
- return r.dir.SetRef(ref, nil)
-}
-
-func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
- return r.dir.SetRef(ref, old)
-}
-
-func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
- return r.dir.Ref(n)
-}
-
-func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
- refs, err := r.dir.Refs()
- if err != nil {
- return nil, err
- }
-
- return storer.NewReferenceSliceIter(refs), nil
-}
-
-func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
- return r.dir.RemoveRef(n)
-}
-
-func (r *ReferenceStorage) CountLooseRefs() (int, error) {
- return r.dir.CountLooseRefs()
-}
-
-func (r *ReferenceStorage) PackRefs() error {
- return r.dir.PackRefs()
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go
deleted file mode 100644
index 502d406da6..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package filesystem
-
-import (
- "bufio"
- "fmt"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
-)
-
-// ShallowStorage is where the shallow commits are stored; it is an internal
-// type used to manipulate the shallow file.
-type ShallowStorage struct {
- dir *dotgit.DotGit
-}
-
-// SetShallow saves the shallow commits in the shallow file inside the .git
-// folder, one commit per line, each represented by a 40-byte hexadecimal
-// object hash terminated by a newline.
-func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) (err error) {
- f, err := s.dir.ShallowWriter()
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(f, &err)
- for _, h := range commits {
- if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
- return err
- }
- }
-
- return err
-}
-
-// Shallow returns the shallow commits read from the shallow file in .git.
-func (s *ShallowStorage) Shallow() (hash []plumbing.Hash, err error) {
-	f, err := s.dir.Shallow()
-	if f == nil || err != nil {
-		return nil, err
-	}
-
-	defer ioutil.CheckClose(f, &err)
-
-	scn := bufio.NewScanner(f)
-	for scn.Scan() {
-		hash = append(hash, plumbing.NewHash(scn.Text()))
-	}
-
-	return hash, scn.Err()
-}
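The shallow file handled above is plain text: one 40-character hexadecimal commit hash per line. The same scan loop over in-memory data, as a self-contained sketch with made-up hashes:

    package main

    import (
        "bufio"
        "fmt"
        "strings"
    )

    func main() {
        // Two fake commit hashes, one per line, as in .git/shallow.
        data := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n" +
            "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n"

        var hashes []string
        scn := bufio.NewScanner(strings.NewReader(data))
        for scn.Scan() {
            hashes = append(hashes, scn.Text())
        }
        if err := scn.Err(); err != nil {
            panic(err)
        }
        fmt.Println(hashes)
    }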
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go
deleted file mode 100644
index 88d1ed483c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Package filesystem is a storage backend based on filesystems.
-package filesystem
-
-import (
- "gopkg.in/src-d/go-git.v4/plumbing/cache"
- "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// Storage is an implementation of git.Storer that stores data on disk in the
-// standard git format (that is, the .git directory). Zero values of this type
-// are not safe to use; see the NewStorage function below.
-type Storage struct {
- fs billy.Filesystem
- dir *dotgit.DotGit
-
- ObjectStorage
- ReferenceStorage
- IndexStorage
- ShallowStorage
- ConfigStorage
- ModuleStorage
-}
-
-// Options holds configuration for the storage.
-type Options struct {
- // ExclusiveAccess means that the filesystem is not modified externally
- // while the repo is open.
- ExclusiveAccess bool
-	// KeepDescriptors makes the file descriptors be reused, but they will
-	// need to be closed manually by calling Close().
- KeepDescriptors bool
- // MaxOpenDescriptors is the max number of file descriptors to keep
- // open. If KeepDescriptors is true, all file descriptors will remain open.
- MaxOpenDescriptors int
-}
-
-// NewStorage returns a new Storage backed by a given `billy.Filesystem` and cache.
-func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
- return NewStorageWithOptions(fs, cache, Options{})
-}
-
-// NewStorageWithOptions returns a new Storage with extra options,
-// backed by a given `billy.Filesystem` and cache.
-func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
- dirOps := dotgit.Options{
- ExclusiveAccess: ops.ExclusiveAccess,
- }
- dir := dotgit.NewWithOptions(fs, dirOps)
-
- return &Storage{
- fs: fs,
- dir: dir,
-
- ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops),
- ReferenceStorage: ReferenceStorage{dir: dir},
- IndexStorage: IndexStorage{dir: dir},
- ShallowStorage: ShallowStorage{dir: dir},
- ConfigStorage: ConfigStorage{dir: dir},
- ModuleStorage: ModuleStorage{dir: dir},
- }
-}
-
-// Filesystem returns the underlying filesystem
-func (s *Storage) Filesystem() billy.Filesystem {
- return s.fs
-}
-
-// Init initializes the .git directory.
-func (s *Storage) Init() error {
- return s.dir.Initialize()
-}
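A usage sketch for the composed Storage above, wiring it into a Repository via the package-level Open function (the one referenced by submodule.go later in this diff); the paths are hypothetical:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-billy.v4/osfs"
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/cache"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
    )

    func main() {
        // Storage over .git, worktree over the checkout itself.
        st := filesystem.NewStorage(osfs.New("/tmp/example/.git"), cache.NewObjectLRUDefault())
        wt := osfs.New("/tmp/example")

        repo, err := git.Open(st, wt)
        if err != nil {
            panic(err)
        }

        head, err := repo.Head()
        if err != nil {
            panic(err)
        }
        fmt.Println("HEAD:", head.Hash())
    }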
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go b/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go
deleted file mode 100644
index f240f2a1f9..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Package memory is a storage backend based on memory.
-package memory
-
-import (
- "fmt"
- "time"
-
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/storage"
-)
-
-var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
-
-// Storage is an implementation of git.Storer that stores data in memory and is
-// therefore ephemeral. It should be used in controlled environments, since the
-// in-memory representation of a repository can fill the machine's memory. On
-// the other hand, this storage has the best performance.
-type Storage struct {
- ConfigStorage
- ObjectStorage
- ShallowStorage
- IndexStorage
- ReferenceStorage
- ModuleStorage
-}
-
-// NewStorage returns a new Storage based on memory.
-func NewStorage() *Storage {
- return &Storage{
- ReferenceStorage: make(ReferenceStorage),
- ConfigStorage: ConfigStorage{},
- ShallowStorage: ShallowStorage{},
- ObjectStorage: ObjectStorage{
- Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
- Commits: make(map[plumbing.Hash]plumbing.EncodedObject),
- Trees: make(map[plumbing.Hash]plumbing.EncodedObject),
- Blobs: make(map[plumbing.Hash]plumbing.EncodedObject),
- Tags: make(map[plumbing.Hash]plumbing.EncodedObject),
- },
- ModuleStorage: make(ModuleStorage),
- }
-}
-
-type ConfigStorage struct {
- config *config.Config
-}
-
-func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
- if err := cfg.Validate(); err != nil {
- return err
- }
-
- c.config = cfg
- return nil
-}
-
-func (c *ConfigStorage) Config() (*config.Config, error) {
- if c.config == nil {
- c.config = config.NewConfig()
- }
-
- return c.config, nil
-}
-
-type IndexStorage struct {
- index *index.Index
-}
-
-func (c *IndexStorage) SetIndex(idx *index.Index) error {
- c.index = idx
- return nil
-}
-
-func (c *IndexStorage) Index() (*index.Index, error) {
- if c.index == nil {
- c.index = &index.Index{Version: 2}
- }
-
- return c.index, nil
-}
-
-type ObjectStorage struct {
- Objects map[plumbing.Hash]plumbing.EncodedObject
- Commits map[plumbing.Hash]plumbing.EncodedObject
- Trees map[plumbing.Hash]plumbing.EncodedObject
- Blobs map[plumbing.Hash]plumbing.EncodedObject
- Tags map[plumbing.Hash]plumbing.EncodedObject
-}
-
-func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
- return &plumbing.MemoryObject{}
-}
-
-func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
- h := obj.Hash()
- o.Objects[h] = obj
-
- switch obj.Type() {
- case plumbing.CommitObject:
- o.Commits[h] = o.Objects[h]
- case plumbing.TreeObject:
- o.Trees[h] = o.Objects[h]
- case plumbing.BlobObject:
- o.Blobs[h] = o.Objects[h]
- case plumbing.TagObject:
- o.Tags[h] = o.Objects[h]
- default:
- return h, ErrUnsupportedObjectType
- }
-
- return h, nil
-}
-
-func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
- if _, ok := o.Objects[h]; !ok {
- return plumbing.ErrObjectNotFound
- }
- return nil
-}
-
-func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
- size int64, err error) {
- obj, ok := o.Objects[h]
- if !ok {
- return 0, plumbing.ErrObjectNotFound
- }
-
- return obj.Size(), nil
-}
-
-func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, ok := o.Objects[h]
- if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
- var series []plumbing.EncodedObject
- switch t {
- case plumbing.AnyObject:
- series = flattenObjectMap(o.Objects)
- case plumbing.CommitObject:
- series = flattenObjectMap(o.Commits)
- case plumbing.TreeObject:
- series = flattenObjectMap(o.Trees)
- case plumbing.BlobObject:
- series = flattenObjectMap(o.Blobs)
- case plumbing.TagObject:
- series = flattenObjectMap(o.Tags)
- }
-
- return storer.NewEncodedObjectSliceIter(series), nil
-}
-
-func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject {
- objects := make([]plumbing.EncodedObject, 0, len(m))
- for _, obj := range m {
- objects = append(objects, obj)
- }
- return objects
-}
-
-func (o *ObjectStorage) Begin() storer.Transaction {
- return &TxObjectStorage{
- Storage: o,
- Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
- }
-}
-
-func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
- for h := range o.Objects {
- err := fun(h)
- if err != nil {
- if err == storer.ErrStop {
- return nil
- }
- return err
- }
- }
- return nil
-}
-
-func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
- return nil, nil
-}
-func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error {
- return nil
-}
-
-var errNotSupported = fmt.Errorf("Not supported")
-
-func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
- return time.Time{}, errNotSupported
-}
-func (s *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
- return errNotSupported
-}
-
-type TxObjectStorage struct {
- Storage *ObjectStorage
- Objects map[plumbing.Hash]plumbing.EncodedObject
-}
-
-func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
- h := obj.Hash()
- tx.Objects[h] = obj
-
- return h, nil
-}
-
-func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
- obj, ok := tx.Objects[h]
- if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
- return nil, plumbing.ErrObjectNotFound
- }
-
- return obj, nil
-}
-
-func (tx *TxObjectStorage) Commit() error {
- for h, obj := range tx.Objects {
- delete(tx.Objects, h)
- if _, err := tx.Storage.SetEncodedObject(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (tx *TxObjectStorage) Rollback() error {
- tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject)
- return nil
-}
-
-type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference
-
-func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error {
- if ref != nil {
- r[ref.Name()] = ref
- }
-
- return nil
-}
-
-func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
- if ref == nil {
- return nil
- }
-
- if old != nil {
- tmp := r[ref.Name()]
- if tmp != nil && tmp.Hash() != old.Hash() {
- return storage.ErrReferenceHasChanged
- }
- }
- r[ref.Name()] = ref
- return nil
-}
-
-func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
- ref, ok := r[n]
- if !ok {
- return nil, plumbing.ErrReferenceNotFound
- }
-
- return ref, nil
-}
-
-func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
- var refs []*plumbing.Reference
- for _, ref := range r {
- refs = append(refs, ref)
- }
-
- return storer.NewReferenceSliceIter(refs), nil
-}
-
-func (r ReferenceStorage) CountLooseRefs() (int, error) {
- return len(r), nil
-}
-
-func (r ReferenceStorage) PackRefs() error {
- return nil
-}
-
-func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
- delete(r, n)
- return nil
-}
-
-type ShallowStorage []plumbing.Hash
-
-func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
- *s = commits
- return nil
-}
-
-func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) {
- return s, nil
-}
-
-type ModuleStorage map[string]*Storage
-
-func (s ModuleStorage) Module(name string) (storage.Storer, error) {
- if m, ok := s[name]; ok {
- return m, nil
- }
-
- m := NewStorage()
- s[name] = m
-
- return m, nil
-}
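A small roundtrip against the in-memory storage above, as a sketch; the branch name and hash are made up:

    package main

    import (
        "fmt"

        "gopkg.in/src-d/go-git.v4/plumbing"
        "gopkg.in/src-d/go-git.v4/storage/memory"
    )

    func main() {
        st := memory.NewStorage()

        ref := plumbing.NewHashReference(
            "refs/heads/main", // hypothetical branch name
            plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
        )
        if err := st.SetReference(ref); err != nil {
            panic(err)
        }

        got, err := st.Reference("refs/heads/main")
        if err != nil {
            panic(err)
        }
        fmt.Println(got)
    }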
diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go b/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go
deleted file mode 100644
index 5de0cfb96c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package storage
-
-import (
- "errors"
-
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-var ErrReferenceHasChanged = errors.New("reference has changed concurrently")
-
-// Storer is a generic storage of objects, references and any information
-// related to a particular repository. The package gopkg.in/src-d/go-git.v4/storage
-// contains two implementations: a filesystem-based implementation (such as
-// `.git`) and an ephemeral in-memory implementation.
-type Storer interface {
- storer.EncodedObjectStorer
- storer.ReferenceStorer
- storer.ShallowStorer
- storer.IndexStorer
- config.ConfigStorer
- ModuleStorer
-}
-
-// ModuleStorer allows interacting with the modules' Storers.
-type ModuleStorer interface {
-	// Module returns a Storer representing a submodule; if it does not exist, a
-	// new empty Storer is returned.
- Module(name string) (Storer, error)
-}
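Both backends in this diff are meant to satisfy this interface; a compile-time sketch of that claim:

    package main

    import (
        "gopkg.in/src-d/go-git.v4/storage"
        "gopkg.in/src-d/go-git.v4/storage/filesystem"
        "gopkg.in/src-d/go-git.v4/storage/memory"
    )

    // Compile-time assertions that both backends satisfy storage.Storer.
    var (
        _ storage.Storer = (*filesystem.Storage)(nil)
        _ storage.Storer = (*memory.Storage)(nil)
    )

    func main() {}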
diff --git a/vendor/gopkg.in/src-d/go-git.v4/submodule.go b/vendor/gopkg.in/src-d/go-git.v4/submodule.go
deleted file mode 100644
index a4eb7ded89..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/submodule.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package git
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-)
-
-var (
- ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized")
- ErrSubmoduleNotInitialized = errors.New("submodule not initialized")
-)
-
-// Submodule allows you to keep another Git repository in a subdirectory of
-// your repository.
-type Submodule struct {
- // initialized defines if a submodule was already initialized.
- initialized bool
-
- c *config.Submodule
- w *Worktree
-}
-
-// Config returns the submodule config
-func (s *Submodule) Config() *config.Submodule {
- return s.c
-}
-
-// Init initializes the submodule, reading the Entry recorded in the index for
-// the given submodule.
-func (s *Submodule) Init() error {
- cfg, err := s.w.r.Storer.Config()
- if err != nil {
- return err
- }
-
- _, ok := cfg.Submodules[s.c.Name]
- if ok {
- return ErrSubmoduleAlreadyInitialized
- }
-
- s.initialized = true
-
- cfg.Submodules[s.c.Name] = s.c
- return s.w.r.Storer.SetConfig(cfg)
-}
-
-// Status returns the status of the submodule.
-func (s *Submodule) Status() (*SubmoduleStatus, error) {
- idx, err := s.w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- return s.status(idx)
-}
-
-func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) {
- status := &SubmoduleStatus{
- Path: s.c.Path,
- }
-
- e, err := idx.Entry(s.c.Path)
- if err != nil && err != index.ErrEntryNotFound {
- return nil, err
- }
-
- if e != nil {
- status.Expected = e.Hash
- }
-
- if !s.initialized {
- return status, nil
- }
-
- r, err := s.Repository()
- if err != nil {
- return nil, err
- }
-
- head, err := r.Head()
- if err == nil {
- status.Current = head.Hash()
- }
-
- if err != nil && err == plumbing.ErrReferenceNotFound {
- err = nil
- }
-
- return status, err
-}
-
-// Repository returns the Repository represented by this submodule
-func (s *Submodule) Repository() (*Repository, error) {
- if !s.initialized {
- return nil, ErrSubmoduleNotInitialized
- }
-
- storer, err := s.w.r.Storer.Module(s.c.Name)
- if err != nil {
- return nil, err
- }
-
- _, err = storer.Reference(plumbing.HEAD)
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- var exists bool
- if err == nil {
- exists = true
- }
-
- var worktree billy.Filesystem
- if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil {
- return nil, err
- }
-
- if exists {
- return Open(storer, worktree)
- }
-
- r, err := Init(storer, worktree)
- if err != nil {
- return nil, err
- }
-
- _, err = r.CreateRemote(&config.RemoteConfig{
- Name: DefaultRemoteName,
- URLs: []string{s.c.URL},
- })
-
- return r, err
-}
-
-// Update updates the registered submodule to match what the superproject
-// expects. The submodule should be initialized first, either by calling the
-// Init method or by setting SubmoduleUpdateOptions.Init to true.
-func (s *Submodule) Update(o *SubmoduleUpdateOptions) error {
- return s.UpdateContext(context.Background(), o)
-}
-
-// UpdateContext updates the registered submodule to match what the
-// superproject expects. The submodule should be initialized first, either by
-// calling the Init method or by setting SubmoduleUpdateOptions.Init to true.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
- return s.update(ctx, o, plumbing.ZeroHash)
-}
-
-func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error {
- if !s.initialized && !o.Init {
- return ErrSubmoduleNotInitialized
- }
-
- if !s.initialized && o.Init {
- if err := s.Init(); err != nil {
- return err
- }
- }
-
- idx, err := s.w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- hash := forceHash
- if hash.IsZero() {
- e, err := idx.Entry(s.c.Path)
- if err != nil {
- return err
- }
-
- hash = e.Hash
- }
-
- r, err := s.Repository()
- if err != nil {
- return err
- }
-
- if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil {
- return err
- }
-
- return s.doRecursiveUpdate(r, o)
-}
-
-func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error {
- if o.RecurseSubmodules == NoRecurseSubmodules {
- return nil
- }
-
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- l, err := w.Submodules()
- if err != nil {
- return err
- }
-
-	newOpts := &SubmoduleUpdateOptions{}
-	*newOpts = *o
-
-	newOpts.RecurseSubmodules--
-	return l.Update(newOpts)
-}
-
-func (s *Submodule) fetchAndCheckout(
- ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash,
-) error {
- if !o.NoFetch {
- err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth})
- if err != nil && err != NoErrAlreadyUpToDate {
- return err
- }
- }
-
- w, err := r.Worktree()
- if err != nil {
- return err
- }
-
- if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil {
- return err
- }
-
- head := plumbing.NewHashReference(plumbing.HEAD, hash)
- return r.Storer.SetReference(head)
-}
-
-// Submodules is a list of several submodules from the same repository.
-type Submodules []*Submodule
-
-// Init initializes the submodules in this list.
-func (s Submodules) Init() error {
- for _, sub := range s {
- if err := sub.Init(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Update updates all the submodules in this list.
-func (s Submodules) Update(o *SubmoduleUpdateOptions) error {
- return s.UpdateContext(context.Background(), o)
-}
-
-// UpdateContext updates all the submodules in this list.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
- for _, sub := range s {
- if err := sub.UpdateContext(ctx, o); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Status returns the status of the submodules.
-func (s Submodules) Status() (SubmodulesStatus, error) {
- var list SubmodulesStatus
-
- var r *Repository
- for _, sub := range s {
- if r == nil {
- r = sub.w.r
- }
-
- idx, err := r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- status, err := sub.status(idx)
- if err != nil {
- return nil, err
- }
-
- list = append(list, status)
- }
-
- return list, nil
-}
-
-// SubmodulesStatus contains the status for all submodules in the worktree.
-type SubmodulesStatus []*SubmoduleStatus
-
-// String is equivalent to `git submodule status`
-func (s SubmodulesStatus) String() string {
- buf := bytes.NewBuffer(nil)
- for _, sub := range s {
- fmt.Fprintln(buf, sub)
- }
-
- return buf.String()
-}
-
-// SubmoduleStatus contains the status for a submodule in the worktree
-type SubmoduleStatus struct {
- Path string
- Current plumbing.Hash
- Expected plumbing.Hash
- Branch plumbing.ReferenceName
-}
-
-// IsClean reports whether the HEAD of the submodule equals the expected commit.
-func (s *SubmoduleStatus) IsClean() bool {
- return s.Current == s.Expected
-}
-
-// String is equivalent to `git submodule status <submodule>`
-//
-// This will print the SHA-1 of the currently checked out commit for a
-// submodule, along with the submodule path and the output of git describe for
-// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not
-// initialized, or + if the currently checked out submodule commit does not
-// match the SHA-1 found in the index of the containing repository.
-func (s *SubmoduleStatus) String() string {
- var extra string
- var status = ' '
-
- if s.Current.IsZero() {
- status = '-'
- } else if !s.IsClean() {
- status = '+'
- }
-
- if len(s.Branch) != 0 {
- extra = string(s.Branch[5:])
- } else if !s.Current.IsZero() {
- extra = s.Current.String()[:7]
- }
-
- if extra != "" {
- extra = fmt.Sprintf(" (%s)", extra)
- }
-
- return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra)
-}
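A hedged end-to-end sketch of the submodule API above; the repository path is hypothetical, and NoRecurseSubmodules is the constant used by doRecursiveUpdate:

    package main

    import (
        "fmt"

        git "gopkg.in/src-d/go-git.v4"
    )

    func main() {
        repo, err := git.PlainOpen("/tmp/example") // hypothetical path
        if err != nil {
            panic(err)
        }

        w, err := repo.Worktree()
        if err != nil {
            panic(err)
        }

        subs, err := w.Submodules()
        if err != nil {
            panic(err)
        }

        // Init and update every submodule, without recursing further.
        if err := subs.Update(&git.SubmoduleUpdateOptions{
            Init:              true,
            RecurseSubmodules: git.NoRecurseSubmodules,
        }); err != nil {
            panic(err)
        }

        status, err := subs.Status()
        if err != nil {
            panic(err)
        }
        fmt.Print(status)
    }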
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go
deleted file mode 100644
index 12e57c300e..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Package binary implements syntactic-sugar functions on top of the standard
-// library binary package.
-package binary
-
-import (
- "bufio"
- "encoding/binary"
- "io"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// Read reads structured binary data from r into data. Bytes are read and
-// decoded in BigEndian order
-// https://golang.org/pkg/encoding/binary/#Read
-func Read(r io.Reader, data ...interface{}) error {
- for _, v := range data {
- if err := binary.Read(r, binary.BigEndian, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// ReadUntil reads from r until delim is found.
-func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
- if bufr, ok := r.(*bufio.Reader); ok {
- return ReadUntilFromBufioReader(bufr, delim)
- }
-
- var buf [1]byte
- value := make([]byte, 0, 16)
- for {
-		if _, err := io.ReadFull(r, buf[:]); err != nil {
-			return nil, err
-		}
-
- if buf[0] == delim {
- return value, nil
- }
-
- value = append(value, buf[0])
- }
-}
-
-// ReadUntilFromBufioReader is like (*bufio.Reader).ReadBytes but drops the
-// delimiter from the result.
-func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
- value, err := r.ReadBytes(delim)
- if err != nil || len(value) == 0 {
- return nil, err
- }
-
- return value[:len(value)-1], nil
-}
-
-// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
-//
-// Ordinary VLQ has some redundancies, example: the number 358 can be
-// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the
-// 4-octet VLQ 0x80808166 and so forth.
-//
-// To avoid these redundancies, the VLQ format used in Git removes this
-// prepending redundancy and extends the representable range of shorter
-// VLQs by adding an offset to VLQs of 2 or more octets in such a way
-// that the lowest possible value for such an (N+1)-octet VLQ becomes
-// exactly one more than the maximum possible value for an N-octet VLQ.
-// In particular, since a 1-octet VLQ can store a maximum value of 127,
-// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of
-// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is
-// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ
-// (0x808000) has a value of 16512 instead of zero, which means
-// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of
-// just 2097151. And so forth.
-//
-// This is how the offset is saved in C:
-//
-// dheader[pos] = ofs & 127;
-// while (ofs >>= 7)
-// dheader[--pos] = 128 | (--ofs & 127);
-//
-func ReadVariableWidthInt(r io.Reader) (int64, error) {
- var c byte
- if err := Read(r, &c); err != nil {
- return 0, err
- }
-
- var v = int64(c & maskLength)
- for c&maskContinue > 0 {
- v++
- if err := Read(r, &c); err != nil {
- return 0, err
- }
-
- v = (v << lengthBits) + int64(c&maskLength)
- }
-
- return v, nil
-}
-
-const (
-	maskContinue = uint8(128) // 1000 0000
-	maskLength   = uint8(127) // 0111 1111
-	lengthBits   = uint8(7)   // subsequent bytes have 7 bits to store the length
-)
-
-// ReadUint64 reads 8 bytes and returns them as a BigEndian uint64.
-func ReadUint64(r io.Reader) (uint64, error) {
- var v uint64
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadUint32 reads 4 bytes and returns them as a BigEndian uint32
-func ReadUint32(r io.Reader) (uint32, error) {
- var v uint32
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16
-func ReadUint16(r io.Reader) (uint16, error) {
- var v uint16
- if err := binary.Read(r, binary.BigEndian, &v); err != nil {
- return 0, err
- }
-
- return v, nil
-}
-
-// ReadHash reads a plumbing.Hash from r
-func ReadHash(r io.Reader) (plumbing.Hash, error) {
- var h plumbing.Hash
- if err := binary.Read(r, binary.BigEndian, h[:]); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return h, nil
-}
-
-const sniffLen = 8000
-
-// IsBinary detects if data is a binary value based on:
-// http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198
-func IsBinary(r io.Reader) (bool, error) {
- reader := bufio.NewReader(r)
- c := 0
- for {
- if c == sniffLen {
- break
- }
-
- b, err := reader.ReadByte()
- if err == io.EOF {
- break
- }
- if err != nil {
- return false, err
- }
-
- if b == byte(0) {
- return true, nil
- }
-
- c++
- }
-
- return false, nil
-}
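The offset encoding documented above can be exercised end to end. This sketch mirrors ReadVariableWidthInt and its writer counterpart over an in-memory buffer, reusing the example values from the comment (0x8166 for 358, 0xff7f for 16511, 0x808000 for 16512):

    package main

    import (
        "bytes"
        "fmt"
    )

    // writeVLQ mirrors WriteVariableWidthInt from write.go below.
    func writeVLQ(buf *bytes.Buffer, n int64) {
        out := []byte{byte(n & 0x7f)}
        n >>= 7
        for n != 0 {
            n--
            out = append([]byte{0x80 | byte(n&0x7f)}, out...)
            n >>= 7
        }
        buf.Write(out)
    }

    // readVLQ mirrors ReadVariableWidthInt above (errors elided for brevity).
    func readVLQ(buf *bytes.Buffer) int64 {
        c, _ := buf.ReadByte()
        v := int64(c & 0x7f)
        for c&0x80 > 0 {
            v++
            c, _ = buf.ReadByte()
            v = (v << 7) + int64(c&0x7f)
        }
        return v
    }

    func main() {
        var buf bytes.Buffer
        for _, n := range []int64{0, 127, 128, 358, 16511, 16512} {
            writeVLQ(&buf, n)
            fmt.Println(n, "->", readVLQ(&buf))
        }
    }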
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go
deleted file mode 100644
index c08c73a06b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package binary
-
-import (
- "encoding/binary"
- "io"
-)
-
-// Write writes the binary representation of data into w, using BigEndian order
-// https://golang.org/pkg/encoding/binary/#Write
-func Write(w io.Writer, data ...interface{}) error {
- for _, v := range data {
- if err := binary.Write(w, binary.BigEndian, v); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func WriteVariableWidthInt(w io.Writer, n int64) error {
- buf := []byte{byte(n & 0x7f)}
- n >>= 7
- for n != 0 {
- n--
- buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...)
- n >>= 7
- }
-
- _, err := w.Write(buf)
-
- return err
-}
-
-// WriteUint64 writes the binary representation of a uint64 into w, in BigEndian
-// order
-func WriteUint64(w io.Writer, value uint64) error {
- return binary.Write(w, binary.BigEndian, value)
-}
-
-// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian
-// order
-func WriteUint32(w io.Writer, value uint32) error {
- return binary.Write(w, binary.BigEndian, value)
-}
-
-// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian
-// order
-func WriteUint16(w io.Writer, value uint16) error {
- return binary.Write(w, binary.BigEndian, value)
-}
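A short usage sketch of the same BigEndian convention, writing a few fixed-size values into a buffer (the values are arbitrary):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        var buf bytes.Buffer
        // Same idea as Write above: several fixed-size values, BigEndian.
        for _, v := range []interface{}{uint32(0xCAFEBABE), uint16(7)} {
            if err := binary.Write(&buf, binary.BigEndian, v); err != nil {
                panic(err)
            }
        }
        fmt.Printf("% x\n", buf.Bytes()) // ca fe ba be 00 07
    }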
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go b/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go
deleted file mode 100644
index 6142ed0515..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Package diff implements line-oriented diffs, similar to the ancient
-// Unix diff command.
-//
-// The current implementation is just a wrapper around Sergi's
-// go-diff/diffmatchpatch library, which is a Go port of Neil
-// Fraser's google-diff-match-patch code.
-package diff
-
-import (
- "bytes"
- "time"
-
- "github.com/sergi/go-diff/diffmatchpatch"
-)
-
-// Do computes the (line-oriented) modifications needed to turn the src
-// string into the dst string. The underlying algorithm is Myers';
-// its complexity is O(N*d), where N is min(lines(src), lines(dst)) and d
-// is the size of the diff.
-func Do(src, dst string) (diffs []diffmatchpatch.Diff) {
- // the default timeout is time.Second which may be too small under heavy load
- return DoWithTimeout(src, dst, time.Hour)
-}
-
-// DoWithTimeout computes the (line-oriented) modifications needed to turn the
-// src string into the dst string. The `timeout` argument specifies the maximum
-// amount of time it is allowed to spend in this function. If the timeout
-// is exceeded, the parts of the strings which were not considered are turned
-// into a bulk delete+insert and the half-baked suboptimal result is returned at
-// once. The underlying algorithm is Myers'; its complexity is O(N*d), where N
-// is min(lines(src), lines(dst)) and d is the size of the diff.
-func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) {
- dmp := diffmatchpatch.New()
- dmp.DiffTimeout = timeout
- wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)
- diffs = dmp.DiffMainRunes(wSrc, wDst, false)
- diffs = dmp.DiffCharsToLines(diffs, warray)
- return diffs
-}
-
-// Dst computes and returns the destination text.
-func Dst(diffs []diffmatchpatch.Diff) string {
- var text bytes.Buffer
- for _, d := range diffs {
- if d.Type != diffmatchpatch.DiffDelete {
- text.WriteString(d.Text)
- }
- }
- return text.String()
-}
-
-// Src computes and returns the source text
-func Src(diffs []diffmatchpatch.Diff) string {
- var text bytes.Buffer
- for _, d := range diffs {
- if d.Type != diffmatchpatch.DiffInsert {
- text.WriteString(d.Text)
- }
- }
- return text.String()
-}
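What DoWithTimeout does under the hood, as a runnable sketch; the sample strings are arbitrary, and the three diffmatchpatch calls are exactly the ones used above:

    package main

    import (
        "fmt"
        "time"

        "github.com/sergi/go-diff/diffmatchpatch"
    )

    func main() {
        dmp := diffmatchpatch.New()
        dmp.DiffTimeout = time.Hour

        src, dst := "a\nb\nc\n", "a\nc\nd\n"

        // Line-oriented: map lines to runes, diff, then map back.
        wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)
        diffs := dmp.DiffMainRunes(wSrc, wDst, false)
        diffs = dmp.DiffCharsToLines(diffs, warray)

        for _, d := range diffs {
            fmt.Printf("%2d %q\n", d.Type, d.Text)
        }
    }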
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go b/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go
deleted file mode 100644
index e9dcbfe49b..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Package ioutil implements some I/O utility functions.
-package ioutil
-
-import (
- "bufio"
- "context"
- "errors"
- "io"
-
- "github.com/jbenet/go-context/io"
-)
-
-type readPeeker interface {
- io.Reader
- Peek(int) ([]byte, error)
-}
-
-var (
- ErrEmptyReader = errors.New("reader is empty")
-)
-
-// NonEmptyReader takes a reader and returns it if it is not empty, or
-// `ErrEmptyReader` if it is empty. If there is an error when reading the first
-// byte of the given reader, it will be propagated.
-func NonEmptyReader(r io.Reader) (io.Reader, error) {
- pr, ok := r.(readPeeker)
- if !ok {
- pr = bufio.NewReader(r)
- }
-
- _, err := pr.Peek(1)
- if err == io.EOF {
- return nil, ErrEmptyReader
- }
-
- if err != nil {
- return nil, err
- }
-
- return pr, nil
-}
-
-type readCloser struct {
- io.Reader
- closer io.Closer
-}
-
-func (r *readCloser) Close() error {
- return r.closer.Close()
-}
-
-// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and
-// `io.Closer`.
-func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
- return &readCloser{Reader: r, closer: c}
-}
-
-type writeCloser struct {
- io.Writer
- closer io.Closer
-}
-
-func (r *writeCloser) Close() error {
- return r.closer.Close()
-}
-
-// NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and
-// `io.Closer`.
-func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser {
- return &writeCloser{Writer: w, closer: c}
-}
-
-type writeNopCloser struct {
- io.Writer
-}
-
-func (writeNopCloser) Close() error { return nil }
-
-// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping
-// the provided Writer w.
-func WriteNopCloser(w io.Writer) io.WriteCloser {
- return writeNopCloser{w}
-}
-
-// CheckClose calls Close on the given io.Closer. If the given *error points to
-// nil, it will be assigned the error returned by Close. Otherwise, any error
-// returned by Close will be ignored. CheckClose is usually called with defer.
-func CheckClose(c io.Closer, err *error) {
- if cerr := c.Close(); cerr != nil && *err == nil {
- *err = cerr
- }
-}
-
-// NewContextWriter wraps a writer to make it respect the given Context.
-// If there is a blocking write, the returned Writer will return whenever the
-// context is cancelled (the return values are n=0 and err=ctx.Err()).
-func NewContextWriter(ctx context.Context, w io.Writer) io.Writer {
- return ctxio.NewWriter(ctx, w)
-}
-
-// NewContextReader wraps a reader to make it respect the given Context.
-// If there is a blocking read, the returned Reader will return whenever the
-// context is cancelled (the return values are n=0 and err=ctx.Err()).
-func NewContextReader(ctx context.Context, r io.Reader) io.Reader {
- return ctxio.NewReader(ctx, r)
-}
-
-// NewContextWriteCloser is like NewContextWriter but with the io.Closer interface.
-func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser {
- ctxw := ctxio.NewWriter(ctx, w)
- return NewWriteCloser(ctxw, w)
-}
-
-// NewContextReadCloser is like NewContextReader but with the io.Closer interface.
-func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser {
- ctxr := ctxio.NewReader(ctx, r)
- return NewReadCloser(ctxr, r)
-}
-
-type readerOnError struct {
- io.Reader
- notify func(error)
-}
-
-// NewReaderOnError returns an io.Reader that calls the notify function when an
-// unexpected (non-io.EOF) error happens, after calling the Read function.
-func NewReaderOnError(r io.Reader, notify func(error)) io.Reader {
- return &readerOnError{r, notify}
-}
-
-// NewReadCloserOnError returns an io.ReadCloser that calls the notify function
-// when an unexpected (non-io.EOF) error happens, after calling the Read function.
-func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser {
- return NewReadCloser(NewReaderOnError(r, notify), r)
-}
-
-func (r *readerOnError) Read(buf []byte) (n int, err error) {
- n, err = r.Reader.Read(buf)
- if err != nil && err != io.EOF {
- r.notify(err)
- }
-
- return
-}
-
-type writerOnError struct {
- io.Writer
- notify func(error)
-}
-
-// NewWriterOnError returns an io.Writer that calls the notify function when
-// an unexpected (non-io.EOF) error happens, after the Write function is
-// called.
-func NewWriterOnError(w io.Writer, notify func(error)) io.Writer {
- return &writerOnError{w, notify}
-}
-
-// NewWriteCloserOnError returns an io.WriteCloser that calls the notify
-// function when an unexpected (non-io.EOF) error happens, after the Write
-// function is called.
-func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser {
- return NewWriteCloser(NewWriterOnError(w, notify), w)
-}
-
-func (r *writerOnError) Write(p []byte) (n int, err error) {
- n, err = r.Writer.Write(p)
- if err != nil && err != io.EOF {
- r.notify(err)
- }
-
- return
-}
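
The CheckClose helper above exists to support a common defer pattern: surface a Close error without masking an earlier one. A minimal, self-contained sketch of that pattern (checkClose and readFile below are illustrative re-implementations, not part of the deleted package):

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	// checkClose mirrors ioutil.CheckClose: it assigns the Close error
	// to *err only if *err is still nil, so an earlier failure wins.
	func checkClose(c io.Closer, err *error) {
		if cerr := c.Close(); cerr != nil && *err == nil {
			*err = cerr
		}
	}

	// readFile uses a named return so the deferred checkClose can
	// report a Close failure as the function's error.
	func readFile(name string) (data []byte, err error) {
		f, oerr := os.Open(name)
		if oerr != nil {
			return nil, oerr
		}
		defer checkClose(f, &err)
		return io.ReadAll(f)
	}

	func main() {
		data, err := readFile("go.mod") // any existing file works
		fmt.Println(len(data), err)
	}
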
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go
deleted file mode 100644
index 0b50ca71d3..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// Action values represent the kind of things a Change can represent:
-// insertions, deletions or modifications of files.
-type Action int
-
-// The set of possible actions in a change.
-const (
- _ Action = iota
- Insert
- Delete
- Modify
-)
-
-// String returns the action as human-readable text.
-func (a Action) String() string {
- switch a {
- case Insert:
- return "Insert"
- case Delete:
- return "Delete"
- case Modify:
- return "Modify"
- default:
- panic(fmt.Sprintf("unsupported action: %d", a))
- }
-}
-
-// A Change value represents how a noder has changed between two merkletries.
-type Change struct {
- // The noder before the change or nil if it was inserted.
- From noder.Path
- // The noder after the change or nil if it was deleted.
- To noder.Path
-}
-
-// Action is a convenience method that returns what Action c represents.
-func (c *Change) Action() (Action, error) {
- if c.From == nil && c.To == nil {
- return Action(0), fmt.Errorf("malformed change: nil from and to")
- }
- if c.From == nil {
- return Insert, nil
- }
- if c.To == nil {
- return Delete, nil
- }
-
- return Modify, nil
-}
-
-// NewInsert returns a new Change representing the insertion of n.
-func NewInsert(n noder.Path) Change { return Change{To: n} }
-
-// NewDelete returns a new Change representing the deletion of n.
-func NewDelete(n noder.Path) Change { return Change{From: n} }
-
-// NewModify returns a new Change representing that a has been modified and
-// it is now b.
-func NewModify(a, b noder.Path) Change {
- return Change{
- From: a,
- To: b,
- }
-}
-
-// String returns a single change in human-readable form, using the
-// format: '<' + action + space + path + '>'. The contents of the file
-// before or after the change are not included in this format.
-//
-// Example: inserting a file at the path a/b/c.txt will return "<Insert
-// a/b/c.txt>".
-func (c Change) String() string {
- action, err := c.Action()
- if err != nil {
- panic(err)
- }
-
- var path string
- if action == Delete {
- path = c.From.String()
- } else {
- path = c.To.String()
- }
-
- return fmt.Sprintf("<%s %s>", action, path)
-}
-
-// Changes is a list of changes between two merkletries.
-type Changes []Change
-
-// NewChanges returns an empty list of changes.
-func NewChanges() Changes {
- return Changes{}
-}
-
-// Add adds the change c to the list of changes.
-func (l *Changes) Add(c Change) {
- *l = append(*l, c)
-}
-
-// AddRecursiveInsert adds the required changes to insert all the
-// file-like noders found in root, recursively.
-func (l *Changes) AddRecursiveInsert(root noder.Path) error {
- return l.addRecursive(root, NewInsert)
-}
-
-// AddRecursiveDelete adds the required changes to delete all the
-// file-like noders found in root, recursively.
-func (l *Changes) AddRecursiveDelete(root noder.Path) error {
- return l.addRecursive(root, NewDelete)
-}
-
-type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete
-
-func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
- if !root.IsDir() {
- l.Add(ctor(root))
- return nil
- }
-
- i, err := NewIterFromPath(root)
- if err != nil {
- return err
- }
-
- var current noder.Path
- for {
- if current, err = i.Step(); err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- if current.IsDir() {
- continue
- }
- l.Add(ctor(current))
- }
-
- return nil
-}
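
The Action derivation above is just a three-way nil test on From and To. A compact stand-alone sketch of the same logic, with plain strings standing in for noder.Path (the change type here is a hypothetical simplification, for illustration only):

	package main

	import (
		"errors"
		"fmt"
	)

	type Action int

	const (
		Insert Action = iota + 1
		Delete
		Modify
	)

	func (a Action) String() string {
		return [...]string{"", "Insert", "Delete", "Modify"}[a]
	}

	// change mimics merkletrie.Change, with "" playing the role of a
	// nil noder.Path.
	type change struct{ from, to string }

	func (c change) action() (Action, error) {
		switch {
		case c.from == "" && c.to == "":
			return 0, errors.New("malformed change: nil from and to")
		case c.from == "":
			return Insert, nil
		case c.to == "":
			return Delete, nil
		default:
			return Modify, nil
		}
	}

	func main() {
		for _, c := range []change{{"", "a/b.txt"}, {"a/b.txt", ""}, {"a", "a"}} {
			a, err := c.action()
			fmt.Println(a, err)
		}
	}
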
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go
deleted file mode 100644
index d57ed13332..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go
+++ /dev/null
@@ -1,424 +0,0 @@
-package merkletrie
-
-// The focus of this difftree implementation is to save time by
-// skipping whole directories if their hash is the same in both
-// trees.
-//
-// The diff algorithm implemented here is based on the doubleiter
-// type defined in this same package; we will iterate over both
-// trees at the same time, while comparing the current noders in
-// each iterator. Depending on how they differ we will output the
-// corresponding changes and move the iterators further over both
-// trees.
-//
-// The table below shows all the possible comparison results, along
-// with what changes we should produce and how to advance the
-// iterators.
-//
-// The table is implemented by the switches in this function,
-// diffTwoNodes, diffTwoNodesSameName and diffTwoDirs.
-//
-// Many Bothans died to bring us this information; make sure you
-// understand the table before modifying this code.
-
-// # Cases
-//
-// When comparing noders in both trees you will find yourself in
-// one of 169 possible cases, but if we ignore moves, we can
-// greatly simplify the search space into the following table:
-//
-// - "-": nothing, no file or directory
-// - a<>: an empty file named "a".
-// - a<1>: a file named "a", with "1" as its contents.
-// - a<2>: a file named "a", with "2" as its contents.
-// - a(): an empty dir named "a".
-// - a(...): a dir named "a", with some files and/or dirs inside (possibly
-// empty).
-// - a(;;;): a dir named "a", with some other files and/or dirs inside
-//   (possibly empty), which differ from the ones in "a(...)".
-//
-// \ to - a<> a<1> a<2> a() a(...) a(;;;)
-// from \
-// - 00 01 02 03 04 05 06
-// a<> 10 11 12 13 14 15 16
-// a<1> 20 21 22 23 24 25 26
-// a<2> 30 31 32 33 34 35 36
-// a() 40 41 42 43 44 45 46
-// a(...) 50 51 52 53 54 55 56
-// a(;;;) 60 61 62 63 64 65 66
-//
-// Every (from, to) combination in the table is a special case, but
-// some of them can be merged into some more general cases, for
-// instance 11 and 22 can be merged into the general case: both
-// noders are equal.
-//
-// Here is a full list of all the cases that are similar and how to
-// merge them together into more general cases. Each general case
-// is labeled with an uppercase letter for further reference, and it
-// is followed by the pseudocode of the checks you have to perform
-// on both noders to see if you are in such a case, the actions to
-// perform (i.e. what changes to output) and how to advance the
-// iterators of each tree to continue the comparison process.
-//
-// ## A. Impossible: 00
-//
-// ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66
-// - check: `SameName() && SameHash()`
-// - action: do nothing.
-// - advance: `FromNext(); ToNext()`
-//
-// ### C. To was created: 01, 02, 03, 04, 05, 06
-// - check: `DifferentName() && ToBeforeFrom()`
-// - action: `insertRecursively(to)`
-// - advance: `ToNext()`
-//
-// ### D. From was deleted: 10, 20, 30, 40, 50, 60
-// - check: `DifferentName() && FromBeforeTo()`
-// - action: `DeleteRecursively(from)`
-// - advance: `FromNext()`
-//
-// ### E. Empty file to file with contents: 12, 13
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// ToIsFile() && FromIsEmpty()`
-// - action: `modifyFile(from, to)`
-// - advance: `FromNext()` or `FromStep()`
-//
-// ### E'. file with contents to empty file: 21, 31
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// ToIsFile() && ToIsEmpty()`
-// - action: `modifyFile(from, to)`
-// - advance: `FromNext()` or `FromStep()`
-//
-// ### F. empty file to empty dir with the same name: 14
-// - check: `SameName() && FromIsFile() && FromIsEmpty() &&
-// ToIsDir() && ToIsEmpty()`
-// - action: `DeleteFile(from); InsertEmptyDir(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### F'. empty dir to empty file of the same name: 41
-// - check: `SameName() && FromIsDir() && FromIsEmpty() &&
-// ToIsFile() && ToIsEmpty()`
-// - action: `DeleteEmptyDir(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()` or step for any of them.
-//
-// ### G. empty file to non-empty dir of the same name: 15, 16
-// - check: `SameName() && FromIsFile() && ToIsDir() &&
-// FromIsEmpty() && ToIsNotEmpty()`
-// - action: `DeleteFile(from); InsertDirRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### G'. non-empty dir to empty file of the same name: 51, 61
-// - check: `SameName() && FromIsDir() && FromIsNotEmpty() &&
-//   ToIsFile() && ToIsEmpty()`
-// - action: `DeleteDirRecursively(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### H. modify file contents: 23, 32
-// - check: `SameName() && FromIsFile() && ToIsFile() &&
-// FromIsNotEmpty() && ToIsNotEmpty()`
-// - action: `ModifyFile(from, to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### I. file with contents to empty dir: 24, 34
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsEmpty()`
-// - action: `DeleteFile(from); InsertEmptyDir(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### I'. empty dir to file with contents: 42, 43
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-//   FromIsEmpty() && ToIsFile() && ToIsNotEmpty()`
-// - action: `DeleteDir(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### J. file with contents to dir with contents: 25, 26, 35, 36
-// - check: `SameName() && DifferentHash() && FromIsFile() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: `DeleteFile(from); InsertDirRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### J'. dir with contents to file with contents: 52, 62, 53, 63
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()`
-// - action: `DeleteDirRecursively(from); InsertFile(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### K. empty dir to dir with contents: 45, 46
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: `InsertChildrenRecursively(to)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### K'. dir with contents to empty dir: 54, 64
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-//   FromIsNotEmpty() && ToIsDir() && ToIsEmpty()`
-// - action: `DeleteChildrenRecursively(from)`
-// - advance: `FromNext(); ToNext()`
-//
-// ### L. dir with contents to dir with different contents: 56, 65
-// - check: `SameName() && DifferentHash() && FromIsDir() &&
-// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
-// - action: nothing
-// - advance: `FromStep(); ToStep()`
-//
-//
-
-// All these cases can be further simplified by a truth table
-// reduction process, in which we gather similar checks together to
-// make the final code easier to read and understand.
-//
-// The first 6 columns are the outputs of the checks to perform on
-// both noders. I have labeled them 1 to 6, this is what they mean:
-//
-// 1: SameName()
-// 2: SameHash()
-// 3: FromIsDir()
-// 4: ToIsDir()
-// 5: FromIsEmpty()
-// 6: ToIsEmpty()
-//
-// The from and to columns are a fsnoder example of the elements
-// that you will find on each tree under the specified comparison
-// results (columns 1 to 6).
-//
-// The type column identifies the case we are into, from the list above.
-//
-// The type' column identifies the new set of reduced cases, using
-// lowercase letters, and they are explained after the table.
-//
-// The last column is the set of actions and advances for each case.
-//
-// "---" means impossible except in case of hash collision.
-//
-// advance meaning:
-// - NN: from.Next(); to.Next()
-// - SS: from.Step(); to.Step()
-//
-// 1 2 3 4 5 6 | from | to |type|type'|action ; advance
-// ------------+--------+--------+----+------------------------------------
-// 0 0 0 0 0 0 | | | | | if !SameName() {
-// . | | | | | if FromBeforeTo() {
-// . | | | D | d | delete(from); from.Next()
-// . | | | | | } else {
-// . | | | C | c | insert(to); to.Next()
-// . | | | | | }
-// 0 1 1 1 1 1 | | | | | }
-// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN
-// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN
-// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN
-// 1 0 0 0 1 1 | ---- | ---- | | e |
-// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN
-// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN
-// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN
-// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN
-// 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN
-// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN
-// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN
-// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN
-// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS
-// 1 0 1 1 0 1 | a(...) | a()    | K' | h  | deleteChildren(from); NN
-// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN
-// 1 0 1 1 1 1 | ---- | ---- | | |
-// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN
-// 1 1 0 0 0 1 | ---- | ---- | | b |
-// 1 1 0 0 1 0 | ---- | ---- | | b |
-// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN
-// 1 1 0 1 0 0 | ---- | ---- | | b |
-// 1 1 0 1 0 1 | ---- | ---- | | b |
-// 1 1 0 1 1 0 | ---- | ---- | | b |
-// 1 1 0 1 1 1 | ---- | ---- | | b |
-// 1 1 1 0 0 0 | ---- | ---- | | b |
-// 1 1 1 0 0 1 | ---- | ---- | | b |
-// 1 1 1 0 1 0 | ---- | ---- | | b |
-// 1 1 1 0 1 1 | ---- | ---- | | b |
-// 1 1 1 1 0 0 | a(...) | a(...) | B | b | nothing; NN
-// 1 1 1 1 0 1 | ---- | ---- | | b |
-// 1 1 1 1 1 0 | ---- | ---- | | b |
-// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN
-//
-// c and d:
-// if !SameName()
-// d if FromBeforeTo()
-// c else
-// b: SameName() && SameHash()
-// e: SameName() && !SameHash() && BothAreFiles()
-// f: SameName() && !SameHash() && FileAndDir()
-// g: SameName() && !SameHash() && BothAreDirs() && NoneIsEmpty()
-// i: SameName() && !SameHash() && BothAreDirs() && FromIsEmpty()
-// h: else of i
-
-import (
- "context"
- "errors"
- "fmt"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-var (
- ErrCanceled = errors.New("operation canceled")
-)
-
-// DiffTree calculates the list of changes between two merkletries. It
-// uses the provided hashEqual callback to compare noders.
-func DiffTree(fromTree, toTree noder.Noder,
- hashEqual noder.Equal) (Changes, error) {
- return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
-}
-
-// DiffTreeContext calculates the list of changes between two merkletries. It
-// uses the provided hashEqual callback to compare noders.
-// An error will be returned if the context expires.
-// The provided context must be non-nil.
-func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
- hashEqual noder.Equal) (Changes, error) {
- ret := NewChanges()
-
- ii, err := newDoubleIter(fromTree, toTree, hashEqual)
- if err != nil {
- return nil, err
- }
-
- for {
- select {
- case <-ctx.Done():
- return nil, ErrCanceled
- default:
- }
-
- from := ii.from.current
- to := ii.to.current
-
- switch r := ii.remaining(); r {
- case noMoreNoders:
- return ret, nil
- case onlyFromRemains:
- if err = ret.AddRecursiveDelete(from); err != nil {
- return nil, err
- }
- if err = ii.nextFrom(); err != nil {
- return nil, err
- }
- case onlyToRemains:
- if err = ret.AddRecursiveInsert(to); err != nil {
- return nil, err
- }
- if err = ii.nextTo(); err != nil {
- return nil, err
- }
- case bothHaveNodes:
- if err = diffNodes(&ret, ii); err != nil {
- return nil, err
- }
- default:
- panic(fmt.Sprintf("unknown remaining value: %d", r))
- }
- }
-}
-
-func diffNodes(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
- var err error
-
- // compare their full paths as strings
- switch from.Compare(to) {
- case -1:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = ii.nextFrom(); err != nil {
- return err
- }
- case 1:
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextTo(); err != nil {
- return err
- }
- default:
- if err := diffNodesSameName(changes, ii); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func diffNodesSameName(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
-
- status, err := ii.compare()
- if err != nil {
- return err
- }
-
- switch {
- case status.sameHash:
- // do nothing
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.bothAreFiles:
- changes.Add(NewModify(from, to))
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.fileAndDir:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.bothAreDirs:
- if err = diffDirs(changes, ii); err != nil {
- return err
- }
- default:
- return fmt.Errorf("bad status from double iterator")
- }
-
- return nil
-}
-
-func diffDirs(changes *Changes, ii *doubleIter) error {
- from := ii.from.current
- to := ii.to.current
-
- status, err := ii.compare()
- if err != nil {
- return err
- }
-
- switch {
- case status.fromIsEmptyDir:
- if err = changes.AddRecursiveInsert(to); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case status.toIsEmptyDir:
- if err = changes.AddRecursiveDelete(from); err != nil {
- return err
- }
- if err = ii.nextBoth(); err != nil {
- return err
- }
- case !status.fromIsEmptyDir && !status.toIsEmptyDir:
- // do nothing
- if err = ii.stepBoth(); err != nil {
- return err
- }
- default:
- return fmt.Errorf("both dirs are empty but has different hash")
- }
-
- return nil
-}
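
The reduced cases b through i above can be read as one decision function over the precomputed checks. A hedged sketch of that dispatch (caseOf is illustrative; the real code threads iterators and change lists through diffNodesSameName and diffDirs):

	package main

	import "fmt"

	// caseOf classifies a same-name pair of noders into the reduced
	// cases of the table above; all inputs are assumed precomputed.
	func caseOf(sameHash, bothFiles, bothDirs, fromEmptyDir, toEmptyDir bool) string {
		switch {
		case sameHash:
			return "b: nothing; NN"
		case bothFiles:
			return "e: modify(from, to); NN"
		case !bothDirs: // one side is a file, the other a dir
			return "f: delete(from); insert(to); NN"
		case fromEmptyDir:
			return "i: insertChildren(to); NN"
		case toEmptyDir:
			return "h: deleteChildren(from); NN"
		default: // both dirs non-empty; both empty would imply sameHash
			return "g: nothing; SS"
		}
	}

	func main() {
		fmt.Println(caseOf(false, true, false, false, false)) // case e
		fmt.Println(caseOf(false, false, true, true, false))  // case i
		fmt.Println(caseOf(false, false, true, false, false)) // case g
	}
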
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go
deleted file mode 100644
index 5204024ad4..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-Package merkletrie provides support for n-ary trees that are at the same
-time Merkle trees and Radix trees (tries).
-
-Git trees are Radix n-ary trees in virtue of the names of their
-tree entries. At the same time, git trees are Merkle trees thanks to
-their hashes.
-
-This package defines Merkle tries as nodes that should have:
-
-- a hash: the Merkle part of the Merkle trie
-
-- a key: the Radix part of the Merkle trie
-
-The Merkle hash condition is not enforced by this package though. This
-means that the hash of a node doesn't have to take into account the hashes of
-its children, which is good for testing purposes.
-
-Nodes in the Merkle trie are abstracted by the Noder interface. The
-intended use is that git trees implement this interface, either
-directly or using a simple wrapper.
-
-This package provides an iterator for merkletries that can skip whole
-directory-like noders and an efficient merkletrie comparison algorithm.
-
-When comparing git trees, the simple approach of alphabetically sorting
-their elements and comparing the resulting lists is too slow as it
-depends linearly on the number of files in the trees: when a directory
-has lots of files but none of them has been modified, this approach is
-very expensive. We can do better by pruning whole directories that
-have not changed, just by looking at their hashes. This package provides
-the tools to do exactly that.
-*/
-package merkletrie
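
The pruning claim above is easy to see in code: if two directory hashes match, the whole subtree comparison is skipped. A toy sketch under that assumption (the dir type and visited counter are hypothetical, for illustration only):

	package main

	import "fmt"

	// dir is a toy Merkle node: equal hashes imply equal subtrees.
	type dir struct {
		hash     string
		children map[string]*dir
	}

	// visited counts the nodes a diff would touch; identical subtrees
	// are pruned by the hash check, so cost tracks the changed region.
	func visited(a, b *dir) int {
		if a.hash == b.hash {
			return 0 // prune: no descent into identical subtrees
		}
		n := 1
		for name, ca := range a.children {
			if cb, ok := b.children[name]; ok {
				n += visited(ca, cb)
			}
		}
		return n
	}

	func main() {
		big := &dir{hash: "same"} // stands in for a huge unchanged subtree
		a := &dir{hash: "h1", children: map[string]*dir{"big": big, "f": {hash: "x"}}}
		b := &dir{hash: "h2", children: map[string]*dir{"big": big, "f": {hash: "y"}}}
		fmt.Println(visited(a, b)) // 2: the roots and "f"; "big" is never entered
	}
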
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go
deleted file mode 100644
index e56dee701f..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// A doubleIter is a convenience type to keep track of the current
-// noders in two merkletries that are going to be iterated in parallel.
-// It has methods for:
-//
-// - iterating over the merkletries, both at the same time or
-// individually: nextFrom, nextTo, nextBoth, stepBoth
-//
-// - checking if there are noders left in one or both of them with the
-// remaining method and its associated returned type.
-//
-// - comparing the current noders of both merkletries in several ways,
-// with the compare method and its associated returned type.
-type doubleIter struct {
- from struct {
- iter *Iter
- current noder.Path // nil if no more nodes
- }
- to struct {
- iter *Iter
- current noder.Path // nil if no more nodes
- }
- hashEqual noder.Equal
-}
-
-// newDoubleIter returns a new doubleIter for the merkletries "from" and
-// "to". The hashEqual callback function will be used by the doubleIter
-// to compare the hash of the noders in the merkletries. The doubleIter
-// will be initialized to the first elements in each merkletrie if any.
-func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) (
- *doubleIter, error) {
- var ii doubleIter
- var err error
-
- if ii.from.iter, err = NewIter(from); err != nil {
- return nil, fmt.Errorf("from: %s", err)
- }
- if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil {
- return nil, fmt.Errorf("from: %s", err)
- }
-
- if ii.to.iter, err = NewIter(to); err != nil {
- return nil, fmt.Errorf("to: %s", err)
- }
- if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil {
- return nil, fmt.Errorf("to: %s", err)
- }
-
- ii.hashEqual = hashEqual
-
- return &ii, nil
-}
-
-func turnEOFIntoNil(e error) error {
- if e != nil && e != io.EOF {
- return e
- }
- return nil
-}
-
-// nextBoth makes d advance to the next noder in both merkletries. If
-// any of them is a directory, it skips its contents.
-func (d *doubleIter) nextBoth() error {
- if err := d.nextFrom(); err != nil {
- return err
- }
- if err := d.nextTo(); err != nil {
- return err
- }
-
- return nil
-}
-
-// nextFrom makes d advance to the next noder in the "from" merkletrie,
-// skipping its contents if it is a directory.
-func (d *doubleIter) nextFrom() (err error) {
- d.from.current, err = d.from.iter.Next()
- return turnEOFIntoNil(err)
-}
-
-// nextTo makes d advance to the next noder in the "to" merkletrie,
-// skipping its contents if it is a directory.
-func (d *doubleIter) nextTo() (err error) {
- d.to.current, err = d.to.iter.Next()
- return turnEOFIntoNil(err)
-}
-
-// stepBoth makes d advance to the next noder in both merkletries,
-// getting deeper into directories if that is the case.
-func (d *doubleIter) stepBoth() (err error) {
- if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil {
- return err
- }
- if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil {
- return err
- }
- return nil
-}
-
-// remaining returns whether there are no more noders in the trees, whether
-// both still have noders, or whether only one of them does.
-func (d *doubleIter) remaining() remaining {
- if d.from.current == nil && d.to.current == nil {
- return noMoreNoders
- }
-
- if d.from.current == nil && d.to.current != nil {
- return onlyToRemains
- }
-
- if d.from.current != nil && d.to.current == nil {
- return onlyFromRemains
- }
-
- return bothHaveNodes
-}
-
-// remaining values tell you whether both trees still have noders, only
-// one of them, or none of them.
-type remaining int
-
-const (
- noMoreNoders remaining = iota
- onlyToRemains
- onlyFromRemains
- bothHaveNodes
-)
-
-// compare returns the comparison between the current elements in the
-// merkletries.
-func (d *doubleIter) compare() (s comparison, err error) {
- s.sameHash = d.hashEqual(d.from.current, d.to.current)
-
- fromIsDir := d.from.current.IsDir()
- toIsDir := d.to.current.IsDir()
-
- s.bothAreDirs = fromIsDir && toIsDir
- s.bothAreFiles = !fromIsDir && !toIsDir
- s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles
-
- fromNumChildren, err := d.from.current.NumChildren()
- if err != nil {
- return comparison{}, fmt.Errorf("from: %s", err)
- }
-
- toNumChildren, err := d.to.current.NumChildren()
- if err != nil {
- return comparison{}, fmt.Errorf("to: %s", err)
- }
-
- s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0
- s.toIsEmptyDir = toIsDir && toNumChildren == 0
-
- return
-}
-
-// comparison answers a lot of questions you can ask about how two noders
-// are equal or different.
-type comparison struct {
- // the following are only valid if both nodes have the same name
- // (i.e. nameComparison == 0)
-
- // Do both nodes have the same hash?
- sameHash bool
- // Are both nodes files?
- bothAreFiles bool
-
- // the following are only valid if any of the noders are dirs,
- // this is, if !bothAreFiles
-
- // Is one a file and the other a dir?
- fileAndDir bool
- // Are both nodes dirs?
- bothAreDirs bool
- // Is the from node an empty dir?
- fromIsEmptyDir bool
- // Is the to Node an empty dir?
- toIsEmptyDir bool
-}
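
At its core, the doubleIter's remaining/compare dance is the classic merge-style walk over two sorted sequences. A self-contained sketch with sorted string slices standing in for merkletries (parallelWalk and its visit callback are illustrative names):

	package main

	import "fmt"

	// parallelWalk visits two sorted name lists in lockstep, emitting
	// deletions, insertions, or matches: the same control flow that
	// doubleIter supports for two merkletries.
	func parallelWalk(from, to []string, visit func(kind, name string)) {
		i, j := 0, 0
		for i < len(from) || j < len(to) {
			switch {
			case i == len(from): // onlyToRemains
				visit("insert", to[j])
				j++
			case j == len(to): // onlyFromRemains
				visit("delete", from[i])
				i++
			case from[i] < to[j]:
				visit("delete", from[i])
				i++
			case from[i] > to[j]:
				visit("insert", to[j])
				j++
			default: // same name on both sides
				visit("same", from[i])
				i++
				j++
			}
		}
	}

	func main() {
		from := []string{"a", "b", "d"}
		to := []string{"a", "c", "d"}
		parallelWalk(from, to, func(kind, name string) {
			fmt.Println(kind, name)
		})
	}
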
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem/node.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem/node.go
deleted file mode 100644
index 12d00189a3..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem/node.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package filesystem
-
-import (
- "io"
- "os"
- "path"
-
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-var ignore = map[string]bool{
- ".git": true,
-}
-
-// The node represents a file or a directory in a billy.Filesystem. It
-// implements the interface noder.Noder of merkletrie package.
-//
-// This implementation provides a "standard" hash method, so it can be
-// compared with any other noder.Noder implementation inside of go-git.
-type node struct {
- fs billy.Filesystem
- submodules map[string]plumbing.Hash
-
- path string
- hash []byte
- children []noder.Noder
- isDir bool
-}
-
-// NewRootNode returns the root node based on a given billy.Filesystem.
-//
-// In order to provide the submodule hash status, a map[string]plumbing.Hash
-// should be provided where the key is the path of the submodule and the value
-// is the commit hash of the submodule HEAD.
-func NewRootNode(
- fs billy.Filesystem,
- submodules map[string]plumbing.Hash,
-) noder.Noder {
- return &node{fs: fs, submodules: submodules, isDir: true}
-}
-
-// Hash returns the hash of the node. The hash of a file is the result of
-// concatenating the computed plumbing.Hash of the file as a Blob and its
-// plumbing.FileMode; that way the difftree algorithm will detect changes in
-// the contents of files and also in their mode.
-//
-// The hash of a directory is always a 24-byte slice of zero values.
-func (n *node) Hash() []byte {
- return n.hash
-}
-
-func (n *node) Name() string {
- return path.Base(n.path)
-}
-
-func (n *node) IsDir() bool {
- return n.isDir
-}
-
-func (n *node) Children() ([]noder.Noder, error) {
- if err := n.calculateChildren(); err != nil {
- return nil, err
- }
-
- return n.children, nil
-}
-
-func (n *node) NumChildren() (int, error) {
- if err := n.calculateChildren(); err != nil {
- return -1, err
- }
-
- return len(n.children), nil
-}
-
-func (n *node) calculateChildren() error {
- if !n.IsDir() {
- return nil
- }
-
- if len(n.children) != 0 {
- return nil
- }
-
- files, err := n.fs.ReadDir(n.path)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
-
- // an unexpected ReadDir failure should be propagated, not swallowed
- return err
- }
-
- for _, file := range files {
- if _, ok := ignore[file.Name()]; ok {
- continue
- }
-
- c, err := n.newChildNode(file)
- if err != nil {
- return err
- }
-
- n.children = append(n.children, c)
- }
-
- return nil
-}
-
-func (n *node) newChildNode(file os.FileInfo) (*node, error) {
- path := path.Join(n.path, file.Name())
-
- hash, err := n.calculateHash(path, file)
- if err != nil {
- return nil, err
- }
-
- node := &node{
- fs: n.fs,
- submodules: n.submodules,
-
- path: path,
- hash: hash,
- isDir: file.IsDir(),
- }
-
- if hash, isSubmodule := n.submodules[path]; isSubmodule {
- node.hash = append(hash[:], filemode.Submodule.Bytes()...)
- node.isDir = false
- }
-
- return node, nil
-}
-
-func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) {
- if file.IsDir() {
- return make([]byte, 24), nil
- }
-
- var hash plumbing.Hash
- var err error
- if file.Mode()&os.ModeSymlink != 0 {
- hash, err = n.doCalculateHashForSymlink(path, file)
- } else {
- hash, err = n.doCalculateHashForRegular(path, file)
- }
-
- if err != nil {
- return nil, err
- }
-
- mode, err := filemode.NewFromOSFileMode(file.Mode())
- if err != nil {
- return nil, err
- }
-
- return append(hash[:], mode.Bytes()...), nil
-}
-
-func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) {
- f, err := n.fs.Open(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer f.Close()
-
- h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
- if _, err := io.Copy(h, f); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return h.Sum(), nil
-}
-
-func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) {
- target, err := n.fs.Readlink(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
- if _, err := h.Write([]byte(target)); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return h.Sum(), nil
-}
-
-func (n *node) String() string {
- return n.path
-}
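
The node hash described above is a 20-byte git blob hash plus the 4-byte file mode, giving the 24-byte value the difftree compares. A sketch of the blob part using only the standard library (the little-endian encoding of 0o100644 is an assumption standing in for filemode.Regular.Bytes()):

	package main

	import (
		"crypto/sha1"
		"encoding/binary"
		"fmt"
	)

	// blobHash computes the canonical git blob hash of content:
	// sha1("blob " + decimal length + NUL + content).
	func blobHash(content []byte) []byte {
		h := sha1.New()
		fmt.Fprintf(h, "blob %d\x00", len(content))
		h.Write(content)
		return h.Sum(nil)
	}

	func main() {
		hash := blobHash([]byte("hello\n")) // ce0136..., as `git hash-object` prints

		// Append the 4-byte mode; little-endian 0o100644 is assumed here
		// to match filemode's encoding.
		mode := make([]byte, 4)
		binary.LittleEndian.PutUint32(mode, 0o100644)
		nodeHash := append(hash, mode...)

		fmt.Printf("%x (%d bytes)\n", nodeHash, len(nodeHash))
	}
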
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/index/node.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/index/node.go
deleted file mode 100644
index 9622622483..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/index/node.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package index
-
-import (
- "path"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// The node represents an index.Entry or a directory inferred from the path
-// of all entries. It implements the interface noder.Noder of merkletrie
-// package.
-//
-// This implementation provides a "standard" hash method, so it can be
-// compared with any other noder.Noder implementation inside of go-git.
-type node struct {
- path string
- entry *index.Entry
- children []noder.Noder
- isDir bool
-}
-
-// NewRootNode returns the root node of a tree computed from an index.Index.
-func NewRootNode(idx *index.Index) noder.Noder {
- const rootNode = ""
-
- m := map[string]*node{rootNode: {isDir: true}}
-
- for _, e := range idx.Entries {
- parts := strings.Split(e.Name, "/")
-
- var fullpath string
- for _, part := range parts {
- parent := fullpath
- fullpath = path.Join(fullpath, part)
-
- if _, ok := m[fullpath]; ok {
- continue
- }
-
- n := &node{path: fullpath}
- if fullpath == e.Name {
- n.entry = e
- } else {
- n.isDir = true
- }
-
- m[n.path] = n
- m[parent].children = append(m[parent].children, n)
- }
- }
-
- return m[rootNode]
-}
-
-func (n *node) String() string {
- return n.path
-}
-
-// Hash returns the hash of the node, a 24-byte slice: the result of
-// concatenating the computed plumbing.Hash of the file as a Blob and its
-// plumbing.FileMode; that way the difftree algorithm will detect changes in the
-// contents of files and also in their mode.
-//
-// If the node is computed and not based on an index.Entry, the hash equals
-// a 24-byte slice of zero values.
-func (n *node) Hash() []byte {
- if n.entry == nil {
- return make([]byte, 24)
- }
-
- return append(n.entry.Hash[:], n.entry.Mode.Bytes()...)
-}
-
-func (n *node) Name() string {
- return path.Base(n.path)
-}
-
-func (n *node) IsDir() bool {
- return n.isDir
-}
-
-func (n *node) Children() ([]noder.Noder, error) {
- return n.children, nil
-}
-
-func (n *node) NumChildren() (int, error) {
- return len(n.children), nil
-}
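
NewRootNode infers every intermediate directory from the flat entry paths. A minimal stand-alone sketch of that inference (buildTree and its parent-to-children map are illustrative simplifications of the node construction above):

	package main

	import (
		"fmt"
		"path"
		"strings"
	)

	// buildTree infers intermediate directories from flat paths,
	// recording each parent -> child edge exactly once.
	func buildTree(names []string) map[string][]string {
		children := map[string][]string{}
		seen := map[string]bool{"": true} // "" is the root node
		for _, name := range names {
			full := ""
			for _, part := range strings.Split(name, "/") {
				parent := full
				full = path.Join(full, part)
				if seen[full] {
					continue
				}
				seen[full] = true
				children[parent] = append(children[parent], full)
			}
		}
		return children
	}

	func main() {
		tree := buildTree([]string{"a/b/c.txt", "a/d.txt", "e.txt"})
		fmt.Println(tree[""])  // [a e.txt]
		fmt.Println(tree["a"]) // [a/b a/d.txt]
	}
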
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go
deleted file mode 100644
index a0b042ee61..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package frame
-
-import (
- "bytes"
- "fmt"
- "sort"
- "strings"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// A Frame is a collection of siblings in a trie, sorted alphabetically
-// by name.
-type Frame struct {
- // siblings, sorted in reverse alphabetical order by name
- stack []noder.Noder
-}
-
-type byName []noder.Noder
-
-func (a byName) Len() int { return len(a) }
-func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byName) Less(i, j int) bool {
- return strings.Compare(a[i].Name(), a[j].Name()) < 0
-}
-
-// New returns a frame with the children of the provided node.
-func New(n noder.Noder) (*Frame, error) {
- children, err := n.Children()
- if err != nil {
- return nil, err
- }
-
- sort.Sort(sort.Reverse(byName(children)))
- return &Frame{
- stack: children,
- }, nil
-}
-
-// String returns the quoted names of the noders in the frame, sorted in
-// alphabetical order by name, surrounded by square brackets and
-// separated by commas.
-//
-// Examples:
-// []
-// ["a", "b"]
-func (f *Frame) String() string {
- var buf bytes.Buffer
- _ = buf.WriteByte('[')
-
- sep := ""
- for i := f.Len() - 1; i >= 0; i-- {
- _, _ = buf.WriteString(sep)
- sep = ", "
- _, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name()))
- }
-
- _ = buf.WriteByte(']')
-
- return buf.String()
-}
-
-// First returns, but does not extract, the noder with the alphabetically
-// smallest name in the frame, and true if the frame was not empty.
-// Otherwise it returns nil and false.
-func (f *Frame) First() (noder.Noder, bool) {
- if f.Len() == 0 {
- return nil, false
- }
-
- top := f.Len() - 1
-
- return f.stack[top], true
-}
-
-// Drop extracts the noder with the alphabetically smallest name in the
-// frame, or does nothing if the frame is empty.
-func (f *Frame) Drop() {
- if f.Len() == 0 {
- return
- }
-
- top := f.Len() - 1
- f.stack[top] = nil
- f.stack = f.stack[:top]
-}
-
-// Len returns the number of noders in the frame.
-func (f *Frame) Len() int {
- return len(f.stack)
-}
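
Sorting the siblings in reverse order is what makes First and Drop O(1): the alphabetically smallest name always sits at the top of the stack. A small sketch of that design choice:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		stack := []string{"z", "a", "m"}
		sort.Sort(sort.Reverse(sort.StringSlice(stack)))
		fmt.Println(stack) // [z m a]

		// First: peek at the top (last element) -- O(1).
		top := len(stack) - 1
		fmt.Println(stack[top]) // a

		// Drop: truncate the slice -- O(1), no reshuffling needed.
		stack = stack[:top]
		fmt.Println(stack) // [z m]
	}
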
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go
deleted file mode 100644
index b4d4c99a33..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package merkletrie
-
-import (
- "fmt"
- "io"
-
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-// Iter is an iterator for merkletries (only the trie part of the
-// merkletrie is relevant here, it does not use the Hasher interface).
-//
-// The iteration is performed in depth-first pre-order. Entries at each
-// depth are traversed in (case-sensitive) alphabetical order.
-//
-// This is the kind of traversal you will expect when listing ordinary
-// files and directories recursively, for example:
-//
-// Trie Traversal order
-// ---- ---------------
-// .
-// / | \ c
-// / | \ d/
-// d c z ===> d/a
-// / \ d/b
-// b a z
-//
-//
-// This iterator is somewhat special as you can choose to skip whole
-// "directories" when iterating:
-//
-// - The Step method will iterate normally.
-//
-// - the Next method will not descend deeper into the tree.
-//
-// For example, if the iterator is at `d/`, the Step method will return
-// `d/a` while the Next would have returned `z` instead (skipping `d/`
-// and its descendants). The name of the these two methods are based on
-// the well known "next" and "step" operations, quite common in
-// debuggers, like gdb.
-//
-// The paths returned by the iterator will be relative, if the iterator
-// was created from a single node, or absolute, if the iterator was
-// created from the path to the node (the path will be prefixed to all
-// returned paths).
-type Iter struct {
- // Tells if the iteration has started.
- hasStarted bool
- // The top of this stack has the current node and its siblings. The
- // rest of the stack keeps the ancestors of the current node and
- // their corresponding siblings. The current element is always the
- // top element of the top frame.
- //
- // When "step"ping into a node, its children are pushed as a new
- // frame.
- //
- // When "next"ing pass a node, the current element is dropped by
- // popping the top frame.
- frameStack []*frame.Frame
- // The base path used to turn the relative paths used internally by
- // the iterator into absolute paths used by external applications.
-// For a relative iterator this will be nil.
- base noder.Path
-}
-
-// NewIter returns a new relative iterator using the provided noder as
-// its unnamed root. When iterating, all returned paths will be
-// relative to node.
-func NewIter(n noder.Noder) (*Iter, error) {
- return newIter(n, nil)
-}
-
-// NewIterFromPath returns a new absolute iterator from the noder at the
-// end of the path p. When iterating, all returned paths will be
-// absolute, using the root of the path p as their root.
-func NewIterFromPath(p noder.Path) (*Iter, error) {
- return newIter(p, p) // Path implements Noder
-}
-
-func newIter(root noder.Noder, base noder.Path) (*Iter, error) {
- ret := &Iter{
- base: base,
- }
-
- if root == nil {
- return ret, nil
- }
-
- frame, err := frame.New(root)
- if err != nil {
- return nil, err
- }
- ret.push(frame)
-
- return ret, nil
-}
-
-func (iter *Iter) top() (*frame.Frame, bool) {
- if len(iter.frameStack) == 0 {
- return nil, false
- }
- top := len(iter.frameStack) - 1
-
- return iter.frameStack[top], true
-}
-
-func (iter *Iter) push(f *frame.Frame) {
- iter.frameStack = append(iter.frameStack, f)
-}
-
-const (
- doDescend = true
- dontDescend = false
-)
-
-// Next returns the path of the next node, without descending deeper into
-// the trie, and a nil error. If there are no more entries in the trie it
-// returns nil and io.EOF. In case of error, it will return nil and the
-// error.
-func (iter *Iter) Next() (noder.Path, error) {
- return iter.advance(dontDescend)
-}
-
-// Step returns the path to the next node in the trie, descending deeper
-// into it if needed, and a nil error. If there are no more nodes in the
-// trie, it returns nil and io.EOF. In case of error, it will return nil
-// and the error.
-func (iter *Iter) Step() (noder.Path, error) {
- return iter.advance(doDescend)
-}
-
-// Advances the iterator in the desired direction: descend or
-// dontDescend.
-//
-// Returns the new current element and a nil error on success. If there
-// are no more elements in the trie below the base, it returns nil, and
-// io.EOF. Returns nil and an error in case of errors.
-func (iter *Iter) advance(wantDescend bool) (noder.Path, error) {
- current, err := iter.current()
- if err != nil {
- return nil, err
- }
-
- // The first time we just return the current node.
- if !iter.hasStarted {
- iter.hasStarted = true
- return current, nil
- }
-
- // Advancing means getting the next current node, either its first child
- // or its next sibling, depending on whether we must descend or not.
- numChildren, err := current.NumChildren()
- if err != nil {
- return nil, err
- }
-
- mustDescend := numChildren != 0 && wantDescend
- if mustDescend {
- // descend: add a new frame with the current's children.
- frame, err := frame.New(current)
- if err != nil {
- return nil, err
- }
- iter.push(frame)
- } else {
- // don't descend: just drop the current node
- iter.drop()
- }
-
- return iter.current()
-}
-
-// Returns the path to the current node, adding the base if there was
-// one, and a nil error. If there were no noders left, it returns nil
-// and io.EOF. If an error occurred, it returns nil and the error.
-func (iter *Iter) current() (noder.Path, error) {
- if topFrame, ok := iter.top(); !ok {
- return nil, io.EOF
- } else if _, ok := topFrame.First(); !ok {
- return nil, io.EOF
- }
-
- ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack))
-
- // concat the base...
- ret = append(ret, iter.base...)
- // ... and the current node and all its ancestors
- for i, f := range iter.frameStack {
- t, ok := f.First()
- if !ok {
- panic(fmt.Sprintf("frame %d is empty", i))
- }
- ret = append(ret, t)
- }
-
- return ret, nil
-}
-
-// removes the current node if any, and all the frames that become empty as a
-// consequence of this action.
-func (iter *Iter) drop() {
- frame, ok := iter.top()
- if !ok {
- return
- }
-
- frame.Drop()
- // if the frame is empty, remove it and its parent, recursively
- if frame.Len() == 0 {
- top := len(iter.frameStack) - 1
- iter.frameStack[top] = nil
- iter.frameStack = iter.frameStack[:top]
- iter.drop()
- }
-}
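
The Next/Step distinction documented above can be reproduced with a toy tree. A self-contained sketch (node and walk are illustrative stand-ins for noder.Noder and the frame stack; descending is gated by a callback the way Step descends and Next does not):

	package main

	import (
		"fmt"
		"path"
		"sort"
	)

	type node struct {
		name     string
		children []*node
	}

	// walk does a depth-first pre-order traversal in alphabetical
	// order, descending into a directory only when stepInto says so:
	// always descending mirrors Iter.Step, never descending mirrors
	// Iter.Next.
	func walk(n *node, prefix string, stepInto func(string) bool) {
		kids := append([]*node(nil), n.children...)
		sort.Slice(kids, func(i, j int) bool { return kids[i].name < kids[j].name })
		for _, c := range kids {
			p := path.Join(prefix, c.name)
			fmt.Println(p)
			if len(c.children) > 0 && stepInto(p) {
				walk(c, p, stepInto)
			}
		}
	}

	func main() {
		// The trie from the doc comment: d(a, b), c, z.
		root := &node{children: []*node{
			{name: "d", children: []*node{{name: "b"}, {name: "a"}}},
			{name: "c"},
			{name: "z"},
		}}

		walk(root, "", func(string) bool { return true }) // Step: c d d/a d/b z
		fmt.Println("--")
		walk(root, "", func(p string) bool { return p != "d" }) // Next at d: c d z
	}
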
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go
deleted file mode 100644
index d6b3de4ada..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Package noder provides an interface for defining nodes in a
-// merkletrie, their hashes and their paths (a noder and its
-// ancestors).
-//
-// The hasher interface is easy to implement naively by elements that
-// already have a hash, like git blobs and trees. More sophisticated
-// implementations can implement the Equal function in exotic ways
-// though: for instance, comparing the modification time of directories
-// in a filesystem.
-package noder
-
-import "fmt"
-
-// Hasher interface is implemented by types that can tell you
-// their hash.
-type Hasher interface {
- Hash() []byte
-}
-
-// Equal functions take two hashers and return whether they are equal.
-//
-// These functions are expected to be faster than reflect.DeepEqual because
-// they can compare just the hash of the objects, instead of their contents,
-// so they are expected to be O(1).
-type Equal func(a, b Hasher) bool
-
-// The Noder interface is implemented by the elements of a Merkle Trie.
-//
-// There are two types of elements in a Merkle Trie:
-//
-// - file-like nodes: they cannot have children.
-//
-// - directory-like nodes: they can have 0 or more children and their
-//   hash is calculated by combining their children's hashes.
-type Noder interface {
- Hasher
- fmt.Stringer // for testing purposes
- // Name returns the name of an element (relative, not its full
- // path).
- Name() string
- // IsDir returns true if the element is a directory-like node or
- // false if it is a file-like node.
- IsDir() bool
- // Children returns the children of the element. Note that empty
- // directory-like noders and file-like noders will both return
- // NoChildren.
- Children() ([]Noder, error)
- // NumChildren returns the number of children this element has.
- //
- // This method is an optimization: the number of children is easily
- // calculated as the length of the value returned by the Children
- // method (above); yet, some implementations will be able to
- // implement NumChildren in O(1) while Children is usually more
- // complex.
- NumChildren() (int, error)
-}
-
-// NoChildren represents the children of a noder without children.
-var NoChildren = []Noder{}
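
A typical noder.Equal implementation compares only the precomputed hashes, which is what makes it O(1) in the size of the underlying contents. A sketch against a trivial Hasher (the fakeNoder type is hypothetical):

	package main

	import (
		"bytes"
		"fmt"
	)

	type Hasher interface {
		Hash() []byte
	}

	// fakeNoder carries a precomputed hash, like a git tree or blob.
	type fakeNoder struct{ hash []byte }

	func (f fakeNoder) Hash() []byte { return f.hash }

	// hashEqual never looks past the fixed-size hashes, so its cost is
	// independent of how large the hashed objects are.
	func hashEqual(a, b Hasher) bool {
		return bytes.Equal(a.Hash(), b.Hash())
	}

	func main() {
		x := fakeNoder{hash: []byte{1, 2, 3}}
		y := fakeNoder{hash: []byte{1, 2, 3}}
		z := fakeNoder{hash: []byte{9}}
		fmt.Println(hashEqual(x, y), hashEqual(x, z)) // true false
	}
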
diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go
deleted file mode 100644
index 1c7ef54eeb..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package noder
-
-import (
- "bytes"
- "strings"
-)
-
-// Path values represent a noder and its ancestors. The root goes first
-// and the actual final noder the path is referring to will be the last.
-//
-// A path implements the Noder interface, redirecting all the interface
-// calls to its final noder.
-//
-// Paths built from an empty Noder slice are not valid paths and should
-// not be used.
-type Path []Noder
-
-// String returns the full path of the final noder as a string, using
-// "/" as the separator.
-func (p Path) String() string {
- var buf bytes.Buffer
- sep := ""
- for _, e := range p {
- _, _ = buf.WriteString(sep)
- sep = "/"
- _, _ = buf.WriteString(e.Name())
- }
-
- return buf.String()
-}
-
-// Last returns the final noder in the path.
-func (p Path) Last() Noder {
- return p[len(p)-1]
-}
-
-// Hash returns the hash of the final noder of the path.
-func (p Path) Hash() []byte {
- return p.Last().Hash()
-}
-
-// Name returns the name of the final noder of the path.
-func (p Path) Name() string {
- return p.Last().Name()
-}
-
-// IsDir returns whether the final noder of the path is a directory-like
-// noder.
-func (p Path) IsDir() bool {
- return p.Last().IsDir()
-}
-
-// Children returns the children of the final noder in the path.
-func (p Path) Children() ([]Noder, error) {
- return p.Last().Children()
-}
-
-// NumChildren returns the number of children the final noder of the
-// path has.
-func (p Path) NumChildren() (int, error) {
- return p.Last().NumChildren()
-}
-
-// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger
-// than other, in "directory order"; for example:
-//
-// "a" < "b"
-// "a/b/c/d/z" < "b"
-// "a/b/a" > "a/b"
-func (p Path) Compare(other Path) int {
- i := 0
- for {
- switch {
- case len(other) == len(p) && i == len(p):
- return 0
- case i == len(other):
- return 1
- case i == len(p):
- return -1
- default:
- // We do *not* normalize Unicode here. CGit doesn't.
- // https://github.com/src-d/go-git/issues/1057
- cmp := strings.Compare(p[i].Name(), other[i].Name())
- if cmp != 0 {
- return cmp
- }
- }
- i++
- }
-}
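
Compare's "directory order" is a component-wise comparison where a path that runs out of components first sorts first. A stand-alone sketch on pre-split string slices (compare mirrors Path.Compare; the split helper is illustrative):

	package main

	import (
		"fmt"
		"strings"
	)

	// compare mirrors Path.Compare on pre-split path components.
	func compare(p, other []string) int {
		for i := 0; ; i++ {
			switch {
			case i == len(p) && i == len(other):
				return 0
			case i == len(other):
				return 1 // p is longer, so p > other
			case i == len(p):
				return -1 // p is shorter, so p < other
			}
			if c := strings.Compare(p[i], other[i]); c != 0 {
				return c
			}
		}
	}

	func main() {
		split := func(s string) []string { return strings.Split(s, "/") }
		fmt.Println(compare(split("a/b/c/d/z"), split("b"))) // -1
		fmt.Println(compare(split("a/b/a"), split("a/b")))   // 1
		fmt.Println(compare(split("a"), split("a")))         // 0
	}
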
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree.go b/vendor/gopkg.in/src-d/go-git.v4/worktree.go
deleted file mode 100644
index 4a609e9eab..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree.go
+++ /dev/null
@@ -1,954 +0,0 @@
-package git
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- stdioutil "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- "gopkg.in/src-d/go-git.v4/config"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/plumbing/storer"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie"
-
- "gopkg.in/src-d/go-billy.v4"
- "gopkg.in/src-d/go-billy.v4/util"
-)
-
-var (
- ErrWorktreeNotClean = errors.New("worktree is not clean")
- ErrSubmoduleNotFound = errors.New("submodule not found")
- ErrUnstagedChanges = errors.New("worktree contains unstaged changes")
- ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
- ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
-)
-
-// Worktree represents a git worktree.
-type Worktree struct {
- // Filesystem is the underlying filesystem.
- Filesystem billy.Filesystem
- // External excludes not found in the repository .gitignore
- Excludes []gitignore.Pattern
-
- r *Repository
-}
-
-// Pull incorporates changes from a remote repository into the current branch.
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are
-// no changes to be fetched, or an error.
-//
-// Pull only supports merges where the merge can be resolved as a fast-forward.
-func (w *Worktree) Pull(o *PullOptions) error {
- return w.PullContext(context.Background(), o)
-}
-
-// PullContext incorporates changes from a remote repository into the current
-// branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if
-// there are no changes to be fetched, or an error.
-//
-// Pull only supports merges where the merge can be resolved as a fast-forward.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
-func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
- if err := o.Validate(); err != nil {
- return err
- }
-
- remote, err := w.r.Remote(o.RemoteName)
- if err != nil {
- return err
- }
-
- fetchHead, err := remote.fetch(ctx, &FetchOptions{
- RemoteName: o.RemoteName,
- Depth: o.Depth,
- Auth: o.Auth,
- Progress: o.Progress,
- Force: o.Force,
- })
-
- updated := true
- if err == NoErrAlreadyUpToDate {
- updated = false
- } else if err != nil {
- return err
- }
-
- ref, err := storer.ResolveReference(fetchHead, o.ReferenceName)
- if err != nil {
- return err
- }
-
- head, err := w.r.Head()
- if err == nil {
- if !updated && head.Hash() == ref.Hash() {
- return NoErrAlreadyUpToDate
- }
-
- ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash())
- if err != nil {
- return err
- }
-
- if !ff {
- return ErrNonFastForwardUpdate
- }
- }
-
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if err := w.updateHEAD(ref.Hash()); err != nil {
- return err
- }
-
- if err := w.Reset(&ResetOptions{
- Mode: MergeReset,
- Commit: ref.Hash(),
- }); err != nil {
- return err
- }
-
- if o.RecurseSubmodules != NoRecurseSubmodules {
- return w.updateSubmodules(&SubmoduleUpdateOptions{
- RecurseSubmodules: o.RecurseSubmodules,
- Auth: o.Auth,
- })
- }
-
- return nil
-}
-
-func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error {
- s, err := w.Submodules()
- if err != nil {
- return err
- }
- o.Init = true
- return s.Update(o)
-}
-
-// Checkout switches branches or restores working tree files.
-func (w *Worktree) Checkout(opts *CheckoutOptions) error {
- if err := opts.Validate(); err != nil {
- return err
- }
-
- if opts.Create {
- if err := w.createBranch(opts); err != nil {
- return err
- }
- }
-
- c, err := w.getCommitFromCheckoutOptions(opts)
- if err != nil {
- return err
- }
-
- ro := &ResetOptions{Commit: c, Mode: MergeReset}
- if opts.Force {
- ro.Mode = HardReset
- } else if opts.Keep {
- ro.Mode = SoftReset
- }
-
- if !opts.Hash.IsZero() && !opts.Create {
- err = w.setHEADToCommit(opts.Hash)
- } else {
- err = w.setHEADToBranch(opts.Branch, c)
- }
-
- if err != nil {
- return err
- }
-
- return w.Reset(ro)
-}
-func (w *Worktree) createBranch(opts *CheckoutOptions) error {
- _, err := w.r.Storer.Reference(opts.Branch)
- if err == nil {
- return fmt.Errorf("a branch named %q already exists", opts.Branch)
- }
-
- if err != plumbing.ErrReferenceNotFound {
- return err
- }
-
- if opts.Hash.IsZero() {
- ref, err := w.r.Head()
- if err != nil {
- return err
- }
-
- opts.Hash = ref.Hash()
- }
-
- return w.r.Storer.SetReference(
- plumbing.NewHashReference(opts.Branch, opts.Hash),
- )
-}
-
-func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) {
- if !opts.Hash.IsZero() {
- return opts.Hash, nil
- }
-
- b, err := w.r.Reference(opts.Branch, true)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- if !b.Name().IsTag() {
- return b.Hash(), nil
- }
-
- o, err := w.r.Object(plumbing.AnyObject, b.Hash())
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- switch o := o.(type) {
- case *object.Tag:
- if o.TargetType != plumbing.CommitObject {
- return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType)
- }
-
- return o.Target, nil
- case *object.Commit:
- return o.Hash, nil
- }
-
- return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type())
-}
-
-func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error {
- head := plumbing.NewHashReference(plumbing.HEAD, commit)
- return w.r.Storer.SetReference(head)
-}
-
-func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error {
- target, err := w.r.Storer.Reference(branch)
- if err != nil {
- return err
- }
-
- var head *plumbing.Reference
- if target.Name().IsBranch() {
- head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name())
- } else {
- head = plumbing.NewHashReference(plumbing.HEAD, commit)
- }
-
- return w.r.Storer.SetReference(head)
-}
-
-// Reset the worktree to a specified state.
-func (w *Worktree) Reset(opts *ResetOptions) error {
- if err := opts.Validate(w.r); err != nil {
- return err
- }
-
- if opts.Mode == MergeReset {
- unstaged, err := w.containsUnstagedChanges()
- if err != nil {
- return err
- }
-
- if unstaged {
- return ErrUnstagedChanges
- }
- }
-
- if err := w.setHEADCommit(opts.Commit); err != nil {
- return err
- }
-
- if opts.Mode == SoftReset {
- return nil
- }
-
- t, err := w.getTreeFromCommitHash(opts.Commit)
- if err != nil {
- return err
- }
-
- if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset {
- if err := w.resetIndex(t); err != nil {
- return err
- }
- }
-
- if opts.Mode == MergeReset || opts.Mode == HardReset {
- if err := w.resetWorktree(t); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *Worktree) resetIndex(t *object.Tree) error {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
- b := newIndexBuilder(idx)
-
- changes, err := w.diffTreeWithStaging(t, true)
- if err != nil {
- return err
- }
-
- for _, ch := range changes {
- a, err := ch.Action()
- if err != nil {
- return err
- }
-
- var name string
- var e *object.TreeEntry
-
- switch a {
- case merkletrie.Modify, merkletrie.Insert:
- name = ch.To.String()
- e, err = t.FindEntry(name)
- if err != nil {
- return err
- }
- case merkletrie.Delete:
- name = ch.From.String()
- }
-
- b.Remove(name)
- if e == nil {
- continue
- }
-
- b.Add(&index.Entry{
- Name: name,
- Hash: e.Hash,
- Mode: e.Mode,
- })
-
- }
-
- b.Write(idx)
- return w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) resetWorktree(t *object.Tree) error {
- changes, err := w.diffStagingWithWorktree(true)
- if err != nil {
- return err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
- b := newIndexBuilder(idx)
-
- for _, ch := range changes {
- if err := w.checkoutChange(ch, t, b); err != nil {
- return err
- }
- }
-
- b.Write(idx)
- return w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error {
- a, err := ch.Action()
- if err != nil {
- return err
- }
-
- var e *object.TreeEntry
- var name string
- var isSubmodule bool
-
- switch a {
- case merkletrie.Modify, merkletrie.Insert:
- name = ch.To.String()
- e, err = t.FindEntry(name)
- if err != nil {
- return err
- }
-
- isSubmodule = e.Mode == filemode.Submodule
- case merkletrie.Delete:
- return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String())
- }
-
- if isSubmodule {
- return w.checkoutChangeSubmodule(name, a, e, idx)
- }
-
- return w.checkoutChangeRegularFile(name, a, t, e, idx)
-}
-
-func (w *Worktree) containsUnstagedChanges() (bool, error) {
- ch, err := w.diffStagingWithWorktree(false)
- if err != nil {
- return false, err
- }
-
- for _, c := range ch {
- a, err := c.Action()
- if err != nil {
- return false, err
- }
-
- if a == merkletrie.Insert {
- continue
- }
-
- return true, nil
- }
-
- return false, nil
-}
-
-func (w *Worktree) setHEADCommit(commit plumbing.Hash) error {
- head, err := w.r.Reference(plumbing.HEAD, false)
- if err != nil {
- return err
- }
-
- if head.Type() == plumbing.HashReference {
- head = plumbing.NewHashReference(plumbing.HEAD, commit)
- return w.r.Storer.SetReference(head)
- }
-
- branch, err := w.r.Reference(head.Target(), false)
- if err != nil {
- return err
- }
-
- if !branch.Name().IsBranch() {
- return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type())
- }
-
- branch = plumbing.NewHashReference(branch.Name(), commit)
- return w.r.Storer.SetReference(branch)
-}
-
-func (w *Worktree) checkoutChangeSubmodule(name string,
- a merkletrie.Action,
- e *object.TreeEntry,
- idx *indexBuilder,
-) error {
- switch a {
- case merkletrie.Modify:
- sub, err := w.Submodule(name)
- if err != nil {
- return err
- }
-
- if !sub.initialized {
- return nil
- }
-
- return w.addIndexFromTreeEntry(name, e, idx)
- case merkletrie.Insert:
- mode, err := e.Mode.ToOSFileMode()
- if err != nil {
- return err
- }
-
- if err := w.Filesystem.MkdirAll(name, mode); err != nil {
- return err
- }
-
- return w.addIndexFromTreeEntry(name, e, idx)
- }
-
- return nil
-}
-
-func (w *Worktree) checkoutChangeRegularFile(name string,
- a merkletrie.Action,
- t *object.Tree,
- e *object.TreeEntry,
- idx *indexBuilder,
-) error {
- switch a {
- case merkletrie.Modify:
- idx.Remove(name)
-
-		// To apply permission changes the file is deleted and recreated,
-		// since billy doesn't implement chmod.
- if err := w.Filesystem.Remove(name); err != nil {
- return err
- }
-
- fallthrough
- case merkletrie.Insert:
- f, err := t.File(name)
- if err != nil {
- return err
- }
-
- if err := w.checkoutFile(f); err != nil {
- return err
- }
-
- return w.addIndexFromFile(name, e.Hash, idx)
- }
-
- return nil
-}
-
-var copyBufferPool = sync.Pool{
- New: func() interface{} {
- return make([]byte, 32*1024)
- },
-}
-
-func (w *Worktree) checkoutFile(f *object.File) (err error) {
- mode, err := f.Mode.ToOSFileMode()
- if err != nil {
- return
- }
-
- if mode&os.ModeSymlink != 0 {
- return w.checkoutFileSymlink(f)
- }
-
- from, err := f.Reader()
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(from, &err)
-
- to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(to, &err)
- buf := copyBufferPool.Get().([]byte)
- _, err = io.CopyBuffer(to, from, buf)
- copyBufferPool.Put(buf)
- return
-}
-
-func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
- from, err := f.Reader()
- if err != nil {
- return
- }
-
- defer ioutil.CheckClose(from, &err)
-
- bytes, err := stdioutil.ReadAll(from)
- if err != nil {
- return
- }
-
- err = w.Filesystem.Symlink(string(bytes), f.Name)
-
-	// On Windows, creating a symlink may fail for non-admin users.
-	// Follow Git for Windows' behavior and write the link target as a
-	// regular file instead.
- if err != nil && isSymlinkWindowsNonAdmin(err) {
- mode, _ := f.Mode.ToOSFileMode()
-
- to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(to, &err)
-
- _, err = to.Write(bytes)
- return err
- }
- return
-}
-
-func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error {
- idx.Remove(name)
- idx.Add(&index.Entry{
- Hash: f.Hash,
- Name: name,
- Mode: filemode.Submodule,
- })
- return nil
-}
-
-func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error {
- idx.Remove(name)
- fi, err := w.Filesystem.Lstat(name)
- if err != nil {
- return err
- }
-
- mode, err := filemode.NewFromOSFileMode(fi.Mode())
- if err != nil {
- return err
- }
-
- e := &index.Entry{
- Hash: h,
- Name: name,
- Mode: mode,
- ModifiedAt: fi.ModTime(),
- Size: uint32(fi.Size()),
- }
-
-	// If the FileInfo.Sys() comes from the os package, the ctime, dev,
-	// inode, uid and gid can be retrieved; otherwise this doesn't apply.
- if fillSystemInfo != nil {
- fillSystemInfo(e, fi.Sys())
- }
- idx.Add(e)
- return nil
-}
-
-func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) {
- c, err := w.r.CommitObject(commit)
- if err != nil {
- return nil, err
- }
-
- return c.Tree()
-}
-
-var fillSystemInfo func(e *index.Entry, sys interface{})
-
-const gitmodulesFile = ".gitmodules"
-
-// Submodule returns the submodule with the given name
-func (w *Worktree) Submodule(name string) (*Submodule, error) {
- l, err := w.Submodules()
- if err != nil {
- return nil, err
- }
-
- for _, m := range l {
- if m.Config().Name == name {
- return m, nil
- }
- }
-
- return nil, ErrSubmoduleNotFound
-}
-
-// Submodules returns all the available submodules
-func (w *Worktree) Submodules() (Submodules, error) {
- l := make(Submodules, 0)
- m, err := w.readGitmodulesFile()
- if err != nil || m == nil {
- return l, err
- }
-
- c, err := w.r.Config()
- if err != nil {
- return nil, err
- }
-
- for _, s := range m.Submodules {
- l = append(l, w.newSubmodule(s, c.Submodules[s.Name]))
- }
-
- return l, nil
-}
-
-func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule {
- m := &Submodule{w: w}
- m.initialized = fromConfig != nil
-
- if !m.initialized {
- m.c = fromModules
- return m
- }
-
- m.c = fromConfig
- m.c.Path = fromModules.Path
- return m
-}
-
-func (w *Worktree) isSymlink(path string) bool {
- if s, err := w.Filesystem.Lstat(path); err == nil {
- return s.Mode()&os.ModeSymlink != 0
- }
- return false
-}
-
-func (w *Worktree) readGitmodulesFile() (*config.Modules, error) {
- if w.isSymlink(gitmodulesFile) {
- return nil, ErrGitModulesSymlink
- }
-
- f, err := w.Filesystem.Open(gitmodulesFile)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, nil
- }
-
- return nil, err
- }
-
- defer f.Close()
- input, err := stdioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- m := config.NewModules()
- return m, m.Unmarshal(input)
-}
-
-// Clean the worktree by removing untracked files.
-// When Dir is set, empty directories are removed as well - this is what
-// `git clean -f -d .` does.
-func (w *Worktree) Clean(opts *CleanOptions) error {
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- root := ""
- files, err := w.Filesystem.ReadDir(root)
- if err != nil {
- return err
- }
- return w.doClean(s, opts, root, files)
-}
-
-func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
- for _, fi := range files {
- if fi.Name() == GitDirName {
- continue
- }
-
- // relative path under the root
- path := filepath.Join(dir, fi.Name())
- if fi.IsDir() {
- if !opts.Dir {
- continue
- }
-
- subfiles, err := w.Filesystem.ReadDir(path)
- if err != nil {
- return err
- }
- err = w.doClean(status, opts, path, subfiles)
- if err != nil {
- return err
- }
- } else {
- if status.IsUntracked(path) {
- if err := w.Filesystem.Remove(path); err != nil {
- return err
- }
- }
- }
- }
-
- if opts.Dir {
- return doCleanDirectories(w.Filesystem, dir)
- }
- return nil
-}
-
-// GrepResult is the structure of a grep result.
-type GrepResult struct {
-	// FileName is the name of the file that contains the match.
-	FileName string
- // LineNumber is the line number of a file at which a match was found.
- LineNumber int
- // Content is the content of the file at the matching line.
- Content string
- // TreeName is the name of the tree (reference name/commit hash) at
- // which the match was performed.
- TreeName string
-}
-
-func (gr GrepResult) String() string {
- return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content)
-}
-
-// Grep performs grep on a worktree.
-func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) {
- if err := opts.Validate(w); err != nil {
- return nil, err
- }
-
- // Obtain commit hash from options (CommitHash or ReferenceName).
- var commitHash plumbing.Hash
- // treeName contains the value of TreeName in GrepResult.
- var treeName string
-
- if opts.ReferenceName != "" {
- ref, err := w.r.Reference(opts.ReferenceName, true)
- if err != nil {
- return nil, err
- }
- commitHash = ref.Hash()
- treeName = opts.ReferenceName.String()
- } else if !opts.CommitHash.IsZero() {
- commitHash = opts.CommitHash
- treeName = opts.CommitHash.String()
- }
-
- // Obtain a tree from the commit hash and get a tracked files iterator from
- // the tree.
- tree, err := w.getTreeFromCommitHash(commitHash)
- if err != nil {
- return nil, err
- }
- fileiter := tree.Files()
-
- return findMatchInFiles(fileiter, treeName, opts)
-}
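-
-// grepExample is an illustrative sketch added by the editor, not part of
-// the original file: it searches the tree at HEAD for a single pattern
-// (assumes a "regexp" import).
-func grepExample(w *Worktree, pattern *regexp.Regexp) ([]GrepResult, error) {
-	return w.Grep(&GrepOptions{
-		ReferenceName: plumbing.HEAD,
-		Patterns:      []*regexp.Regexp{pattern},
-	})
-}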
-
-// findMatchInFiles takes a FileIter, worktree name and GrepOptions, and
-// returns a slice of GrepResult containing the result of regex pattern matching
-// in the content of all the files.
-func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) {
- var results []GrepResult
-
- err := fileiter.ForEach(func(file *object.File) error {
- var fileInPathSpec bool
-
- // When no pathspecs are provided, search all the files.
- if len(opts.PathSpecs) == 0 {
- fileInPathSpec = true
- }
-
-		// Check if the file name matches the pathspec. Break out of the
-		// loop once a match is found.
- for _, pathSpec := range opts.PathSpecs {
- if pathSpec != nil && pathSpec.MatchString(file.Name) {
- fileInPathSpec = true
- break
- }
- }
-
-		// If the file does not match any of the pathspecs, skip it.
- if !fileInPathSpec {
- return nil
- }
-
- grepResults, err := findMatchInFile(file, treeName, opts)
- if err != nil {
- return err
- }
- results = append(results, grepResults...)
-
- return nil
- })
-
- return results, err
-}
-
-// findMatchInFile takes a single File, worktree name and GrepOptions,
-// and returns a slice of GrepResult containing the result of regex pattern
-// matching in the given file.
-func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) {
- var grepResults []GrepResult
-
- content, err := file.Contents()
- if err != nil {
- return grepResults, err
- }
-
- // Split the file content and parse line-by-line.
- contentByLine := strings.Split(content, "\n")
- for lineNum, cnt := range contentByLine {
- addToResult := false
-
- // Match the patterns and content. Break out of the loop once a
- // match is found.
- for _, pattern := range opts.Patterns {
- if pattern != nil && pattern.MatchString(cnt) {
- // Add to result only if invert match is not enabled.
- if !opts.InvertMatch {
- addToResult = true
- break
- }
- } else if opts.InvertMatch {
- // If matching fails, and invert match is enabled, add to
- // results.
- addToResult = true
- break
- }
- }
-
- if addToResult {
- grepResults = append(grepResults, GrepResult{
- FileName: file.Name,
- LineNumber: lineNum + 1,
- Content: cnt,
- TreeName: treeName,
- })
- }
- }
-
- return grepResults, nil
-}
-
-func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error {
- if err := util.RemoveAll(fs, name); err != nil {
- return err
- }
-
- dir := filepath.Dir(name)
- return doCleanDirectories(fs, dir)
-}
-
-// doCleanDirectories removes the given directory if it is empty.
-func doCleanDirectories(fs billy.Filesystem, dir string) error {
- files, err := fs.ReadDir(dir)
- if err != nil {
- return err
- }
- if len(files) == 0 {
- return fs.Remove(dir)
- }
- return nil
-}
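-
-// cleanExample is an illustrative sketch added by the editor, not part of
-// the original file: it removes untracked files and any directories left
-// empty, mirroring `git clean -f -d`.
-func cleanExample(w *Worktree) error {
-	return w.Clean(&CleanOptions{Dir: true})
-}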
-
-type indexBuilder struct {
- entries map[string]*index.Entry
-}
-
-func newIndexBuilder(idx *index.Index) *indexBuilder {
- entries := make(map[string]*index.Entry, len(idx.Entries))
- for _, e := range idx.Entries {
- entries[e.Name] = e
- }
- return &indexBuilder{
- entries: entries,
- }
-}
-
-func (b *indexBuilder) Write(idx *index.Index) {
- idx.Entries = idx.Entries[:0]
- for _, e := range b.entries {
- idx.Entries = append(idx.Entries, e)
- }
-}
-
-func (b *indexBuilder) Add(e *index.Entry) {
- b.entries[e.Name] = e
-}
-
-func (b *indexBuilder) Remove(name string) {
- delete(b.entries, filepath.ToSlash(name))
-}
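-
-// rebuildIndexExample is an illustrative sketch added by the editor, not
-// part of the original file: it shows the read-modify-write cycle that the
-// reset helpers above follow when editing the index via indexBuilder.
-func rebuildIndexExample(w *Worktree, e *index.Entry) error {
-	idx, err := w.r.Storer.Index()
-	if err != nil {
-		return err
-	}
-	b := newIndexBuilder(idx)
-	b.Remove(e.Name) // drop any stale entry for the same path
-	b.Add(e)         // register the fresh entry
-	b.Write(idx)     // flatten the map back into idx.Entries
-	return w.r.Storer.SetIndex(idx)
-}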
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go
deleted file mode 100644
index 9ff670e0f1..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build darwin freebsd netbsd
-
-package git
-
-import (
- "syscall"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec))
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_commit.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_commit.go
deleted file mode 100644
index 673eb16786..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_commit.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package git
-
-import (
- "bytes"
- "path"
- "sort"
- "strings"
-
- "golang.org/x/crypto/openpgp"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/storage"
-
- "gopkg.in/src-d/go-billy.v4"
-)
-
-// Commit stores the current contents of the index in a new commit along with
-// a log message from the user describing the changes.
-func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) {
- if err := opts.Validate(w.r); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if opts.All {
- if err := w.autoAddModifiedAndDeleted(); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- h := &buildTreeHelper{
- fs: w.Filesystem,
- s: w.r.Storer,
- }
-
- tree, err := h.BuildTree(idx)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- commit, err := w.buildCommitObject(msg, opts, tree)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return commit, w.updateHEAD(commit)
-}
-
-func (w *Worktree) autoAddModifiedAndDeleted() error {
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- for path, fs := range s {
- if fs.Worktree != Modified && fs.Worktree != Deleted {
- continue
- }
-
- if _, err := w.Add(path); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *Worktree) updateHEAD(commit plumbing.Hash) error {
- head, err := w.r.Storer.Reference(plumbing.HEAD)
- if err != nil {
- return err
- }
-
- name := plumbing.HEAD
- if head.Type() != plumbing.HashReference {
- name = head.Target()
- }
-
- ref := plumbing.NewHashReference(name, commit)
- return w.r.Storer.SetReference(ref)
-}
-
-func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) {
- commit := &object.Commit{
- Author: *opts.Author,
- Committer: *opts.Committer,
- Message: msg,
- TreeHash: tree,
- ParentHashes: opts.Parents,
- }
-
- if opts.SignKey != nil {
- sig, err := w.buildCommitSignature(commit, opts.SignKey)
- if err != nil {
- return plumbing.ZeroHash, err
- }
- commit.PGPSignature = sig
- }
-
- obj := w.r.Storer.NewEncodedObject()
- if err := commit.Encode(obj); err != nil {
- return plumbing.ZeroHash, err
- }
- return w.r.Storer.SetEncodedObject(obj)
-}
-
-func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) {
- encoded := &plumbing.MemoryObject{}
- if err := commit.Encode(encoded); err != nil {
- return "", err
- }
- r, err := encoded.Reader()
- if err != nil {
- return "", err
- }
- var b bytes.Buffer
- if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil {
- return "", err
- }
- return b.String(), nil
-}
-
-// buildTreeHelper converts a given index.Index file into multiple git objects
-// reading the blobs from the given filesystem and creating the trees from the
-// index structure. The created objects are pushed to a given Storer.
-type buildTreeHelper struct {
- fs billy.Filesystem
- s storage.Storer
-
- trees map[string]*object.Tree
- entries map[string]*object.TreeEntry
-}
-
-// BuildTree builds the tree objects and pushes them to the storer; the hash
-// of the root tree is returned.
-func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) {
- const rootNode = ""
- h.trees = map[string]*object.Tree{rootNode: {}}
- h.entries = map[string]*object.TreeEntry{}
-
- for _, e := range idx.Entries {
- if err := h.commitIndexEntry(e); err != nil {
- return plumbing.ZeroHash, err
- }
- }
-
- return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode])
-}
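-
-// buildTreeExample is an illustrative sketch added by the editor, not part
-// of the original file: it turns the current index into tree objects and
-// returns the root tree hash, exactly as Commit does above.
-func buildTreeExample(w *Worktree) (plumbing.Hash, error) {
-	idx, err := w.r.Storer.Index()
-	if err != nil {
-		return plumbing.ZeroHash, err
-	}
-	h := &buildTreeHelper{fs: w.Filesystem, s: w.r.Storer}
-	return h.BuildTree(idx)
-}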
-
-func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error {
- parts := strings.Split(e.Name, "/")
-
- var fullpath string
- for _, part := range parts {
- parent := fullpath
- fullpath = path.Join(fullpath, part)
-
- h.doBuildTree(e, parent, fullpath)
- }
-
- return nil
-}
-
-func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) {
- if _, ok := h.trees[fullpath]; ok {
- return
- }
-
- if _, ok := h.entries[fullpath]; ok {
- return
- }
-
- te := object.TreeEntry{Name: path.Base(fullpath)}
-
- if fullpath == e.Name {
- te.Mode = e.Mode
- te.Hash = e.Hash
- } else {
- te.Mode = filemode.Dir
- h.trees[fullpath] = &object.Tree{}
- }
-
- h.trees[parent].Entries = append(h.trees[parent].Entries, te)
-}
-
-type sortableEntries []object.TreeEntry
-
-func (sortableEntries) sortName(te object.TreeEntry) string {
- if te.Mode == filemode.Dir {
- return te.Name + "/"
- }
- return te.Name
-}
-func (se sortableEntries) Len() int { return len(se) }
-func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) }
-func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] }
-
-func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) {
- sort.Sort(sortableEntries(t.Entries))
- for i, e := range t.Entries {
- if e.Mode != filemode.Dir && !e.Hash.IsZero() {
- continue
- }
-
- path := path.Join(parent, e.Name)
-
- var err error
- e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path])
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- t.Entries[i] = e
- }
-
- o := h.s.NewEncodedObject()
- if err := t.Encode(o); err != nil {
- return plumbing.ZeroHash, err
- }
-
- return h.s.SetEncodedObject(o)
-}
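-
-// commitAllExample is an illustrative sketch added by the editor, not part
-// of the original file: a minimal Commit call mirroring `git commit -a`.
-// The signature values are placeholders, and a "time" import is assumed.
-func commitAllExample(w *Worktree) (plumbing.Hash, error) {
-	sig := &object.Signature{
-		Name:  "Jane Doe",
-		Email: "jane@example.com",
-		When:  time.Now(),
-	}
-	// All stages every modified and deleted tracked file before committing.
-	return w.Commit("update docs", &CommitOptions{All: true, Author: sig, Committer: sig})
-}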
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go
deleted file mode 100644
index 891cb1cf39..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build linux
-
-package git
-
-import (
- "syscall"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec))
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_status.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_status.go
deleted file mode 100644
index 16ce937077..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_status.go
+++ /dev/null
@@ -1,660 +0,0 @@
-package git
-
-import (
- "bytes"
- "errors"
- "io"
- "os"
- "path"
- "path/filepath"
-
- "gopkg.in/src-d/go-billy.v4/util"
- "gopkg.in/src-d/go-git.v4/plumbing"
- "gopkg.in/src-d/go-git.v4/plumbing/filemode"
- "gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
- "gopkg.in/src-d/go-git.v4/plumbing/object"
- "gopkg.in/src-d/go-git.v4/utils/ioutil"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem"
- mindex "gopkg.in/src-d/go-git.v4/utils/merkletrie/index"
- "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
-)
-
-var (
-	// ErrDestinationExists in a Move operation means that the target exists on
-	// the worktree.
-	ErrDestinationExists = errors.New("destination exists")
-	// ErrGlobNoMatches in an AddGlob operation means that the glob pattern did
-	// not match any files in the worktree.
-	ErrGlobNoMatches = errors.New("glob pattern did not match any files")
-)
-
-// Status returns the working tree status.
-func (w *Worktree) Status() (Status, error) {
- var hash plumbing.Hash
-
- ref, err := w.r.Head()
- if err != nil && err != plumbing.ErrReferenceNotFound {
- return nil, err
- }
-
- if err == nil {
- hash = ref.Hash()
- }
-
- return w.status(hash)
-}
-
-func (w *Worktree) status(commit plumbing.Hash) (Status, error) {
- s := make(Status)
-
- left, err := w.diffCommitWithStaging(commit, false)
- if err != nil {
- return nil, err
- }
-
- for _, ch := range left {
- a, err := ch.Action()
- if err != nil {
- return nil, err
- }
-
- fs := s.File(nameFromAction(&ch))
- fs.Worktree = Unmodified
-
- switch a {
- case merkletrie.Delete:
- s.File(ch.From.String()).Staging = Deleted
- case merkletrie.Insert:
- s.File(ch.To.String()).Staging = Added
- case merkletrie.Modify:
- s.File(ch.To.String()).Staging = Modified
- }
- }
-
- right, err := w.diffStagingWithWorktree(false)
- if err != nil {
- return nil, err
- }
-
- for _, ch := range right {
- a, err := ch.Action()
- if err != nil {
- return nil, err
- }
-
- fs := s.File(nameFromAction(&ch))
- if fs.Staging == Untracked {
- fs.Staging = Unmodified
- }
-
- switch a {
- case merkletrie.Delete:
- fs.Worktree = Deleted
- case merkletrie.Insert:
- fs.Worktree = Untracked
- fs.Staging = Untracked
- case merkletrie.Modify:
- fs.Worktree = Modified
- }
- }
-
- return s, nil
-}
-
-func nameFromAction(ch *merkletrie.Change) string {
- name := ch.To.String()
- if name == "" {
- return ch.From.String()
- }
-
- return name
-}
-
-func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- from := mindex.NewRootNode(idx)
- submodules, err := w.getSubmodulesStatus()
- if err != nil {
- return nil, err
- }
-
- to := filesystem.NewRootNode(w.Filesystem, submodules)
-
- var c merkletrie.Changes
- if reverse {
- c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals)
- } else {
- c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals)
- }
-
- if err != nil {
- return nil, err
- }
-
- return w.excludeIgnoredChanges(c), nil
-}
-
-func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes {
- patterns, err := gitignore.ReadPatterns(w.Filesystem, nil)
- if err != nil {
- return changes
- }
-
- patterns = append(patterns, w.Excludes...)
-
- if len(patterns) == 0 {
- return changes
- }
-
- m := gitignore.NewMatcher(patterns)
-
- var res merkletrie.Changes
- for _, ch := range changes {
- var path []string
- for _, n := range ch.To {
- path = append(path, n.Name())
- }
- if len(path) == 0 {
- for _, n := range ch.From {
- path = append(path, n.Name())
- }
- }
- if len(path) != 0 {
- isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir())
- if m.Match(path, isDir) {
- continue
- }
- }
- res = append(res, ch)
- }
- return res
-}
-
-func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) {
- o := map[string]plumbing.Hash{}
-
- sub, err := w.Submodules()
- if err != nil {
- return nil, err
- }
-
- status, err := sub.Status()
- if err != nil {
- return nil, err
- }
-
- for _, s := range status {
- if s.Current.IsZero() {
- o[s.Path] = s.Expected
- continue
- }
-
- o[s.Path] = s.Current
- }
-
- return o, nil
-}
-
-func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) {
- var t *object.Tree
- if !commit.IsZero() {
- c, err := w.r.CommitObject(commit)
- if err != nil {
- return nil, err
- }
-
- t, err = c.Tree()
- if err != nil {
- return nil, err
- }
- }
-
- return w.diffTreeWithStaging(t, reverse)
-}
-
-func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) {
- var from noder.Noder
- if t != nil {
- from = object.NewTreeRootNode(t)
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return nil, err
- }
-
- to := mindex.NewRootNode(idx)
-
- if reverse {
- return merkletrie.DiffTree(to, from, diffTreeIsEquals)
- }
-
- return merkletrie.DiffTree(from, to, diffTreeIsEquals)
-}
-
-var emptyNoderHash = make([]byte, 24)
-
-// diffTreeIsEquals is an implementation of noder.Equals, used to compare
-// noder.Noder values; it compares the content and the length of the hashes.
-//
-// Since some noder.Noder implementations don't compute a hash for some
-// directories, if either of the hashes is a 24-byte slice of zero values
-// the comparison is skipped and the hashes are taken as different.
-func diffTreeIsEquals(a, b noder.Hasher) bool {
- hashA := a.Hash()
- hashB := b.Hash()
-
- if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) {
- return false
- }
-
- return bytes.Equal(hashA, hashB)
-}
-
-// Add adds the file contents of a file in the worktree to the index. If the
-// file is already staged in the index, no error is returned. If a file deleted
-// from the worktree is given, the file is removed from the index. If a
-// directory is given, all of its files and sub-directories are added to the
-// index recursively. If any of the files is already staged in the index,
-// no error is returned. When path is a file, the blob.Hash is returned.
-func (w *Worktree) Add(path string) (plumbing.Hash, error) {
- // TODO(mcuadros): remove plumbing.Hash from signature at v5.
- s, err := w.Status()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var h plumbing.Hash
- var added bool
-
- fi, err := w.Filesystem.Lstat(path)
- if err != nil || !fi.IsDir() {
- added, h, err = w.doAddFile(idx, s, path)
- } else {
- added, err = w.doAddDirectory(idx, s, path)
- }
-
- if err != nil {
- return h, err
- }
-
- if !added {
- return h, nil
- }
-
- return h, w.r.Storer.SetIndex(idx)
-}
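-
-// addExample is an illustrative sketch added by the editor, not part of the
-// original file: it stages a single file and reads back its staging status.
-func addExample(w *Worktree, path string) (StatusCode, error) {
-	if _, err := w.Add(path); err != nil {
-		return Untracked, err
-	}
-	s, err := w.Status()
-	if err != nil {
-		return Untracked, err
-	}
-	// After a successful Add the staging column reports Added or Modified.
-	return s.File(path).Staging, nil
-}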
-
-func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string) (added bool, err error) {
- files, err := w.Filesystem.ReadDir(directory)
- if err != nil {
- return false, err
- }
-
- for _, file := range files {
- name := path.Join(directory, file.Name())
-
- var a bool
- if file.IsDir() {
- if file.Name() == GitDirName {
- // ignore special git directory
- continue
- }
- a, err = w.doAddDirectory(idx, s, name)
- } else {
- a, _, err = w.doAddFile(idx, s, name)
- }
-
- if err != nil {
- return
- }
-
- if !added && a {
- added = true
- }
- }
-
- return
-}
-
-// AddGlob adds all paths, matching pattern, to the index. If pattern matches a
-// directory path, all directory contents are added to the index recursively. No
-// error is returned if all matching paths are already staged in the index.
-func (w *Worktree) AddGlob(pattern string) error {
- files, err := util.Glob(w.Filesystem, pattern)
- if err != nil {
- return err
- }
-
- if len(files) == 0 {
- return ErrGlobNoMatches
- }
-
- s, err := w.Status()
- if err != nil {
- return err
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- var saveIndex bool
- for _, file := range files {
- fi, err := w.Filesystem.Lstat(file)
- if err != nil {
- return err
- }
-
- var added bool
- if fi.IsDir() {
- added, err = w.doAddDirectory(idx, s, file)
- } else {
- added, _, err = w.doAddFile(idx, s, file)
- }
-
- if err != nil {
- return err
- }
-
- if !saveIndex && added {
- saveIndex = true
- }
- }
-
- if saveIndex {
- return w.r.Storer.SetIndex(idx)
- }
-
- return nil
-}
-
-// doAddFile creates a new blob from path and updates the index; added is true
-// if the file added differs from the entry already in the index.
-func (w *Worktree) doAddFile(idx *index.Index, s Status, path string) (added bool, h plumbing.Hash, err error) {
- if s.File(path).Worktree == Unmodified {
- return false, h, nil
- }
-
- h, err = w.copyFileToStorage(path)
- if err != nil {
- if os.IsNotExist(err) {
- added = true
- h, err = w.deleteFromIndex(idx, path)
- }
-
- return
- }
-
- if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil {
- return false, h, err
- }
-
- return true, h, err
-}
-
-func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) {
- fi, err := w.Filesystem.Lstat(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- obj := w.r.Storer.NewEncodedObject()
- obj.SetType(plumbing.BlobObject)
- obj.SetSize(fi.Size())
-
- writer, err := obj.Writer()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- defer ioutil.CheckClose(writer, &err)
-
- if fi.Mode()&os.ModeSymlink != 0 {
- err = w.fillEncodedObjectFromSymlink(writer, path, fi)
- } else {
- err = w.fillEncodedObjectFromFile(writer, path, fi)
- }
-
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return w.r.Storer.SetEncodedObject(obj)
-}
-
-func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) {
- src, err := w.Filesystem.Open(path)
- if err != nil {
- return err
- }
-
- defer ioutil.CheckClose(src, &err)
-
- if _, err := io.Copy(dst, src); err != nil {
- return err
- }
-
- return err
-}
-
-func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error {
- target, err := w.Filesystem.Readlink(path)
- if err != nil {
- return err
- }
-
- _, err = dst.Write([]byte(target))
- return err
-}
-
-func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
- e, err := idx.Entry(filename)
- if err != nil && err != index.ErrEntryNotFound {
- return err
- }
-
- if err == index.ErrEntryNotFound {
- return w.doAddFileToIndex(idx, filename, h)
- }
-
- return w.doUpdateFileToIndex(e, filename, h)
-}
-
-func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
- return w.doUpdateFileToIndex(idx.Add(filename), filename, h)
-}
-
-func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error {
- info, err := w.Filesystem.Lstat(filename)
- if err != nil {
- return err
- }
-
- e.Hash = h
- e.ModifiedAt = info.ModTime()
- e.Mode, err = filemode.NewFromOSFileMode(info.Mode())
- if err != nil {
- return err
- }
-
- if e.Mode.IsRegular() {
- e.Size = uint32(info.Size())
- }
-
- fillSystemInfo(e, info.Sys())
- return nil
-}
-
-// Remove removes files from the working tree and from the index.
-func (w *Worktree) Remove(path string) (plumbing.Hash, error) {
- // TODO(mcuadros): remove plumbing.Hash from signature at v5.
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- var h plumbing.Hash
-
- fi, err := w.Filesystem.Lstat(path)
- if err != nil || !fi.IsDir() {
- h, err = w.doRemoveFile(idx, path)
- } else {
- _, err = w.doRemoveDirectory(idx, path)
- }
- if err != nil {
- return h, err
- }
-
- return h, w.r.Storer.SetIndex(idx)
-}
-
-func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) {
- files, err := w.Filesystem.ReadDir(directory)
- if err != nil {
- return false, err
- }
-
- for _, file := range files {
- name := path.Join(directory, file.Name())
-
- var r bool
- if file.IsDir() {
- r, err = w.doRemoveDirectory(idx, name)
- } else {
- _, err = w.doRemoveFile(idx, name)
- if err == index.ErrEntryNotFound {
- err = nil
- }
- }
-
- if err != nil {
- return
- }
-
- if !removed && r {
- removed = true
- }
- }
-
- err = w.removeEmptyDirectory(directory)
- return
-}
-
-func (w *Worktree) removeEmptyDirectory(path string) error {
- files, err := w.Filesystem.ReadDir(path)
- if err != nil {
- return err
- }
-
- if len(files) != 0 {
- return nil
- }
-
- return w.Filesystem.Remove(path)
-}
-
-func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) {
- hash, err := w.deleteFromIndex(idx, path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return hash, w.deleteFromFilesystem(path)
-}
-
-func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) {
- e, err := idx.Remove(path)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- return e.Hash, nil
-}
-
-func (w *Worktree) deleteFromFilesystem(path string) error {
- err := w.Filesystem.Remove(path)
- if os.IsNotExist(err) {
- return nil
- }
-
- return err
-}
-
-// RemoveGlob removes all paths, matching pattern, from the index. If pattern
-// matches a directory path, all directory contents are removed from the index
-// recursively.
-func (w *Worktree) RemoveGlob(pattern string) error {
- idx, err := w.r.Storer.Index()
- if err != nil {
- return err
- }
-
- entries, err := idx.Glob(pattern)
- if err != nil {
- return err
- }
-
- for _, e := range entries {
- file := filepath.FromSlash(e.Name)
- if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) {
- return err
- }
-
- if _, err := w.doRemoveFile(idx, file); err != nil {
- return err
- }
-
- dir, _ := filepath.Split(file)
- if err := w.removeEmptyDirectory(dir); err != nil {
- return err
- }
- }
-
- return w.r.Storer.SetIndex(idx)
-}
-
-// Move moves or renames a file in the worktree and the index; directories are
-// not supported.
-func (w *Worktree) Move(from, to string) (plumbing.Hash, error) {
- // TODO(mcuadros): support directories and/or implement support for glob
- if _, err := w.Filesystem.Lstat(from); err != nil {
- return plumbing.ZeroHash, err
- }
-
- if _, err := w.Filesystem.Lstat(to); err == nil {
- return plumbing.ZeroHash, ErrDestinationExists
- }
-
- idx, err := w.r.Storer.Index()
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- hash, err := w.deleteFromIndex(idx, from)
- if err != nil {
- return plumbing.ZeroHash, err
- }
-
- if err := w.Filesystem.Rename(from, to); err != nil {
- return hash, err
- }
-
- if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil {
- return hash, err
- }
-
- return hash, w.r.Storer.SetIndex(idx)
-}
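-
-// moveExample is an illustrative sketch added by the editor, not part of the
-// original file: it renames a tracked file; Move updates both the worktree
-// and the index and fails with ErrDestinationExists if the target exists.
-func moveExample(w *Worktree, from, to string) error {
-	_, err := w.Move(from, to)
-	return err
-}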
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go
deleted file mode 100644
index d632767667..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build openbsd dragonfly solaris
-
-package git
-
-import (
- "syscall"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Stat_t); ok {
- e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec))
- e.Dev = uint32(os.Dev)
- e.Inode = uint32(os.Ino)
- e.GID = os.Gid
- e.UID = os.Uid
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- return false
-}
diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_windows.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_windows.go
deleted file mode 100644
index 1bef6f759c..0000000000
--- a/vendor/gopkg.in/src-d/go-git.v4/worktree_windows.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package git
-
-import (
- "os"
- "syscall"
- "time"
-
- "gopkg.in/src-d/go-git.v4/plumbing/format/index"
-)
-
-func init() {
- fillSystemInfo = func(e *index.Entry, sys interface{}) {
- if os, ok := sys.(*syscall.Win32FileAttributeData); ok {
- seconds := os.CreationTime.Nanoseconds() / 1000000000
- nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000
- e.CreatedAt = time.Unix(seconds, nanoseconds)
- }
- }
-}
-
-func isSymlinkWindowsNonAdmin(err error) bool {
- const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314
-
- if err != nil {
- if errLink, ok := err.(*os.LinkError); ok {
- if errNo, ok := errLink.Err.(syscall.Errno); ok {
- return errNo == ERROR_PRIVILEGE_NOT_HELD
- }
- }
- }
-
- return false
-}
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index e4e56e28e0..5310876555 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -229,6 +229,10 @@ type decoder struct {
mapType reflect.Type
terrors []string
strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
}
var (
@@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
return out, false, false
}
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
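+
+// Worked example (editorial note, not part of the upstream patch): at the
+// midpoint decodeCount = 2,200,000, allowedAliasRatio returns
+// 0.99 - 0.89*((2200000-400000)/3600000) = 0.99 - 0.89*0.5 = 0.545,
+// i.e. a little over half of all decode operations may still come from
+// alias expansion.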
+
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
switch n.kind {
case documentNode:
return d.document(n, out)
@@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n] = true
+ d.aliasDepth++
good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
delete(d.aliases, n)
return good
}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
index 6c151db6fb..4120e0c916 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -81,7 +81,7 @@ func resolvableTag(tag string) bool {
return false
}
-var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 077fd1dd2d..570b8ecd10 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
return true
}
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
// Increase the flow level and resize the simple key list if needed.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Reset the simple key on the next level.
@@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Increase the flow level.
parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
return true
}
@@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
return true
}
+// max_indents limits the indents stack size
+const max_indents = 10000
+
// Push the current indentation level to the stack and set the new level if
// the current column is greater than the indentation level. In this case,
// append or insert the specified token into the token queue.
@@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
// indentation level.
parser.indents = append(parser.indents, parser.indent)
parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
// Create a token and insert it into the queue.
token := yaml_token_t{