
single commit and bumping go-git version

zach rice 7 years ago
commit 15dedda59a
48 changed files with 1445 additions and 382 deletions
  1. Gopkg.lock (+3 -3)
  2. Gopkg.toml (+1 -1)
  3. main.go (+19 -5)
  4. vendor/github.com/xanzy/go-gitlab/gitlab.go (+2 -2)
  5. vendor/gopkg.in/src-d/go-git.v4/.travis.yml (+1 -1)
  6. vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md (+2 -1)
  7. vendor/gopkg.in/src-d/go-git.v4/LICENSE (+2 -2)
  8. vendor/gopkg.in/src-d/go-git.v4/README.md (+7 -7)
  9. vendor/gopkg.in/src-d/go-git.v4/blame.go (+16 -5)
  10. vendor/gopkg.in/src-d/go-git.v4/config/refspec.go (+1 -1)
  11. vendor/gopkg.in/src-d/go-git.v4/go.mod (+29 -0)
  12. vendor/gopkg.in/src-d/go-git.v4/go.sum (+59 -0)
  13. vendor/gopkg.in/src-d/go-git.v4/object_walker.go (+2 -0)
  14. vendor/gopkg.in/src-d/go-git.v4/options.go (+51 -4)
  15. vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go (+12 -12)
  16. vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go (+12 -12)
  17. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go (+6 -2)
  18. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go (+26 -1)
  19. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go (+60 -1)
  20. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go (+21 -3)
  21. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go (+7 -1)
  22. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go (+1 -1)
  23. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go (+40 -73)
  24. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go (+46 -52)
  25. vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go (+42 -4)
  26. vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go (+132 -0)
  27. vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go (+145 -0)
  28. vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go (+8 -7)
  29. vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go (+11 -0)
  30. vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go (+30 -0)
  31. vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go (+2 -0)
  32. vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go (+21 -1)
  33. vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go (+2 -1)
  34. vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go (+8 -6)
  35. vendor/gopkg.in/src-d/go-git.v4/prune.go (+1 -1)
  36. vendor/gopkg.in/src-d/go-git.v4/references.go (+3 -1)
  37. vendor/gopkg.in/src-d/go-git.v4/remote.go (+7 -2)
  38. vendor/gopkg.in/src-d/go-git.v4/repository.go (+369 -60)
  39. vendor/gopkg.in/src-d/go-git.v4/status.go (+1 -1)
  40. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go (+2 -2)
  41. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go (+49 -2)
  42. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref_norwfs.go (+0 -47)
  43. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go (+2 -1)
  44. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go (+137 -40)
  45. vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go (+10 -15)
  46. vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go (+10 -0)
  47. vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go (+1 -1)
  48. vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go (+26 -0)

+ 3 - 3
Gopkg.lock

@@ -249,8 +249,8 @@
     "utils/merkletrie/internal/frame",
     "utils/merkletrie/noder"
   ]
-  revision = "d3cec13ac0b195bfb897ed038a08b5130ab9969e"
-  version = "v4.7.0"
+  revision = "a1f6ef44dfed1253ef7f3bc049f66b15f8fc2ab2"
+  version = "v4.9.1"
 
 [[projects]]
   name = "gopkg.in/warnings.v0"
@@ -261,6 +261,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "2f65be0be7c6fe6a209a87c7e8cf47d994af94a33c77478edb096a8e3b416d2d"
+  inputs-digest = "94efaa76ee4e1f9c4d3a38138dc1ceebf7e8f50c8cdc56721c2dc122f810b8c5"
   solver-name = "gps-cdcl"
   solver-version = 1

+ 1 - 1
Gopkg.toml

@@ -51,7 +51,7 @@
 
 [[constraint]]
   name = "gopkg.in/src-d/go-git.v4"
-  version = "4.7.0"
+  version = "4.9.1"
 
 [prune]
   go-tests = true

+ 19 - 5
main.go

@@ -70,8 +70,9 @@ type Options struct {
 	GitLabUser string `long:"gitlab-user" description:"GitLab user ID to audit"`
 	GitLabOrg  string `long:"gitlab-org" description:"GitLab group ID to audit"`
 
-	Commit string `short:"c" long:"commit" description:"sha of commit to stop at"`
-	Depth  int    `long:"depth" description:"maximum commit depth"`
+	CommitStop string `long:"commit-stop" description:"sha of commit to stop at"`
+	Commit     string `long:"commit" description:"sha of commit to investigate"`
+	Depth      int    `long:"depth" description:"maximum commit depth"`
 
 	// local target option
 	RepoPath  string `long:"repo-path" description:"Path to repo"`
@@ -548,7 +549,7 @@ func auditGitReference(repo *RepoDescriptor, ref *plumbing.Reference) []Leak {
 		return nil
 	}
 	err = cIter.ForEach(func(c *object.Commit) error {
-		if c == nil || c.Hash.String() == opts.Commit || (opts.Depth != 0 && commitCount == opts.Depth) {
+		if c == nil || (opts.Depth != 0 && commitCount == opts.Depth) {
 			cIter.Close()
 			return errors.New("ErrStop")
 		}
@@ -558,8 +559,8 @@ func auditGitReference(repo *RepoDescriptor, ref *plumbing.Reference) []Leak {
 			return nil
 		}
 
-		// commits w/o parent (root of git the git ref)
-		if len(c.ParentHashes) == 0 {
+		// commits w/o parent (root of git the git ref) or option for single commit is not empty str
+		if len(c.ParentHashes) == 0 || opts.Commit == c.Hash.String() {
 			if commitMap[c.Hash.String()] {
 				return nil
 			}
@@ -605,6 +606,12 @@ func auditGitReference(repo *RepoDescriptor, ref *plumbing.Reference) []Leak {
 			})
 			return nil
 		}
+
+		// single commit
+		if opts.Commit != "" {
+			return nil
+		}
+
 		skipCount := false
 		err = c.Parents().ForEach(func(parent *object.Commit) error {
 			// check if we've seen this diff before
@@ -683,7 +690,14 @@ func auditGitReference(repo *RepoDescriptor, ref *plumbing.Reference) []Leak {
 					}
 				}
 			}(c, parent)
+
+			// stop audit if we are at commitStop
+			if c.Hash.String() == opts.CommitStop {
+				cIter.Close()
+				return errors.New("ErrStop")
+			}
 			return nil
+
 		})
 		return nil
 	})

+ 2 - 2
vendor/github.com/xanzy/go-gitlab/gitlab.go

@@ -72,8 +72,8 @@ const (
 	OwnerPermissions      AccessLevelValue = 50
 
 	// These are deprecated and should be removed in a future version
-	MasterPermissions AccessLevelValue = 40
-	OwnerPermission   AccessLevelValue = 50
+	MasterPermissions     AccessLevelValue = 40
+	OwnerPermission       AccessLevelValue = 50
 )
 
 // BuildStateValue represents a GitLab build state.

+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/.travis.yml

@@ -1,8 +1,8 @@
 language: go
 
 go:
-  - 1.9.x
   - "1.10"
+  - "1.11"
 
 go_import_path: gopkg.in/src-d/go-git.v4
 

+ 2 - 1
vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md

@@ -21,7 +21,8 @@ This can be done easily using the [`-s`](https://github.com/git/git/blob/b2c150d
 
 The official support channels, for both users and contributors, are:
 
-- GitHub [issues](https://github.com/src-d/go-git/issues)*
+- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions.
+- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests.
 - Slack: #go-git room in the [source{d} Slack](https://join.slack.com/t/sourced-community/shared_invite/enQtMjc4Njk5MzEyNzM2LTFjNzY4NjEwZGEwMzRiNTM4MzRlMzQ4MmIzZjkwZmZlM2NjODUxZmJjNDI1OTcxNDAyMmZlNmFjODZlNTg0YWM)
 
 *Before opening a new issue or submitting a new pull request, it's helpful to

+ 2 - 2
vendor/gopkg.in/src-d/go-git.v4/LICENSE

@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright 2017 Sourced Technologies, S.L.
+   Copyright 2018 Sourced Technologies, S.L.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -198,4 +198,4 @@
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
-   limitations under the License.
+   limitations under the License.

+ 7 - 7
vendor/gopkg.in/src-d/go-git.v4/README.md

@@ -3,16 +3,16 @@
 
 *go-git* is a highly extensible git implementation library written in **pure Go**.
 
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several type of storage, such as in-memory filesystems, or custom implementations thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface.
+It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface.
 
-It's being actively develop since 2015 and is being use extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools.
+It's being actively developed since 2015 and is being used extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools.
 
 Comparison with git
 -------------------
 
 *go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does.
 
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
+*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
 
 
 Installation
@@ -24,12 +24,12 @@ The recommended way to install *go-git* is:
 go get -u gopkg.in/src-d/go-git.v4/...
 ```
 
-> We use [gopkg.in](http://labix.org/gopkg.in) for having a versioned API, this means that when `go get` clones the package, is the latest tag matching `v4.*` cloned and not the master branch.
+> We use [gopkg.in](http://labix.org/gopkg.in) to version the API, this means that when `go get` clones the package, it's the latest tag matching `v4.*` that is cloned and not the master branch.
 
 Examples
 --------
 
-> Please note that the functions `CheckIfError` and `Info` used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples.
+> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples.
 
 
 ### Basic example
@@ -71,7 +71,7 @@ r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 
 CheckIfError(err)
 
-// Gets the HEAD history from HEAD, just like does:
+// Gets the HEAD history from HEAD, just like this command:
 Info("git log")
 
 // ... retrieves the branch pointed by HEAD
@@ -110,7 +110,7 @@ Date:   Fri Nov 11 13:23:22 2016 +0100
 ...
 ```
 
-You can find this [example](_examples/log/main.go) and many others at the [examples](_examples) folder
+You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
 
 Contribute
 ----------

+ 16 - 5
vendor/gopkg.in/src-d/go-git.v4/blame.go

@@ -123,14 +123,25 @@ func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
 }
 
 func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
-	if len(contents) != len(commits) {
-		return nil, errors.New("contents and commits have different length")
+	lcontents := len(contents)
+	lcommits := len(commits)
+
+	if lcontents != lcommits {
+		if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
+			contents = append(contents, "\n")
+		} else {
+			return nil, errors.New("contents and commits have different length")
+		}
 	}
-	result := make([]*Line, 0, len(contents))
+
+	result := make([]*Line, 0, lcontents)
 	for i := range contents {
-		l := newLine(commits[i].Author.Email, contents[i], commits[i].Author.When, commits[i].Hash)
-		result = append(result, l)
+		result = append(result, newLine(
+			commits[i].Author.Email, contents[i],
+			commits[i].Author.When, commits[i].Hash,
+		))
 	}
+
 	return result, nil
 }
 

+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/config/refspec.go

@@ -15,7 +15,7 @@ const (
 
 var (
 	ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
-	ErrRefSpecMalformedWildcard  = errors.New("malformed refspec, missmatched number of wildcards")
+	ErrRefSpecMalformedWildcard  = errors.New("malformed refspec, mismatched number of wildcards")
 )
 
 // RefSpec is a mapping from local branches to remote references

+ 29 - 0
vendor/gopkg.in/src-d/go-git.v4/go.mod

@@ -0,0 +1,29 @@
+module gopkg.in/src-d/go-git.v4
+
+require (
+	github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
+	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/emirpasic/gods v1.9.0
+	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
+	github.com/gliderlabs/ssh v0.1.1
+	github.com/google/go-cmp v0.2.0
+	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
+	github.com/jessevdk/go-flags v1.4.0
+	github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
+	github.com/mitchellh/go-homedir v1.0.0
+	github.com/pelletier/go-buffruneio v0.2.0 // indirect
+	github.com/pkg/errors v0.8.0 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/sergi/go-diff v1.0.0
+	github.com/src-d/gcfg v1.4.0
+	github.com/stretchr/testify v1.2.2 // indirect
+	github.com/xanzy/ssh-agent v0.2.0
+	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+	golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect
+	golang.org/x/text v0.3.0
+	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
+	gopkg.in/src-d/go-billy.v4 v4.2.1
+	gopkg.in/src-d/go-git-fixtures.v3 v3.1.1
+	gopkg.in/warnings.v0 v0.1.2 // indirect
+)

+ 59 - 0
vendor/gopkg.in/src-d/go-git.v4/go.sum

@@ -0,0 +1,59 @@
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
+github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
+github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
+github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
+github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
+golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
+gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=

+ 2 - 0
vendor/gopkg.in/src-d/go-git.v4/object_walker.go

@@ -94,6 +94,8 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
 				return err
 			}
 		}
+	case *object.Tag:
+		return p.walkObjectTree(obj.Target)
 	default:
 		// Error out on unhandled object types.
 		return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)

+ 51 - 4
vendor/gopkg.in/src-d/go-git.v4/options.go

@@ -3,6 +3,7 @@ package git
 import (
 	"errors"
 	"regexp"
+	"strings"
 
 	"golang.org/x/crypto/openpgp"
 	"gopkg.in/src-d/go-git.v4/config"
@@ -230,8 +231,9 @@ var (
 
 // CheckoutOptions describes how a checkout operation should be performed.
 type CheckoutOptions struct {
-	// Hash to be checked out, if used HEAD will in detached mode. Branch and
-	// Hash are mutually exclusive, if Create is not used.
+	// Hash is the hash of the commit to be checked out. If used, HEAD will be
+	// in detached mode. If Create is not used, Branch and Hash are mutually
+	// exclusive.
 	Hash plumbing.Hash
 	// Branch to be checked out, if Branch and Hash are empty is set to `master`.
 	Branch plumbing.ReferenceName
@@ -329,6 +331,15 @@ type LogOptions struct {
 	// set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`)
 	// set Order=LogOrderBSF for Breadth-first search
 	Order LogOrder
+
+	// Show only those commits in which the specified file was inserted/updated.
+	// It is equivalent to running `git log -- <file-name>`.
+	FileName *string
+
+	// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
+	// It is equivalent to running `git log --all`.
+	// If set on true, the From option will be ignored.
+	All bool
 }
 
 var (
@@ -348,8 +359,9 @@ type CommitOptions struct {
 	// Parents are the parents commits for the new commit, by default when
 	// len(Parents) is zero, the hash of HEAD reference is used.
 	Parents []plumbing.Hash
-	// A key to sign the commit with. A nil value here means the commit will not
-	// be signed. The private key must be present and already decrypted.
+	// SignKey denotes a key to sign the commit with. A nil value here means the
+	// commit will not be signed. The private key must be present and already
+	// decrypted.
 	SignKey *openpgp.Entity
 }
 
@@ -377,6 +389,41 @@ func (o *CommitOptions) Validate(r *Repository) error {
 	return nil
 }
 
+var (
+	ErrMissingName    = errors.New("name field is required")
+	ErrMissingTagger  = errors.New("tagger field is required")
+	ErrMissingMessage = errors.New("message field is required")
+)
+
+// CreateTagOptions describes how a tag object should be created.
+type CreateTagOptions struct {
+	// Tagger defines the signature of the tag creator.
+	Tagger *object.Signature
+	// Message defines the annotation of the tag. It is canonicalized during
+	// validation into the format expected by git - no leading whitespace and
+	// ending in a newline.
+	Message string
+	// SignKey denotes a key to sign the tag with. A nil value here means the tag
+	// will not be signed. The private key must be present and already decrypted.
+	SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
+	if o.Tagger == nil {
+		return ErrMissingTagger
+	}
+
+	if o.Message == "" {
+		return ErrMissingMessage
+	}
+
+	// Canonicalize the message into the expected message format.
+	o.Message = strings.TrimSpace(o.Message) + "\n"
+
+	return nil
+}
+
 // ListOptions describes how a remote list should be performed.
 type ListOptions struct {
 	// Auth credentials, if required, to use with the remote repository.
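
The new `FileName` and `All` fields on `LogOptions` correspond to `git log -- <file>` and `git log --all`. Below is a minimal sketch of how a consumer might use them together, assuming an existing repository path of the caller's choosing and that the vendored `Repository.Log` in this release honours both new fields:

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	// Path is illustrative; any existing repository works.
	r, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}

	// Equivalent of `git log --all -- main.go`: walk every ref and keep only
	// commits that touched the named file. From is ignored when All is set.
	fileName := "main.go"
	iter, err := r.Log(&git.LogOptions{FileName: &fileName, All: true})
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
		return nil
	})
}
```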

+ 12 - 12
vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go

@@ -45,19 +45,23 @@ func (c *BufferLRU) Put(key int64, slice []byte) {
 		c.ll = list.New()
 	}
 
+	bufSize := FileSize(len(slice))
 	if ee, ok := c.cache[key]; ok {
+		oldBuf := ee.Value.(buffer)
+		// in this case bufSize is a delta: new size - old size
+		bufSize -= FileSize(len(oldBuf.Slice))
 		c.ll.MoveToFront(ee)
 		ee.Value = buffer{key, slice}
-		return
+	} else {
+		if bufSize > c.MaxSize {
+			return
+		}
+		ee := c.ll.PushFront(buffer{key, slice})
+		c.cache[key] = ee
 	}
 
-	objSize := FileSize(len(slice))
-
-	if objSize > c.MaxSize {
-		return
-	}
-
-	for c.actualSize+objSize > c.MaxSize {
+	c.actualSize += bufSize
+	for c.actualSize > c.MaxSize {
 		last := c.ll.Back()
 		lastObj := last.Value.(buffer)
 		lastSize := FileSize(len(lastObj.Slice))
@@ -66,10 +70,6 @@ func (c *BufferLRU) Put(key int64, slice []byte) {
 		delete(c.cache, lastObj.Key)
 		c.actualSize -= lastSize
 	}
-
-	ee := c.ll.PushFront(buffer{key, slice})
-	c.cache[key] = ee
-	c.actualSize += objSize
 }
 
 // Get returns a buffer by its key. It marks the buffer as used. If the buffer

+ 12 - 12
vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go

@@ -42,20 +42,24 @@ func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
 		c.ll = list.New()
 	}
 
+	objSize := FileSize(obj.Size())
 	key := obj.Hash()
 	if ee, ok := c.cache[key]; ok {
+		oldObj := ee.Value.(plumbing.EncodedObject)
+		// in this case objSize is a delta: new size - old size
+		objSize -= FileSize(oldObj.Size())
 		c.ll.MoveToFront(ee)
 		ee.Value = obj
-		return
-	}
-
-	objSize := FileSize(obj.Size())
-
-	if objSize > c.MaxSize {
-		return
+	} else {
+		if objSize > c.MaxSize {
+			return
+		}
+		ee := c.ll.PushFront(obj)
+		c.cache[key] = ee
 	}
 
-	for c.actualSize+objSize > c.MaxSize {
+	c.actualSize += objSize
+	for c.actualSize > c.MaxSize {
 		last := c.ll.Back()
 		lastObj := last.Value.(plumbing.EncodedObject)
 		lastSize := FileSize(lastObj.Size())
@@ -64,10 +68,6 @@ func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
 		delete(c.cache, lastObj.Hash())
 		c.actualSize -= lastSize
 	}
-
-	ee := c.ll.PushFront(obj)
-	c.cache[key] = ee
-	c.actualSize += objSize
 }
 
 // Get returns an object by its hash. It marks the object as used. If the object
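
The rewritten `Put` in both LRU caches now treats an update of an existing key as a size delta (new size minus old size) and only then evicts from the back until `actualSize` fits under `MaxSize`, instead of returning early and leaving the accounting stale. A small sketch of the `BufferLRU` behaviour this enables, assuming the exported `NewBufferLRU` constructor and `KiByte` constant in `plumbing/cache` (key and sizes are illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/cache"
)

func main() {
	// A 1 KiB cache; keys are packfile offsets, values are object buffers.
	c := cache.NewBufferLRU(1 * cache.KiByte)

	c.Put(42, make([]byte, 100)) // cache accounts 100 bytes for key 42
	c.Put(42, make([]byte, 900)) // same key: the +800 byte delta is now tracked

	if buf, ok := c.Get(42); ok {
		fmt.Println(len(buf)) // 900
	}
}
```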

+ 6 - 2
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go

@@ -237,9 +237,13 @@ func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op O
 	// we need to search for a reference for the next diff
 	switch {
 	case linesBefore != 0 && c.ctxLines != 0:
-		clb = lb - c.ctxLines + 1
+		if lb > c.ctxLines {
+			clb = lb - c.ctxLines + 1
+		} else {
+			clb = 1
+		}
 	case c.ctxLines == 0:
-		clb = lb - c.ctxLines
+		clb = lb
 	case i != len(c.chunks)-1:
 		next := c.chunks[i+1]
 		if next.Type() == op || next.Type() == Equal {

+ 26 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go

@@ -21,7 +21,7 @@ var (
 	// ErrMalformedSignature is returned by Decode when the index header file is
 	// malformed
 	ErrMalformedSignature = errors.New("malformed index signature file")
-	// ErrInvalidChecksum is returned by Decode if the SHA1 hash missmatch with
+	// ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with
 	// the read content
 	ErrInvalidChecksum = errors.New("invalid checksum")
 
@@ -261,6 +261,17 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 		if err := d.Decode(idx.ResolveUndo); err != nil {
 			return err
 		}
+	case bytes.Equal(header, endOfIndexEntryExtSignature):
+		r, err := d.getExtensionReader()
+		if err != nil {
+			return err
+		}
+
+		idx.EndOfIndexEntry = &EndOfIndexEntry{}
+		d := &endOfIndexEntryDecoder{r}
+		if err := d.Decode(idx.EndOfIndexEntry); err != nil {
+			return err
+		}
 	default:
 		return errUnknownExtension
 	}
@@ -449,3 +460,17 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 
 	return nil
 }
+
+type endOfIndexEntryDecoder struct {
+	r io.Reader
+}
+
+func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
+	var err error
+	e.Offset, err = binary.ReadUint32(d.r)
+	if err != nil {
+		return err
+	}
+
+	return binary.Read(d.r, &e.Hash)
+}

+ 60 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go

@@ -297,5 +297,64 @@
 //        in the previous ewah bitmap.
 //
 //      - One NUL.
-// Source https://www.kernel.org/pub/software/scm/git/docs/technical/index-format.txt
+//
+//   == File System Monitor cache
+//
+//     The file system monitor cache tracks files for which the core.fsmonitor
+//     hook has told us about changes.  The signature for this extension is
+//     { 'F', 'S', 'M', 'N' }.
+//
+//     The extension starts with
+//
+//     - 32-bit version number: the current supported version is 1.
+//
+//     - 64-bit time: the extension data reflects all changes through the given
+//       time which is stored as the nanoseconds elapsed since midnight,
+//       January 1, 1970.
+//
+//    - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
+//
+//    - An ewah bitmap, the n-th bit indicates whether the n-th index entry
+//      is not CE_FSMONITOR_VALID.
+//
+//  == End of Index Entry
+//
+//    The End of Index Entry (EOIE) is used to locate the end of the variable
+//    length index entries and the begining of the extensions. Code can take
+//    advantage of this to quickly locate the index extensions without having
+//    to parse through all of the index entries.
+//
+//    Because it must be able to be loaded before the variable length cache
+//    entries and other index extensions, this extension must be written last.
+//    The signature for this extension is { 'E', 'O', 'I', 'E' }.
+//
+//    The extension consists of:
+//
+//    - 32-bit offset to the end of the index entries
+//
+//    - 160-bit SHA-1 over the extension types and their sizes (but not
+//      their contents).  E.g. if we have "TREE" extension that is N-bytes
+//      long, "REUC" extension that is M-bytes long, followed by "EOIE",
+//      then the hash would be:
+//
+//      SHA-1("TREE" + <binary representation of N> +
+//        "REUC" + <binary representation of M>)
+//
+//  == Index Entry Offset Table
+//
+//    The Index Entry Offset Table (IEOT) is used to help address the CPU
+//    cost of loading the index by enabling multi-threading the process of
+//    converting cache entries from the on-disk format to the in-memory format.
+//    The signature for this extension is { 'I', 'E', 'O', 'T' }.
+//
+//    The extension consists of:
+//
+//    - 32-bit version (currently 1)
+//
+//    - A number of index offset entries each consisting of:
+//
+//    - 32-bit offset from the begining of the file to the first cache entry
+//      in this block of entries.
+//
+//    - 32-bit count of cache entries in this block
 package index

+ 21 - 3
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go

@@ -18,9 +18,10 @@ var (
 	// ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
 	ErrEntryNotFound = errors.New("entry not found")
 
-	indexSignature          = []byte{'D', 'I', 'R', 'C'}
-	treeExtSignature        = []byte{'T', 'R', 'E', 'E'}
-	resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
+	indexSignature              = []byte{'D', 'I', 'R', 'C'}
+	treeExtSignature            = []byte{'T', 'R', 'E', 'E'}
+	resolveUndoExtSignature     = []byte{'R', 'E', 'U', 'C'}
+	endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
 )
 
 // Stage during merge
@@ -50,6 +51,8 @@ type Index struct {
 	Cache *Tree
 	// ResolveUndo represents the 'Resolve undo' extension
 	ResolveUndo *ResolveUndo
+	// EndOfIndexEntry represents the 'End of Index Entry' extension
+	EndOfIndexEntry *EndOfIndexEntry
 }
 
 // Add creates a new Entry and returns it. The caller should first check that
@@ -193,3 +196,18 @@ type ResolveUndoEntry struct {
 	Path   string
 	Stages map[Stage]plumbing.Hash
 }
+
+// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of
+// the variable length index entries and the begining of the extensions. Code
+// can take advantage of this to quickly locate the index extensions without
+// having to parse through all of the index entries.
+//
+//  Because it must be able to be loaded before the variable length cache
+//  entries and other index extensions, this extension must be written last.
+type EndOfIndexEntry struct {
+	// Offset to the end of the index entries
+	Offset uint32
+	// Hash is a SHA-1 over the extension types and their sizes (but not
+	//	their contents).
+	Hash plumbing.Hash
+}

+ 7 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go

@@ -51,7 +51,13 @@ func WritePackfileToObjectStorage(
 	}
 
 	defer ioutil.CheckClose(w, &err)
-	_, err = io.Copy(w, packfile)
+
+	var n int64
+	n, err = io.Copy(w, packfile)
+	if err == nil && n == 0 {
+		return ErrEmptyPackfile
+	}
+
 	return err
 }
 

+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go

@@ -48,7 +48,7 @@ func NewFSObject(
 // Reader implements the plumbing.EncodedObject interface.
 func (o *FSObject) Reader() (io.ReadCloser, error) {
 	obj, ok := o.cache.Get(o.hash)
-	if ok {
+	if ok && obj != o {
 		reader, err := obj.Reader()
 		if err != nil {
 			return nil, err

+ 40 - 73
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go

@@ -21,6 +21,16 @@ var (
 	ErrZLib = NewError("zlib reading error")
 )
 
+// When reading small objects from packfile it is beneficial to do so at
+// once to exploit the buffered I/O. In many cases the objects are so small
+// that they were already loaded to memory when the object header was
+// loaded from the packfile. Wrapping in FSObject would cause this buffered
+// data to be thrown away and then re-read later, with the additional
+// seeking causing reloads from disk. Objects smaller than this threshold
+// are now always read into memory and stored in cache instead of being
+// wrapped in FSObject.
+const smallObjectThreshold = 16 * 1024
+
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
 	idxfile.Index
@@ -79,77 +89,37 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
 		}
 	}
 
+	return p.objectAtOffset(o)
+}
+
+// GetSizeByOffset retrieves the size of the encoded object from the
+// packfile with the given offset.
+func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
 	if _, err := p.s.SeekFromStart(o); err != nil {
 		if err == io.EOF || isInvalid(err) {
-			return nil, plumbing.ErrObjectNotFound
+			return 0, plumbing.ErrObjectNotFound
 		}
 
-		return nil, err
+		return 0, err
 	}
 
-	return p.nextObject()
+	h, err := p.nextObjectHeader()
+	if err != nil {
+		return 0, err
+	}
+	return h.Length, nil
 }
 
-func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
-	h, err := p.s.NextObjectHeader()
+func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
+	h, err := p.s.SeekObjectHeader(offset)
 	p.s.pendingObject = nil
 	return h, err
 }
 
-func (p *Packfile) getObjectData(
-	h *ObjectHeader,
-) (typ plumbing.ObjectType, size int64, err error) {
-	switch h.Type {
-	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
-		typ = h.Type
-		size = h.Length
-	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset()
-		defer bufPool.Put(buf)
-
-		_, _, err = p.s.NextObject(buf)
-		if err != nil {
-			return
-		}
-
-		delta := buf.Bytes()
-		_, delta = decodeLEB128(delta) // skip src size
-		sz, _ := decodeLEB128(delta)
-		size = int64(sz)
-
-		var offset int64
-		if h.Type == plumbing.REFDeltaObject {
-			offset, err = p.FindOffset(h.Reference)
-			if err != nil {
-				return
-			}
-		} else {
-			offset = h.OffsetReference
-		}
-
-		if baseType, ok := p.offsetToType[offset]; ok {
-			typ = baseType
-		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
-			if err != nil {
-				return
-			}
-
-			typ, _, err = p.getObjectData(h)
-			if err != nil {
-				return
-			}
-		}
-	default:
-		err = ErrInvalidObject.AddDetails("type %q", h.Type)
-	}
-
-	return
+func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
+	h, err := p.s.NextObjectHeader()
+	p.s.pendingObject = nil
+	return h, err
 }
 
 func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
@@ -192,11 +162,7 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 		if baseType, ok := p.offsetToType[offset]; ok {
 			typ = baseType
 		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
+			h, err = p.objectHeaderAtOffset(offset)
 			if err != nil {
 				return
 			}
@@ -213,8 +179,8 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 	return
 }
 
-func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
-	h, err := p.nextObjectHeader()
+func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		if err == io.EOF || isInvalid(err) {
 			return nil, plumbing.ErrObjectNotFound
@@ -228,6 +194,13 @@ func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
 		return p.getNextObject(h)
 	}
 
+	// If the object is not a delta and it's small enough then read it
+	// completely into memory now since it is already read from disk
+	// into buffer anyway.
+	if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+		return p.getNextObject(h)
+	}
+
 	hash, err := p.FindHash(h.Offset)
 	if err != nil {
 		return nil, err
@@ -271,11 +244,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
 		}
 	}
 
-	if _, err := p.s.SeekFromStart(offset); err != nil {
-		return nil, err
-	}
-
-	h, err := p.nextObjectHeader()
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -367,8 +336,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
 		if err != nil {
 			return err
 		}
-
-		p.cachePut(base)
 	}
 
 	obj.SetType(base.Type())

+ 46 - 52
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go

@@ -38,15 +38,14 @@ type Observer interface {
 // Parser decodes a packfile and calls any observer associated to it. Is used
 // to generate indexes.
 type Parser struct {
-	storage          storer.EncodedObjectStorer
-	scanner          *Scanner
-	count            uint32
-	oi               []*objectInfo
-	oiByHash         map[plumbing.Hash]*objectInfo
-	oiByOffset       map[int64]*objectInfo
-	hashOffset       map[plumbing.Hash]int64
-	pendingRefDeltas map[plumbing.Hash][]*objectInfo
-	checksum         plumbing.Hash
+	storage    storer.EncodedObjectStorer
+	scanner    *Scanner
+	count      uint32
+	oi         []*objectInfo
+	oiByHash   map[plumbing.Hash]*objectInfo
+	oiByOffset map[int64]*objectInfo
+	hashOffset map[plumbing.Hash]int64
+	checksum   plumbing.Hash
 
 	cache *cache.BufferLRU
 	// delta content by offset, only used if source is not seekable
@@ -78,13 +77,12 @@ func NewParserWithStorage(
 	}
 
 	return &Parser{
-		storage:          storage,
-		scanner:          scanner,
-		ob:               ob,
-		count:            0,
-		cache:            cache.NewBufferLRUDefault(),
-		pendingRefDeltas: make(map[plumbing.Hash][]*objectInfo),
-		deltas:           deltas,
+		storage: storage,
+		scanner: scanner,
+		ob:      ob,
+		count:   0,
+		cache:   cache.NewBufferLRUDefault(),
+		deltas:  deltas,
 	}, nil
 }
 
@@ -150,10 +148,6 @@ func (p *Parser) Parse() (plumbing.Hash, error) {
 		return plumbing.ZeroHash, err
 	}
 
-	if len(p.pendingRefDeltas) > 0 {
-		return plumbing.ZeroHash, ErrReferenceDeltaNotFound
-	}
-
 	if err := p.onFooter(p.checksum); err != nil {
 		return plumbing.ZeroHash, err
 	}
@@ -205,18 +199,21 @@ func (p *Parser) indexObjects() error {
 			parent.Children = append(parent.Children, ota)
 		case plumbing.REFDeltaObject:
 			delta = true
-
 			parent, ok := p.oiByHash[oh.Reference]
-			if ok {
-				ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
-				parent.Children = append(parent.Children, ota)
-			} else {
-				ota = newBaseObject(oh.Offset, oh.Length, t)
-				p.pendingRefDeltas[oh.Reference] = append(
-					p.pendingRefDeltas[oh.Reference],
-					ota,
-				)
+			if !ok {
+				// can't find referenced object in this pack file
+				// this must be a "thin" pack.
+				parent = &objectInfo{ //Placeholder parent
+					SHA1:        oh.Reference,
+					ExternalRef: true, // mark as an external reference that must be resolved
+					Type:        plumbing.AnyObject,
+					DiskType:    plumbing.AnyObject,
+				}
+				p.oiByHash[oh.Reference] = parent
 			}
+			ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+			parent.Children = append(parent.Children, ota)
+
 		default:
 			ota = newBaseObject(oh.Offset, oh.Length, t)
 		}
@@ -297,16 +294,20 @@ func (p *Parser) resolveDeltas() error {
 	return nil
 }
 
-func (p *Parser) get(o *objectInfo) ([]byte, error) {
-	b, ok := p.cache.Get(o.Offset)
+func (p *Parser) get(o *objectInfo) (b []byte, err error) {
+	var ok bool
+	if !o.ExternalRef { // skip cache check for placeholder parents
+		b, ok = p.cache.Get(o.Offset)
+	}
+
 	// If it's not on the cache and is not a delta we can try to find it in the
-	// storage, if there's one.
+	// storage, if there's one. External refs must enter here.
 	if !ok && p.storage != nil && !o.Type.IsDelta() {
-		var err error
 		e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
 		if err != nil {
 			return nil, err
 		}
+		o.Type = e.Type()
 
 		r, err := e.Reader()
 		if err != nil {
@@ -323,6 +324,11 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
 		return b, nil
 	}
 
+	if o.ExternalRef {
+		// we were not able to resolve a ref in a thin pack
+		return nil, ErrReferenceDeltaNotFound
+	}
+
 	var data []byte
 	if o.DiskType.IsDelta() {
 		base, err := p.get(o.Parent)
@@ -335,7 +341,6 @@ func (p *Parser) get(o *objectInfo) ([]byte, error) {
 			return nil, err
 		}
 	} else {
-		var err error
 		data, err = p.readData(o)
 		if err != nil {
 			return nil, err
@@ -367,14 +372,6 @@ func (p *Parser) resolveObject(
 		return nil, err
 	}
 
-	if pending, ok := p.pendingRefDeltas[o.SHA1]; ok {
-		for _, po := range pending {
-			po.Parent = o
-			o.Children = append(o.Children, po)
-		}
-		delete(p.pendingRefDeltas, o.SHA1)
-	}
-
 	if p.storage != nil {
 		obj := new(plumbing.MemoryObject)
 		obj.SetSize(o.Size())
@@ -401,11 +398,7 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) {
 		return data, nil
 	}
 
-	if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
-		return nil, err
-	}
-
-	if _, err := p.scanner.NextObjectHeader(); err != nil {
+	if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
 		return nil, err
 	}
 
@@ -447,10 +440,11 @@ func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
 }
 
 type objectInfo struct {
-	Offset   int64
-	Length   int64
-	Type     plumbing.ObjectType
-	DiskType plumbing.ObjectType
+	Offset      int64
+	Length      int64
+	Type        plumbing.ObjectType
+	DiskType    plumbing.ObjectType
+	ExternalRef bool // indicates this is an external reference in a thin pack file
 
 	Crc32 uint32
 

+ 42 - 4
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go

@@ -138,14 +138,52 @@ func (s *Scanner) readCount() (uint32, error) {
 	return binary.ReadUint32(s.r)
 }
 
+// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
+// for the next object in the reader
+func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
+	// if seeking we assume that you are not interested in the header
+	if s.version == 0 {
+		s.version = VersionSupported
+	}
+
+	if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
 // NextObjectHeader returns the ObjectHeader for the next object in the reader
 func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
-	defer s.Flush()
-
 	if err := s.doPending(); err != nil {
 		return nil, err
 	}
 
+	offset, err := s.r.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
+// nextObjectHeader returns the ObjectHeader for the next object in the reader
+// without the Offset field
+func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
+	defer s.Flush()
+
 	s.crc.Reset()
 
 	h := &ObjectHeader{}
@@ -308,7 +346,7 @@ var byteSlicePool = sync.Pool{
 // SeekFromStart sets a new offset from start, returns the old position before
 // the change.
 func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
-	// if seeking we assume that you are not interested on the header
+	// if seeking we assume that you are not interested in the header
 	if s.version == 0 {
 		s.version = VersionSupported
 	}
@@ -385,7 +423,7 @@ type bufferedSeeker struct {
 }
 
 func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent {
+	if whence == io.SeekCurrent && offset == 0 {
 		current, err := r.r.Seek(offset, whence)
 		if err != nil {
 			return current, err

+ 132 - 0
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go

@@ -1,10 +1,12 @@
 package object
 
 import (
+	"container/list"
 	"io"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage"
 )
 
 type commitPreIterator struct {
@@ -181,3 +183,133 @@ func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
 }
 
 func (w *commitPostIterator) Close() {}
+
+// commitAllIterator stands for commit iterator for all refs.
+type commitAllIterator struct {
+	// currCommit points to the current commit.
+	currCommit *list.Element
+}
+
+// NewCommitAllIter returns a new commit iterator for all refs.
+// repoStorer is a repo Storer used to get commits and references.
+// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order
+func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
+	commitsPath := list.New()
+	commitsLookup := make(map[plumbing.Hash]*list.Element)
+	head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
+	if err != nil {
+		return nil, err
+	}
+
+	// add all references along with the HEAD
+	if err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup); err != nil {
+		return nil, err
+	}
+	refIter, err := repoStorer.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+	defer refIter.Close()
+	err = refIter.ForEach(
+		func(ref *plumbing.Reference) error {
+			return addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup)
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &commitAllIterator{commitsPath.Front()}, nil
+}
+
+func addReference(
+	repoStorer storage.Storer,
+	commitIterFunc func(*Commit) CommitIter,
+	ref *plumbing.Reference,
+	commitsPath *list.List,
+	commitsLookup map[plumbing.Hash]*list.Element) error {
+
+	_, exists := commitsLookup[ref.Hash()]
+	if exists {
+		// we already have it - skip the reference.
+		return nil
+	}
+
+	refCommit, _ := GetCommit(repoStorer, ref.Hash())
+	if refCommit == nil {
+		// if it's not a commit - skip it.
+		return nil
+	}
+
+	var (
+		refCommits []*Commit
+		parent     *list.Element
+	)
+	// collect all ref commits to add
+	commitIter := commitIterFunc(refCommit)
+	for c, e := commitIter.Next(); e == nil; {
+		parent, exists = commitsLookup[c.Hash]
+		if exists {
+			break
+		}
+		refCommits = append(refCommits, c)
+		c, e = commitIter.Next()
+	}
+	commitIter.Close()
+
+	if parent == nil {
+		// common parent - not found
+		// add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
+		for _, c := range refCommits {
+			parent = commitsPath.PushBack(c)
+			commitsLookup[c.Hash] = parent
+		}
+	} else {
+		// add ref's commits to the path in reverse order (from the latest)
+		for i := len(refCommits) - 1; i >= 0; i-- {
+			c := refCommits[i]
+			// insert before found common parent
+			parent = commitsPath.InsertBefore(c, parent)
+			commitsLookup[c.Hash] = parent
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Next() (*Commit, error) {
+	if it.currCommit == nil {
+		return nil, io.EOF
+	}
+
+	c := it.currCommit.Value.(*Commit)
+	it.currCommit = it.currCommit.Next()
+
+	return c, nil
+}
+
+func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
+	for {
+		c, err := it.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		err = cb(c)
+		if err == storer.ErrStop {
+			break
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Close() {
+	it.currCommit = nil
+}

+ 145 - 0
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go

@@ -0,0 +1,145 @@
+package object
+
+import (
+	"io"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type commitFileIter struct {
+	fileName      string
+	sourceIter    CommitIter
+	currentCommit *Commit
+	checkParent   bool
+}
+
+// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
+// successive trees returned from the commit iterator from the argument. The purpose of this is
+// to find the commits that explain how the files that match the path came to be.
+// If checkParent is true then the function double checks if potential parent (next commit in a path)
+// is one of the parents in the tree (it's used by `git log --all`).
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
+	iterator := new(commitFileIter)
+	iterator.sourceIter = commitIter
+	iterator.fileName = fileName
+	iterator.checkParent = checkParent
+	return iterator
+}
+
+func (c *commitFileIter) Next() (*Commit, error) {
+	if c.currentCommit == nil {
+		var err error
+		c.currentCommit, err = c.sourceIter.Next()
+		if err != nil {
+			return nil, err
+		}
+	}
+	commit, commitErr := c.getNextFileCommit()
+
+	// Setting current-commit to nil to prevent unwanted states when errors are raised
+	if commitErr != nil {
+		c.currentCommit = nil
+	}
+	return commit, commitErr
+}
+
+func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
+	for {
+		// Parent-commit can be nil if the current-commit is the initial commit
+		parentCommit, parentCommitErr := c.sourceIter.Next()
+		if parentCommitErr != nil {
+			// If the parent-commit is beyond the initial commit, keep it nil
+			if parentCommitErr != io.EOF {
+				return nil, parentCommitErr
+			}
+			parentCommit = nil
+		}
+
+		// Fetch the trees of the current and parent commits
+		currentTree, currTreeErr := c.currentCommit.Tree()
+		if currTreeErr != nil {
+			return nil, currTreeErr
+		}
+
+		var parentTree *Tree
+		if parentCommit != nil {
+			var parentTreeErr error
+			parentTree, parentTreeErr = parentCommit.Tree()
+			if parentTreeErr != nil {
+				return nil, parentTreeErr
+			}
+		}
+
+		// Find diff between current and parent trees
+		changes, diffErr := DiffTree(currentTree, parentTree)
+		if diffErr != nil {
+			return nil, diffErr
+		}
+
+		found := c.hasFileChange(changes, parentCommit)
+
+		// Storing the current-commit in-case a change is found, and
+		// Updating the current-commit for the next-iteration
+		prevCommit := c.currentCommit
+		c.currentCommit = parentCommit
+
+		if found {
+			return prevCommit, nil
+		}
+
+		// If not matches found and if parent-commit is beyond the initial commit, then return with EOF
+		if parentCommit == nil {
+			return nil, io.EOF
+		}
+	}
+}
+
+func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+	for _, change := range changes {
+		if change.name() != c.fileName {
+			continue
+		}
+
+		// filename matches, now check if source iterator contains all commits (from all refs)
+		if c.checkParent {
+			if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
+				return true
+			}
+			continue
+		}
+
+		return true
+	}
+
+	return false
+}
+
+func isParentHash(hash plumbing.Hash, commit *Commit) bool {
+	for _, h := range commit.ParentHashes {
+		if h == hash {
+			return true
+		}
+	}
+	return false
+}
+
+func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
+	for {
+		commit, nextErr := c.Next()
+		if nextErr != nil {
+			return nextErr
+		}
+		err := cb(commit)
+		if err == storer.ErrStop {
+			return nil
+		} else if err != nil {
+			return err
+		}
+	}
+}
+
+func (c *commitFileIter) Close() {
+	c.sourceIter.Close()
+}

+ 8 - 7
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go

@@ -195,13 +195,14 @@ func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
 		return err
 	}
 
-	if t.PGPSignature != "" && includeSig {
-		// Split all the signature lines and write with a newline at the end.
-		lines := strings.Split(t.PGPSignature, "\n")
-		for _, line := range lines {
-			if _, err = fmt.Fprintf(w, "%s\n", line); err != nil {
-				return err
-			}
+	// Note that this is highly sensitive to what it sent along in the message.
+	// Message *always* needs to end with a newline, or else the message and the
+	// signature will be concatenated into a corrupt object. Since this is a
+	// lower-level method, we assume you know what you are doing and have already
+	// done the needful on the message in the caller.
+	if includeSig {
+		if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
+			return err
 		}
 	}
 

+ 11 - 0
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go

@@ -87,6 +87,17 @@ func (t *Tree) File(path string) (*File, error) {
 	return NewFile(path, e.Mode, blob), nil
 }
 
+// Size returns the plaintext size of an object, without reading it
+// into memory.
+func (t *Tree) Size(path string) (int64, error) {
+	e, err := t.FindEntry(path)
+	if err != nil {
+		return 0, ErrEntryNotFound
+	}
+
+	return t.s.EncodedObjectSize(e.Hash)
+}
+
 // Tree returns the tree identified by the `path` argument.
 // The path is interpreted as relative to the tree receiver.
 func (t *Tree) Tree(path string) (*Tree, error) {
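
`Tree.Size` builds on the new `EncodedObjectSize` storer method (see the `plumbing/storer` change below) to report a blob's plaintext size without inflating it. A rough usage sketch, assuming a repository opened at an illustrative path and a file that exists at `HEAD`:

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
)

// headFileSize returns the uncompressed size of path at HEAD without reading
// the blob contents into memory.
func headFileSize(r *git.Repository, path string) (int64, error) {
	ref, err := r.Head()
	if err != nil {
		return 0, err
	}
	commit, err := r.CommitObject(ref.Hash())
	if err != nil {
		return 0, err
	}
	tree, err := commit.Tree()
	if err != nil {
		return 0, err
	}
	return tree.Size(path)
}

func main() {
	r, err := git.PlainOpen("/path/to/repo") // path is illustrative
	if err != nil {
		panic(err)
	}
	size, err := headFileSize(r, "README.md")
	if err != nil {
		panic(err)
	}
	fmt.Println(size)
}
```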

+ 30 - 0
vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go

@@ -55,6 +55,36 @@ func (r ReferenceType) String() string {
 // ReferenceName reference name's
 type ReferenceName string
 
+// NewBranchReferenceName returns a reference name describing a branch based on
+// his short name.
+func NewBranchReferenceName(name string) ReferenceName {
+	return ReferenceName(refHeadPrefix + name)
+}
+
+// NewNoteReferenceName returns a reference name describing a note based on his
+// short name.
+func NewNoteReferenceName(name string) ReferenceName {
+	return ReferenceName(refNotePrefix + name)
+}
+
+// NewRemoteReferenceName returns a reference name describing a remote branch
+// based on his short name and the remote name.
+func NewRemoteReferenceName(remote, name string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
+}
+
+// NewRemoteHEADReferenceName returns a reference name describing a the HEAD
+// branch of a remote.
+func NewRemoteHEADReferenceName(remote string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
+}
+
+// NewTagReferenceName returns a reference name describing a tag based on short
+// his name.
+func NewTagReferenceName(name string) ReferenceName {
+	return ReferenceName(refTagPrefix + name)
+}
+
 // IsBranch check if a reference is a branch
 func (r ReferenceName) IsBranch() bool {
 	return strings.HasPrefix(string(r), refHeadPrefix)
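
The new constructors remove the need to hand-build fully qualified reference names. A short sketch combining them with the `CheckoutOptions` shown earlier (branch and tag names are illustrative, and the branch must already exist):

```go
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	fmt.Println(plumbing.NewBranchReferenceName("feature-x"))        // refs/heads/feature-x
	fmt.Println(plumbing.NewTagReferenceName("v1.0.0"))              // refs/tags/v1.0.0
	fmt.Println(plumbing.NewRemoteReferenceName("origin", "master")) // refs/remotes/origin/master

	r, err := git.PlainOpen("/path/to/repo") // path is illustrative
	if err != nil {
		panic(err)
	}
	w, err := r.Worktree()
	if err != nil {
		panic(err)
	}
	// Check out a branch by its short name.
	if err := w.Checkout(&git.CheckoutOptions{
		Branch: plumbing.NewBranchReferenceName("feature-x"),
	}); err != nil {
		panic(err)
	}
}
```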

+ 2 - 0
vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go

@@ -40,6 +40,8 @@ type EncodedObjectStorer interface {
 	// HasEncodedObject returns ErrObjNotFound if the object doesn't
 	// exist.  If the object does exist, it returns nil.
 	HasEncodedObject(plumbing.Hash) error
+	// EncodedObjectSize returns the plaintext size of the encoded object.
+	EncodedObjectSize(plumbing.Hash) (int64, error)
 }
 
 // DeltaObjectStorer is an EncodedObjectStorer that can return delta

+ 21 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go

@@ -4,6 +4,7 @@ package http
 import (
 	"bytes"
 	"fmt"
+	"net"
 	"net/http"
 	"strconv"
 	"strings"
@@ -151,6 +152,18 @@ func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
 		return
 	}
 
+	h, p, err := net.SplitHostPort(r.URL.Host)
+	if err != nil {
+		h = r.URL.Host
+	}
+	if p != "" {
+		port, err := strconv.Atoi(p)
+		if err == nil {
+			s.endpoint.Port = port
+		}
+	}
+	s.endpoint.Host = h
+
 	s.endpoint.Protocol = r.URL.Scheme
 	s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
 }
@@ -201,7 +214,14 @@ func (a *BasicAuth) String() string {
 	return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
 }
 
-// TokenAuth implements the go-git http.AuthMethod and transport.AuthMethod interfaces
+// TokenAuth implements an http.AuthMethod that can be used with http transport
+// to authenticate with HTTP token authentication (also known as bearer
+// authentication).
+//
+// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
+// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
+// use basic HTTP authentication, with the OAuth token as user or password.
+// Check the documentation of your git server for details.
 type TokenAuth struct {
 	Token string
 }

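As the new TokenAuth comment warns, OAuth tokens for GitHub-style servers go through BasicAuth rather than TokenAuth. A sketch with a placeholder URL and token, cloning into in-memory storage:

package main

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	// The OAuth token is sent as the basic-auth password; for GitHub any
	// non-empty username is commonly accepted alongside it.
	auth := &http.BasicAuth{
		Username: "token-user",     // placeholder
		Password: "my-oauth-token", // placeholder
	}

	_, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL:  "https://github.com/example/repo.git", // placeholder
		Auth: auth,
	})
	if err != nil {
		panic(err)
	}
}
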
+ 2 - 1
vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go

@@ -1,6 +1,7 @@
 package server
 
 import (
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"gopkg.in/src-d/go-git.v4/plumbing/transport"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -43,7 +44,7 @@ func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
 		return nil, transport.ErrRepositoryNotFound
 	}
 
-	return filesystem.NewStorage(fs)
+	return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil
 }
 
 // MapLoader is a Loader that uses a lookup map of storer.Storer by

+ 8 - 6
vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go

@@ -236,7 +236,7 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
 // NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a
 // known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT
 //
-// If files is empty, the list of files will be read from the SSH_KNOWN_HOSTS
+// If the list of files is empty, it will be read from the SSH_KNOWN_HOSTS
 // environment variable, example:
 //   /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
 //
@@ -244,13 +244,15 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
 //   ~/.ssh/known_hosts
 //   /etc/ssh/ssh_known_hosts
 func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
-	files, err := getDefaultKnownHostsFiles()
-	if err != nil {
-		return nil, err
+	var err error
+
+	if len(files) == 0 {
+		if files, err = getDefaultKnownHostsFiles(); err != nil {
+			return nil, err
+		}
 	}
 
-	files, err = filterKnownHostsFiles(files...)
-	if err != nil {
+	if files, err = filterKnownHostsFiles(files...); err != nil {
 		return nil, err
 	}
 

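With this change an explicit file list passed to NewKnownHostsCallback is actually honored; only an empty list falls back to SSH_KNOWN_HOSTS and the standard locations. A sketch with placeholder key and known_hosts paths, assuming PublicKeys exposes HostKeyCallback through its embedded helper:

package main

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	auth, err := ssh.NewPublicKeysFromFile("git", "/home/user/.ssh/id_rsa", "") // placeholder path
	if err != nil {
		panic(err)
	}

	// Explicit file; call with no arguments to use the defaults instead.
	callback, err := ssh.NewKnownHostsCallback("/home/user/.ssh/known_hosts") // placeholder path
	if err != nil {
		panic(err)
	}
	auth.HostKeyCallback = callback

	_, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL:  "git@github.com:example/repo.git", // placeholder
		Auth: auth,
	})
	if err != nil {
		panic(err)
	}
}
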
+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/prune.go

@@ -49,7 +49,7 @@ func (r *Repository) Prune(opt PruneOptions) error {
 		}
 		// Otherwise it is a candidate for pruning.
 		// Check out for too new objects next.
-		if opt.OnlyObjectsOlderThan != (time.Time{}) {
+		if !opt.OnlyObjectsOlderThan.IsZero() {
 			// Errors here are non-fatal. The object may be e.g. packed.
 			// Or concurrently deleted. Skip such objects.
 			t, err := los.LooseObjectTime(hash)

+ 3 - 1
vendor/gopkg.in/src-d/go-git.v4/references.go

@@ -47,7 +47,9 @@ func (s commitSorterer) Len() int {
 }
 
 func (s commitSorterer) Less(i, j int) bool {
-	return s.l[i].Committer.When.Before(s.l[j].Committer.When)
+	return s.l[i].Committer.When.Before(s.l[j].Committer.When) ||
+		s.l[i].Committer.When.Equal(s.l[j].Committer.When) &&
+			s.l[i].Author.When.Before(s.l[j].Author.When)
 }
 
 func (s commitSorterer) Swap(i, j int) {

+ 7 - 2
vendor/gopkg.in/src-d/go-git.v4/remote.go

@@ -155,7 +155,7 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
 		}
 	}
 
-	rs, err := pushHashes(ctx, s, r.s, req, hashesToPush)
+	rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar))
 	if err != nil {
 		return err
 	}
@@ -167,6 +167,10 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
 	return r.updateRemoteReferenceStorage(req, rs)
 }
 
+func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
+	return !ar.Capabilities.Supports(capability.OFSDelta)
+}
+
 func (r *Remote) newReferenceUpdateRequest(
 	o *PushOptions,
 	localRefs []*plumbing.Reference,
@@ -994,6 +998,7 @@ func pushHashes(
 	s storage.Storer,
 	req *packp.ReferenceUpdateRequest,
 	hs []plumbing.Hash,
+	useRefDeltas bool,
 ) (*packp.ReportStatus, error) {
 
 	rd, wr := io.Pipe()
@@ -1004,7 +1009,7 @@ func pushHashes(
 	}
 	done := make(chan error)
 	go func() {
-		e := packfile.NewEncoder(wr, s, false)
+		e := packfile.NewEncoder(wr, s, useRefDeltas)
 		if _, err := e.Encode(hs, config.Pack.Window); err != nil {
 			done <- wr.CloseWithError(err)
 			return

+ 369 - 60
vendor/gopkg.in/src-d/go-git.v4/repository.go

@@ -1,18 +1,23 @@
 package git
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
+	"io"
 	stdioutil "io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
 	"strings"
 	"time"
 
+	"golang.org/x/crypto/openpgp"
 	"gopkg.in/src-d/go-git.v4/config"
 	"gopkg.in/src-d/go-git.v4/internal/revision"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
@@ -31,7 +36,14 @@ var (
 	// ErrBranchExists an error stating the specified branch already exists
 	ErrBranchExists = errors.New("branch already exists")
 	// ErrBranchNotFound an error stating the specified branch does not exist
-	ErrBranchNotFound            = errors.New("branch not found")
+	ErrBranchNotFound = errors.New("branch not found")
+	// ErrTagExists an error stating the specified tag already exists
+	ErrTagExists = errors.New("tag already exists")
+	// ErrTagNotFound an error stating the specified tag does not exist
+	ErrTagNotFound = errors.New("tag not found")
+	// ErrFetching is returned when the packfile could not be downloaded
+	ErrFetching = errors.New("unable to fetch packfile")
+
 	ErrInvalidReference          = errors.New("invalid reference, should be a tag or a branch")
 	ErrRepositoryNotExists       = errors.New("repository does not exist")
 	ErrRepositoryAlreadyExists   = errors.New("repository already exists")
@@ -166,15 +178,6 @@ func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) {
 		return nil, err
 	}
 
-	cfg, err := s.Config()
-	if err != nil {
-		return nil, err
-	}
-
-	if !cfg.Core.IsBare && worktree == nil {
-		return nil, ErrWorktreeNotProvided
-	}
-
 	return newRepository(s, worktree), nil
 }
 
@@ -220,10 +223,7 @@ func PlainInit(path string, isBare bool) (*Repository, error) {
 		dot, _ = wt.Chroot(GitDirName)
 	}
 
-	s, err := filesystem.NewStorage(dot)
-	if err != nil {
-		return nil, err
-	}
+	s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
 
 	return Init(s, wt)
 }
@@ -251,10 +251,7 @@ func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error)
 		return nil, err
 	}
 
-	s, err := filesystem.NewStorage(dot)
-	if err != nil {
-		return nil, err
-	}
+	s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())
 
 	return Open(s, wt)
 }
@@ -332,6 +329,8 @@ func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Files
 // PlainClone a repository into the path with the given options, isBare defines
 // if the new repository will be bare or normal. If the path is not empty
 // ErrRepositoryAlreadyExists is returned.
+//
+// TODO(mcuadros): move isBare to CloneOptions in v5
 func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) {
 	return PlainCloneContext(context.Background(), path, isBare, o)
 }
@@ -343,13 +342,28 @@ func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error)
 // The provided Context must be non-nil. If the context expires before the
 // operation is complete, an error is returned. The context only affects to the
 // transport operations.
+//
+// TODO(mcuadros): move isBare to CloneOptions in v5
+// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027
 func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) {
+	cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path)
+	if err != nil {
+		return nil, err
+	}
+
 	r, err := PlainInit(path, isBare)
 	if err != nil {
 		return nil, err
 	}
 
-	return r, r.clone(ctx, o)
+	err = r.clone(ctx, o)
+	if err != nil && err != ErrRepositoryAlreadyExists {
+		if cleanup {
+			cleanUpDir(path, cleanupParent)
+		}
+	}
+
+	return r, err
 }
 
 func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
@@ -360,6 +374,65 @@ func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
 	}
 }
 
+func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return true, true, nil
+		}
+
+		return false, false, err
+	}
+
+	if !fi.IsDir() {
+		return false, false, fmt.Errorf("path is not a directory: %s", path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return false, false, err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	_, err = f.Readdirnames(1)
+	if err == io.EOF {
+		return true, false, nil
+	}
+
+	if err != nil {
+		return false, false, err
+	}
+
+	return false, false, nil
+}
+
+func cleanUpDir(path string, all bool) error {
+	if all {
+		return os.RemoveAll(path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	names, err := f.Readdirnames(-1)
+	if err != nil {
+		return err
+	}
+
+	for _, name := range names {
+		if err := os.RemoveAll(filepath.Join(path, name)); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
 // Config return the repository config
 func (r *Repository) Config() (*config.Config, error) {
 	return r.Storer.Config()
@@ -483,6 +556,139 @@ func (r *Repository) DeleteBranch(name string) error {
 	return r.Storer.SetConfig(cfg)
 }
 
+// CreateTag creates a tag. If opts is included, the tag is an annotated tag,
+// otherwise a lightweight tag is created.
+func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) {
+	rname := plumbing.ReferenceName(path.Join("refs", "tags", name))
+
+	_, err := r.Storer.Reference(rname)
+	switch err {
+	case nil:
+		// Tag exists, this is an error
+		return nil, ErrTagExists
+	case plumbing.ErrReferenceNotFound:
+		// Tag missing, available for creation, pass this
+	default:
+		// Some other error
+		return nil, err
+	}
+
+	var target plumbing.Hash
+	if opts != nil {
+		target, err = r.createTagObject(name, hash, opts)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		target = hash
+	}
+
+	ref := plumbing.NewHashReference(rname, target)
+	if err = r.Storer.SetReference(ref); err != nil {
+		return nil, err
+	}
+
+	return ref, nil
+}
+
+func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) {
+	if err := opts.Validate(r, hash); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	rawobj, err := object.GetObject(r.Storer, hash)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	tag := &object.Tag{
+		Name:       name,
+		Tagger:     *opts.Tagger,
+		Message:    opts.Message,
+		TargetType: rawobj.Type(),
+		Target:     hash,
+	}
+
+	if opts.SignKey != nil {
+		sig, err := r.buildTagSignature(tag, opts.SignKey)
+		if err != nil {
+			return plumbing.ZeroHash, err
+		}
+
+		tag.PGPSignature = sig
+	}
+
+	obj := r.Storer.NewEncodedObject()
+	if err := tag.Encode(obj); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return r.Storer.SetEncodedObject(obj)
+}
+
+func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) {
+	encoded := &plumbing.MemoryObject{}
+	if err := tag.Encode(encoded); err != nil {
+		return "", err
+	}
+
+	rdr, err := encoded.Reader()
+	if err != nil {
+		return "", err
+	}
+
+	var b bytes.Buffer
+	if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil {
+		return "", err
+	}
+
+	return b.String(), nil
+}
+
+// Tag returns a tag from the repository.
+//
+// If you want to check to see if the tag is an annotated tag, you can call
+// TagObject on the hash of the reference in ForEach:
+//
+//   ref, err := r.Tag("v0.1.0")
+//   if err != nil {
+//     // Handle error
+//   }
+//
+//   obj, err := r.TagObject(ref.Hash())
+//   switch err {
+//   case nil:
+//     // Tag object present
+//   case plumbing.ErrObjectNotFound:
+//     // Not a tag object
+//   default:
+//     // Some other error
+//   }
+//
+func (r *Repository) Tag(name string) (*plumbing.Reference, error) {
+	ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false)
+	if err != nil {
+		if err == plumbing.ErrReferenceNotFound {
+			// Return a friendly error for this one, versus just ReferenceNotFound.
+			return nil, ErrTagNotFound
+		}
+
+		return nil, err
+	}
+
+	return ref, nil
+}
+
+// DeleteTag deletes a tag from the repository.
+func (r *Repository) DeleteTag(name string) error {
+	_, err := r.Tag(name)
+	if err != nil {
+		return err
+	}
+
+	return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name)))
+}
+
 func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) {
 	obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h)
 	if err != nil {
@@ -509,8 +715,9 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
 	}
 
 	c := &config.RemoteConfig{
-		Name: o.RemoteName,
-		URLs: []string{o.URL},
+		Name:  o.RemoteName,
+		URLs:  []string{o.URL},
+		Fetch: r.cloneRefSpec(o),
 	}
 
 	if _, err := r.CreateRemote(c); err != nil {
@@ -518,11 +725,12 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
 	}
 
 	ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
-		RefSpecs: r.cloneRefSpec(o, c),
-		Depth:    o.Depth,
-		Auth:     o.Auth,
-		Progress: o.Progress,
-		Tags:     o.Tags,
+		RefSpecs:   c.Fetch,
+		Depth:      o.Depth,
+		Auth:       o.Auth,
+		Progress:   o.Progress,
+		Tags:       o.Tags,
+		RemoteName: o.RemoteName,
 	}, o.ReferenceName)
 	if err != nil {
 		return err
@@ -587,21 +795,26 @@ const (
 	refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
 )
 
-func (r *Repository) cloneRefSpec(o *CloneOptions, c *config.RemoteConfig) []config.RefSpec {
-	var rs string
-
+func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
 	switch {
 	case o.ReferenceName.IsTag():
-		rs = fmt.Sprintf(refspecTag, o.ReferenceName.Short())
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
+		}
 	case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
-		rs = fmt.Sprintf(refspecSingleBranchHEAD, c.Name)
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
+			config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
+		}
 	case o.SingleBranch:
-		rs = fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), c.Name)
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)),
+		}
 	default:
-		return c.Fetch
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)),
+		}
 	}
-
-	return []config.RefSpec{config.RefSpec(rs)}
 }
 
 func (r *Repository) setIsBare(isBare bool) error {
@@ -619,9 +832,7 @@ func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.Remot
 		return nil
 	}
 
-	c.Fetch = []config.RefSpec{config.RefSpec(fmt.Sprintf(
-		refspecSingleBranch, head.Name().Short(), c.Name,
-	))}
+	c.Fetch = r.cloneRefSpec(o)
 
 	cfg, err := r.Storer.Config()
 	if err != nil {
@@ -649,6 +860,8 @@ func (r *Repository) fetchAndUpdateReferences(
 	remoteRefs, err := remote.fetch(ctx, o)
 	if err == NoErrAlreadyUpToDate {
 		objsUpdated = false
+	} else if err == packfile.ErrEmptyPackfile {
+		return nil, ErrFetching
 	} else if err != nil {
 		return nil, err
 	}
@@ -814,8 +1027,36 @@ func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error {
 
 // Log returns the commit history from the given LogOptions.
 func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
-	h := o.From
-	if o.From == plumbing.ZeroHash {
+	fn := commitIterFunc(o.Order)
+	if fn == nil {
+		return nil, fmt.Errorf("invalid Order=%v", o.Order)
+	}
+
+	var (
+		it  object.CommitIter
+		err error
+	)
+	if o.All {
+		it, err = r.logAll(fn)
+	} else {
+		it, err = r.log(o.From, fn)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if o.FileName != nil {
+		// For `git log --all`, also check the parent (whether the next commit comes from the real parent).
+		it = r.logWithFile(*o.FileName, it, o.All)
+	}
+
+	return it, nil
+}
+
+func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+	h := from
+	if from == plumbing.ZeroHash {
 		head, err := r.Head()
 		if err != nil {
 			return nil, err
@@ -828,25 +1069,68 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
 	if err != nil {
 		return nil, err
 	}
+	return commitIterFunc(commit), nil
+}
 
-	switch o.Order {
+func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+	return object.NewCommitAllIter(r.Storer, commitIterFunc)
+}
+
+func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
+	return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent)
+}
+
+func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
+	switch order {
 	case LogOrderDefault:
-		return object.NewCommitPreorderIter(commit, nil, nil), nil
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPreorderIter(c, nil, nil)
+		}
 	case LogOrderDFS:
-		return object.NewCommitPreorderIter(commit, nil, nil), nil
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPreorderIter(c, nil, nil)
+		}
 	case LogOrderDFSPost:
-		return object.NewCommitPostorderIter(commit, nil), nil
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPostorderIter(c, nil)
+		}
 	case LogOrderBSF:
-		return object.NewCommitIterBSF(commit, nil, nil), nil
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitIterBSF(c, nil, nil)
+		}
 	case LogOrderCommitterTime:
-		return object.NewCommitIterCTime(commit, nil, nil), nil
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitIterCTime(c, nil, nil)
+		}
 	}
-	return nil, fmt.Errorf("invalid Order=%v", o.Order)
+	return nil
 }
 
-// Tags returns all the References from Tags. This method returns only lightweight
-// tags. Note that not all the tags are lightweight ones. To return annotated tags
-// too, you need to call TagObjects() method.
+// Tags returns all the tag References in a repository.
+//
+// If you want to check to see if the tag is an annotated tag, you can call
+// TagObject on the hash Reference passed in through ForEach:
+//
+//   iter, err := r.Tags()
+//   if err != nil {
+//     // Handle error
+//   }
+//
+//   if err := iter.ForEach(func (ref *plumbing.Reference) error {
+//     obj, err := r.TagObject(ref.Hash())
+//     switch err {
+//     case nil:
+//       // Tag object present
+//     case plumbing.ErrObjectNotFound:
+//       // Not a tag object
+//     default:
+//       // Some other error
+//       return err
+//     }
+//   }); err != nil {
+//     // Handle outer iterator error
+//   }
+//
 func (r *Repository) Tags() (storer.ReferenceIter, error) {
 	refIter, err := r.Storer.IterReferences()
 	if err != nil {
@@ -1005,7 +1289,18 @@ func (r *Repository) Worktree() (*Worktree, error) {
 	return &Worktree{r: r, Filesystem: r.wt}, nil
 }
 
-// ResolveRevision resolves revision to corresponding hash.
+func countTrue(vals ...bool) int {
+	sum := 0
+	for _, v := range vals {
+		if v {
+			sum++
+		}
+	}
+	return sum
+}
+
+// ResolveRevision resolves revision to corresponding hash. It will always
+// resolve to a commit hash, not a tree or annotated tag.
 //
 // Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch,
 // refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug})
@@ -1025,8 +1320,8 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
 		case revision.Ref:
 			revisionRef := item.(revision.Ref)
 			var ref *plumbing.Reference
-			var hashCommit, refCommit *object.Commit
-			var rErr, hErr error
+			var hashCommit, refCommit, tagCommit *object.Commit
+			var rErr, hErr, tErr error
 
 			for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
 				ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
@@ -1037,24 +1332,38 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
 			}
 
 			if ref != nil {
+				tag, tObjErr := r.TagObject(ref.Hash())
+				if tObjErr != nil {
+					tErr = tObjErr
+				} else {
+					tagCommit, tErr = tag.Commit()
+				}
 				refCommit, rErr = r.CommitObject(ref.Hash())
 			} else {
 				rErr = plumbing.ErrReferenceNotFound
+				tErr = plumbing.ErrReferenceNotFound
 			}
 
-			isHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
-
-			if isHash {
+			maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
+			if maybeHash {
 				hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
+			} else {
+				hErr = plumbing.ErrReferenceNotFound
 			}
 
+			isTag := tErr == nil
+			isCommit := rErr == nil
+			isHash := hErr == nil
+
 			switch {
-			case rErr == nil && !isHash:
+			case countTrue(isTag, isCommit, isHash) > 1:
+				return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
+			case isTag:
+				commit = tagCommit
+			case isCommit:
 				commit = refCommit
-			case rErr != nil && isHash && hErr == nil:
+			case isHash:
 				commit = hashCommit
-			case rErr == nil && isHash && hErr == nil:
-				return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
 			default:
 				return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
 			}

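Two of the new Repository APIs in this hunk, sketched against a placeholder repository in the current directory: CreateTag with options produces an annotated tag (nil options produce a lightweight one), and LogOptions gains All and FileName:

package main

import (
	"fmt"
	"time"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	r, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	head, err := r.Head()
	if err != nil {
		panic(err)
	}

	// Annotated tag; tag name, tagger and message are placeholders.
	_, err = r.CreateTag("v0.0.1", head.Hash(), &git.CreateTagOptions{
		Tagger:  &object.Signature{Name: "Alice", Email: "alice@example.com", When: time.Now()},
		Message: "first release",
	})
	if err != nil {
		panic(err)
	}

	// Log over all references, restricted to commits touching a single file.
	file := "README.md" // placeholder
	iter, err := r.Log(&git.LogOptions{All: true, FileName: &file})
	if err != nil {
		panic(err)
	}
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format(time.RFC3339))
		return nil
	})
}
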
+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/status.go

@@ -26,7 +26,7 @@ func (s Status) IsUntracked(path string) bool {
 	return ok && stat.Worktree == Untracked
 }
 
-// IsClean returns true if all the files aren't in Unmodified status.
+// IsClean returns true if all the files are in Unmodified status.
 func (s Status) IsClean() bool {
 	for _, status := range s {
 		if status.Worktree != Unmodified || status.Staging != Unmodified {

+ 2 - 2
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go

@@ -92,8 +92,8 @@ func New(fs billy.Filesystem) *DotGit {
 	return NewWithOptions(fs, Options{})
 }
 
-// NewWithOptions creates a new DotGit and sets non default configuration
-// options. See New for complete help.
+// NewWithOptions sets non-default configuration options.
+// See New for complete help.
 func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
 	return &DotGit{
 		options: o,

+ 49 - 2
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go

@@ -1,15 +1,24 @@
-// +build !norwfs
-
 package dotgit
 
 import (
+	"fmt"
 	"os"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/utils/ioutil"
+
+	"gopkg.in/src-d/go-billy.v4"
 )
 
 func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
+	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+		return d.setRefRwfs(fileName, content, old)
+	}
+
+	return d.setRefNorwfs(fileName, content, old)
+}
+
+func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
 	// If we are not checking an old ref, just truncate the file.
 	mode := os.O_RDWR | os.O_CREATE
 	if old == nil {
@@ -41,3 +50,41 @@ func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err
 	_, err = f.Write([]byte(content))
 	return err
 }
+
+// There are some filesystems that don't support opening files in RDWR mode.
+// In these filesystems the standard SetRef function cannot be used as it
+// reads the reference file to check that it's not modified before updating it.
+//
+// This version of the function writes the reference without extra checks,
+// making it compatible with these simple filesystems. This is usually not
+// a problem as they should be accessed by only one process at a time.
+func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
+	_, err := d.fs.Stat(fileName)
+	if err == nil && old != nil {
+		fRead, err := d.fs.Open(fileName)
+		if err != nil {
+			return err
+		}
+
+		ref, err := d.readReferenceFrom(fRead, old.Name().String())
+		fRead.Close()
+
+		if err != nil {
+			return err
+		}
+
+		if ref.Hash() != old.Hash() {
+			return fmt.Errorf("reference has changed concurrently")
+		}
+	}
+
+	f, err := d.fs.Create(fileName)
+	if err != nil {
+		return err
+	}
+
+	defer f.Close()
+
+	_, err = f.Write([]byte(content))
+	return err
+}

+ 0 - 47
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref_norwfs.go

@@ -1,47 +0,0 @@
-// +build norwfs
-
-package dotgit
-
-import (
-	"fmt"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// There are some filesystems that don't support opening files in RDWD mode.
-// In these filesystems the standard SetRef function can not be used as i
-// reads the reference file to check that it's not modified before updating it.
-//
-// This version of the function writes the reference without extra checks
-// making it compatible with these simple filesystems. This is usually not
-// a problem as they should be accessed by only one process at a time.
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error {
-	_, err := d.fs.Stat(fileName)
-	if err == nil && old != nil {
-		fRead, err := d.fs.Open(fileName)
-		if err != nil {
-			return err
-		}
-
-		ref, err := d.readReferenceFrom(fRead, old.Name().String())
-		fRead.Close()
-
-		if err != nil {
-			return err
-		}
-
-		if ref.Hash() != old.Hash() {
-			return fmt.Errorf("reference has changed concurrently")
-		}
-	}
-
-	f, err := d.fs.Create(fileName)
-	if err != nil {
-		return err
-	}
-
-	defer f.Close()
-
-	_, err = f.Write([]byte(content))
-	return err
-}

+ 2 - 1
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go

@@ -1,6 +1,7 @@
 package filesystem
 
 import (
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/storage"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
 )
@@ -15,5 +16,5 @@ func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
 		return nil, err
 	}
 
-	return NewStorage(fs)
+	return NewStorage(fs, cache.NewObjectLRUDefault()), nil
 }

+ 137 - 40
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go

@@ -20,31 +20,26 @@ import (
 type ObjectStorage struct {
 	options Options
 
-	// deltaBaseCache is an object cache uses to cache delta's bases when
-	deltaBaseCache cache.Object
+	// objectCache is an object cache used to cache delta bases and also
+	// recently loaded loose objects
+	objectCache cache.Object
 
 	dir   *dotgit.DotGit
 	index map[plumbing.Hash]idxfile.Index
 }
 
-// NewObjectStorage creates a new ObjectStorage with the given .git directory.
-func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) {
-	return NewObjectStorageWithOptions(dir, Options{})
+// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
+func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
+	return NewObjectStorageWithOptions(dir, objectCache, Options{})
 }
 
-// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git
-// directory and sets its options.
-func NewObjectStorageWithOptions(
-	dir *dotgit.DotGit,
-	ops Options,
-) (ObjectStorage, error) {
-	s := ObjectStorage{
-		options:        ops,
-		deltaBaseCache: cache.NewObjectLRUDefault(),
-		dir:            dir,
+// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
+func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
+	return &ObjectStorage{
+		options:     ops,
+		objectCache: objectCache,
+		dir:         dir,
 	}
-
-	return s, nil
 }
 
 func (s *ObjectStorage) requireIndex() error {
@@ -67,6 +62,11 @@ func (s *ObjectStorage) requireIndex() error {
 	return nil
 }
 
+// Reindex re-indexes all packfiles. Useful if git changed packfiles externally
+func (s *ObjectStorage) Reindex() {
+	s.index = nil
+}
+
 func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
 	f, err := s.dir.ObjectPackIdx(h)
 	if err != nil {
@@ -166,12 +166,95 @@ func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
 	return nil
 }
 
+func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
+	size int64, err error) {
+	f, err := s.dir.Object(h)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, plumbing.ErrObjectNotFound
+		}
+
+		return 0, err
+	}
+
+	r, err := objfile.NewReader(f)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(r, &err)
+
+	_, size, err = r.Header()
+	return size, err
+}
+
+func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
+	size int64, err error) {
+	if err := s.requireIndex(); err != nil {
+		return 0, err
+	}
+
+	pack, _, offset := s.findObjectInPackfile(h)
+	if offset == -1 {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	f, err := s.dir.ObjectPack(pack)
+	if err != nil {
+		return 0, err
+	}
+	defer ioutil.CheckClose(f, &err)
+
+	idx := s.index[pack]
+	hash, err := idx.FindHash(offset)
+	if err == nil {
+		obj, ok := s.objectCache.Get(hash)
+		if ok {
+			return obj.Size(), nil
+		}
+	} else if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	}
+
+	var p *packfile.Packfile
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p.GetSizeByOffset(offset)
+}
+
+// EncodedObjectSize returns the plaintext size of the given object,
+// without actually reading the full object data from storage.
+func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	size, err = s.encodedObjectSizeFromUnpacked(h)
+	if err != nil && err != plumbing.ErrObjectNotFound {
+		return 0, err
+	} else if err == nil {
+		return size, nil
+	}
+
+	return s.encodedObjectSizeFromPackfile(h)
+}
+
 // EncodedObject returns the object with the given hash, by searching for it in
 // the packfile and the git object directories.
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
-	obj, err := s.getFromUnpacked(h)
-	if err == plumbing.ErrObjectNotFound {
+	var obj plumbing.EncodedObject
+	var err error
+
+	if s.index != nil {
 		obj, err = s.getFromPackfile(h, false)
+		if err == plumbing.ErrObjectNotFound {
+			obj, err = s.getFromUnpacked(h)
+		}
+	} else {
+		obj, err = s.getFromUnpacked(h)
+		if err == plumbing.ErrObjectNotFound {
+			obj, err = s.getFromPackfile(h, false)
+		}
 	}
 
 	// If the error is still object not found, check if it's a shared object
@@ -182,10 +265,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p
 			// Create a new object storage with the DotGit(s) and check for the
 			// required hash object. Skip when not found.
 			for _, dg := range dotgits {
-				o, oe := NewObjectStorage(dg)
-				if oe != nil {
-					continue
-				}
+				o := NewObjectStorage(dg, s.objectCache)
 				enobj, enerr := o.EncodedObject(t, h)
 				if enerr != nil {
 					continue
@@ -235,9 +315,12 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
 
 		return nil, err
 	}
-
 	defer ioutil.CheckClose(f, &err)
 
+	if cacheObj, found := s.objectCache.Get(h); found {
+		return cacheObj, nil
+	}
+
 	obj = s.NewEncodedObject()
 	r, err := objfile.NewReader(f)
 	if err != nil {
@@ -258,6 +341,8 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
 		return nil, err
 	}
 
+	s.objectCache.Put(obj)
+
 	_, err = io.Copy(w, r)
 	return obj, err
 }
@@ -300,7 +385,7 @@ func (s *ObjectStorage) decodeObjectAt(
 ) (plumbing.EncodedObject, error) {
 	hash, err := idx.FindHash(offset)
 	if err == nil {
-		obj, ok := s.deltaBaseCache.Get(hash)
+		obj, ok := s.objectCache.Get(hash)
 		if ok {
 			return obj, nil
 		}
@@ -311,8 +396,8 @@ func (s *ObjectStorage) decodeObjectAt(
 	}
 
 	var p *packfile.Packfile
-	if s.deltaBaseCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
 	} else {
 		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
 	}
@@ -331,11 +416,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
 	}
 
 	p := packfile.NewScanner(f)
-	if _, err := p.SeekFromStart(offset); err != nil {
-		return nil, err
-	}
-
-	header, err := p.NextObjectHeader()
+	header, err := p.SeekObjectHeader(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -405,7 +486,10 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode
 	return storer.NewMultiEncodedObjectIter(iters), nil
 }
 
-func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]struct{}) (storer.EncodedObjectIter, error) {
+func (s *ObjectStorage) buildPackfileIters(
+	t plumbing.ObjectType,
+	seen map[plumbing.Hash]struct{},
+) (storer.EncodedObjectIter, error) {
 	if err := s.requireIndex(); err != nil {
 		return nil, err
 	}
@@ -421,7 +505,10 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb
 			if err != nil {
 				return nil, err
 			}
-			return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache)
+			return newPackfileIter(
+				s.dir.Fs(), pack, t, seen, s.index[h],
+				s.objectCache, s.options.KeepDescriptors,
+			)
 		},
 	}, nil
 }
@@ -482,16 +569,21 @@ type packfileIter struct {
 	pack billy.File
 	iter storer.EncodedObjectIter
 	seen map[plumbing.Hash]struct{}
+
+	// tells whether the pack file should be left open after iteration or not
+	keepPack bool
 }
 
 // NewPackfileIter returns a new EncodedObjectIter for the provided packfile
 // and object type. Packfile and index file will be closed after they're
-// used.
-// used. If keepPack is true, the packfile won't be closed after the iteration
-// finishes.
 func NewPackfileIter(
 	fs billy.Filesystem,
 	f billy.File,
 	idxFile billy.File,
 	t plumbing.ObjectType,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	idx := idxfile.NewMemoryIndex()
 	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@@ -502,7 +594,8 @@ func NewPackfileIter(
 		return nil, err
 	}
 
-	return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil)
+	seen := make(map[plumbing.Hash]struct{})
+	return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
 }
 
 func newPackfileIter(
@@ -512,6 +605,7 @@ func newPackfileIter(
 	seen map[plumbing.Hash]struct{},
 	index idxfile.Index,
 	cache cache.Object,
+	keepPack bool,
 ) (storer.EncodedObjectIter, error) {
 	var p *packfile.Packfile
 	if cache != nil {
@@ -526,9 +620,10 @@ func newPackfileIter(
 	}
 
 	return &packfileIter{
-		pack: f,
-		iter: iter,
-		seen: seen,
+		pack:     f,
+		iter:     iter,
+		seen:     seen,
+		keepPack: keepPack,
 	}, nil
 }
 
@@ -566,7 +661,9 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
 
 func (iter *packfileIter) Close() {
 	iter.iter.Close()
-	_ = iter.pack.Close()
+	if !iter.keepPack {
+		_ = iter.pack.Close()
+	}
 }
 
 type objectsIter struct {

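Since the packfile index is now cached alongside the shared object cache, the new Reindex method is the escape hatch when another process rewrites the packfiles. A sketch with a placeholder .git path (Reindex is promoted from the embedded ObjectStorage):

package main

import (
	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	fs := osfs.New("/path/to/repo/.git") // placeholder path
	st := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())

	// ... another process repacks the repository here ...

	// Drop the in-memory packfile index so the next read re-scans objects/pack.
	st.Reindex()
}
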
+ 10 - 15
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go

@@ -2,6 +2,7 @@
 package filesystem
 
 import (
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
 
 	"gopkg.in/src-d/go-billy.v4"
@@ -32,38 +33,31 @@ type Options struct {
 	KeepDescriptors bool
 }
 
-// NewStorage returns a new Storage backed by a given `fs.Filesystem`
-func NewStorage(fs billy.Filesystem) (*Storage, error) {
-	return NewStorageWithOptions(fs, Options{})
+// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
+func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
+	return NewStorageWithOptions(fs, cache, Options{})
 }
 
-// NewStorageWithOptions returns a new Storage backed by a given `fs.Filesystem`
-func NewStorageWithOptions(
-	fs billy.Filesystem,
-	ops Options,
-) (*Storage, error) {
+// NewStorageWithOptions returns a new Storage with extra options,
+// backed by a given `fs.Filesystem` and cache.
+func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
 	dirOps := dotgit.Options{
 		ExclusiveAccess: ops.ExclusiveAccess,
 		KeepDescriptors: ops.KeepDescriptors,
 	}
-
 	dir := dotgit.NewWithOptions(fs, dirOps)
-	o, err := NewObjectStorageWithOptions(dir, ops)
-	if err != nil {
-		return nil, err
-	}
 
 	return &Storage{
 		fs:  fs,
 		dir: dir,
 
-		ObjectStorage:    o,
+		ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
 		ReferenceStorage: ReferenceStorage{dir: dir},
 		IndexStorage:     IndexStorage{dir: dir},
 		ShallowStorage:   ShallowStorage{dir: dir},
 		ConfigStorage:    ConfigStorage{dir: dir},
 		ModuleStorage:    ModuleStorage{dir: dir},
-	}, nil
+	}
 }
 
 // Filesystem returns the underlying filesystem
@@ -71,6 +65,7 @@ func (s *Storage) Filesystem() billy.Filesystem {
 	return s.fs
 }
 
+// Init initializes the .git directory
 func (s *Storage) Init() error {
 	return s.dir.Initialize()
 }

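The storage constructors no longer return an error and take the object cache explicitly, so callers now choose the cache. A sketch opening a placeholder bare repository with an arbitrary 96 MiB LRU cache:

package main

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	fs := osfs.New("/path/to/bare-repo.git") // placeholder path

	// NewStorage(fs, cache) replaces the old NewStorage(fs) (*Storage, error).
	st := filesystem.NewStorage(fs, cache.NewObjectLRU(96*cache.MiByte))

	if _, err := git.Open(st, nil); err != nil {
		panic(err)
	}
}
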
+ 10 - 0
vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go

@@ -122,6 +122,16 @@ func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
 	return nil
 }
 
+func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
+	size int64, err error) {
+	obj, ok := o.Objects[h]
+	if !ok {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	return obj.Size(), nil
+}
+
 func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
 	obj, ok := o.Objects[h]
 	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {

+ 1 - 1
vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go

@@ -1,4 +1,4 @@
-// +build darwin freebsd netbsd openbsd
+// +build darwin freebsd netbsd
 
 package git
 

+ 26 - 0
vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go

@@ -0,0 +1,26 @@
+// +build openbsd dragonfly solaris
+
+package git
+
+import (
+	"syscall"
+	"time"
+
+	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
+)
+
+func init() {
+	fillSystemInfo = func(e *index.Entry, sys interface{}) {
+		if os, ok := sys.(*syscall.Stat_t); ok {
+			e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec))
+			e.Dev = uint32(os.Dev)
+			e.Inode = uint32(os.Ino)
+			e.GID = os.Gid
+			e.UID = os.Uid
+		}
+	}
+}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+	return false
+}