Browse source code

Merge pull request #189 from zricethezav/debt/breakout

Debt/breakout
Zachary Rice 6 years ago
parent
commit
2a6bd1636f
100 changed files with 1502 additions and 20900 deletions
  1. 3 2
      .travis.yml
  2. 6 0
      CHANGELOG.md
  3. 0 1
      CONTRIBUTING.md
  4. 3 3
      Dockerfile
  5. 0 266
      Gopkg.lock
  6. 0 62
      Gopkg.toml
  7. 1 1
      Makefile
  8. 1 0
      README.md
  9. 26 0
      go.mod
  10. 111 0
      go.sum
  11. 7 1070
      main.go
  12. 224 0
      src/config.go
  13. 77 0
      src/constants.go
  14. 117 0
      src/core.go
  15. 52 0
      src/entropy.go
  16. 24 26
      src/github.go
  17. 12 10
      src/gitlab.go
  18. 149 137
      src/gitleaks_test.go
  19. 170 0
      src/options.go
  20. 318 0
      src/repo.go
  21. 201 0
      src/utils.go
  22. 0 5
      vendor/github.com/BurntSushi/toml/.gitignore
  23. 0 15
      vendor/github.com/BurntSushi/toml/.travis.yml
  24. 0 3
      vendor/github.com/BurntSushi/toml/COMPATIBLE
  25. 0 21
      vendor/github.com/BurntSushi/toml/COPYING
  26. 0 19
      vendor/github.com/BurntSushi/toml/Makefile
  27. 0 218
      vendor/github.com/BurntSushi/toml/README.md
  28. 0 21
      vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
  29. 0 21
      vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
  30. 0 21
      vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
  31. 0 509
      vendor/github.com/BurntSushi/toml/decode.go
  32. 0 121
      vendor/github.com/BurntSushi/toml/decode_meta.go
  33. 0 27
      vendor/github.com/BurntSushi/toml/doc.go
  34. 0 568
      vendor/github.com/BurntSushi/toml/encode.go
  35. 0 19
      vendor/github.com/BurntSushi/toml/encoding_types.go
  36. 0 18
      vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
  37. 0 953
      vendor/github.com/BurntSushi/toml/lex.go
  38. 0 592
      vendor/github.com/BurntSushi/toml/parse.go
  39. 0 1
      vendor/github.com/BurntSushi/toml/session.vim
  40. 0 91
      vendor/github.com/BurntSushi/toml/type_check.go
  41. 0 242
      vendor/github.com/BurntSushi/toml/type_fields.go
  42. 0 41
      vendor/github.com/emirpasic/gods/LICENSE
  43. 0 35
      vendor/github.com/emirpasic/gods/containers/containers.go
  44. 0 61
      vendor/github.com/emirpasic/gods/containers/enumerable.go
  45. 0 109
      vendor/github.com/emirpasic/gods/containers/iterator.go
  46. 0 17
      vendor/github.com/emirpasic/gods/containers/serialization.go
  47. 0 228
      vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
  48. 0 79
      vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
  49. 0 83
      vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
  50. 0 29
      vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
  51. 0 33
      vendor/github.com/emirpasic/gods/lists/lists.go
  52. 0 163
      vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go
  53. 0 84
      vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
  54. 0 22
      vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go
  55. 0 21
      vendor/github.com/emirpasic/gods/trees/trees.go
  56. 0 251
      vendor/github.com/emirpasic/gods/utils/comparator.go
  57. 0 29
      vendor/github.com/emirpasic/gods/utils/sort.go
  58. 0 47
      vendor/github.com/emirpasic/gods/utils/utils.go
  59. 0 22
      vendor/github.com/franela/goblin/.gitignore
  60. 0 7
      vendor/github.com/franela/goblin/.travis.yml
  61. 0 19
      vendor/github.com/franela/goblin/LICENSE
  62. 0 3
      vendor/github.com/franela/goblin/Makefile
  63. 0 149
      vendor/github.com/franela/goblin/README.md
  64. 0 70
      vendor/github.com/franela/goblin/assertions.go
  65. 0 36
      vendor/github.com/franela/goblin/go.snippets
  66. 0 337
      vendor/github.com/franela/goblin/goblin.go
  67. Binary
      vendor/github.com/franela/goblin/goblin_logo.jpg
  68. Binary
      vendor/github.com/franela/goblin/goblin_output.png
  69. 0 30
      vendor/github.com/franela/goblin/mono_reporter.go
  70. 0 153
      vendor/github.com/franela/goblin/reporting.go
  71. 0 21
      vendor/github.com/franela/goblin/resolver.go
  72. 0 3
      vendor/github.com/golang/protobuf/AUTHORS
  73. 0 3
      vendor/github.com/golang/protobuf/CONTRIBUTORS
  74. 0 28
      vendor/github.com/golang/protobuf/LICENSE
  75. 0 253
      vendor/github.com/golang/protobuf/proto/clone.go
  76. 0 428
      vendor/github.com/golang/protobuf/proto/decode.go
  77. 0 350
      vendor/github.com/golang/protobuf/proto/discard.go
  78. 0 203
      vendor/github.com/golang/protobuf/proto/encode.go
  79. 0 300
      vendor/github.com/golang/protobuf/proto/equal.go
  80. 0 543
      vendor/github.com/golang/protobuf/proto/extensions.go
  81. 0 979
      vendor/github.com/golang/protobuf/proto/lib.go
  82. 0 314
      vendor/github.com/golang/protobuf/proto/message_set.go
  83. 0 357
      vendor/github.com/golang/protobuf/proto/pointer_reflect.go
  84. 0 308
      vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
  85. 0 544
      vendor/github.com/golang/protobuf/proto/properties.go
  86. 0 2767
      vendor/github.com/golang/protobuf/proto/table_marshal.go
  87. 0 654
      vendor/github.com/golang/protobuf/proto/table_merge.go
  88. 0 2051
      vendor/github.com/golang/protobuf/proto/table_unmarshal.go
  89. 0 843
      vendor/github.com/golang/protobuf/proto/text.go
  90. 0 880
      vendor/github.com/golang/protobuf/proto/text_parser.go
  91. 0 171
      vendor/github.com/google/go-github/AUTHORS
  92. 0 341
      vendor/github.com/google/go-github/LICENSE
  93. 0 69
      vendor/github.com/google/go-github/github/activity.go
  94. 0 324
      vendor/github.com/google/go-github/github/activity_events.go
  95. 0 223
      vendor/github.com/google/go-github/github/activity_notifications.go
  96. 0 135
      vendor/github.com/google/go-github/github/activity_star.go
  97. 0 146
      vendor/github.com/google/go-github/github/activity_watching.go
  98. 0 101
      vendor/github.com/google/go-github/github/admin.go
  99. 0 171
      vendor/github.com/google/go-github/github/admin_stats.go
  100. 0 169
      vendor/github.com/google/go-github/github/apps.go

+ 3 - 2
.travis.yml

@@ -1,12 +1,13 @@
 sudo: required
 language: go
 go:
-- 1.9
+- 1.11
 services:
 - docker
 script:
-- make test
+- env GO111MODULE=on make test
 after_success:
+- env GO111MODULE=on go build
 - export REPO=zricethezav/gitleaks
 - export TAG=`if [ "$TRAVIS_BRANCH" == "master" ]; then echo "latest"; else echo $TRAVIS_BRANCH;
   fi`

+ 6 - 0
CHANGELOG.md

@@ -1,5 +1,11 @@
 CHANGELOG
 =========
+1.25.0
+----
+- Pretty big refactor, see `src` directory
+- Dropping dep for go modules
+- Separating email and author
+- Readding branch support with `--branch=`
 
 
 1.24.0
 ----

+ 0 - 1
CONTRIBUTING.md

@@ -13,7 +13,6 @@ Make sure you pass this list of requirements.
 
 
 - You've run `go fmt`.
 - You've run `golint`.
-- Your Go changes are confined to `gitleaks_test.go` and `main.go`. This is subject to change as the project evolves. Stylistically, I like having a single go file considering the size of this project (its tiny).
 - You've added test cases for your changes.
 - You've updated [the changelog](CHANGELOG.md).
 - Your tests pass.

+ 3 - 3
Dockerfile

@@ -1,7 +1,7 @@
-FROM golang:1.10.3 AS build
+FROM golang:1.11.6 AS build
 WORKDIR /go/src/github.com/zricethezav/gitleaks
 COPY . .
-RUN CGO_ENABLED=0 go build -o bin/gitleaks *.go
+RUN GO111MODULE=on CGO_ENABLED=0 go build -o bin/gitleaks *.go
 
 
 FROM alpine:3.7
 RUN apk add --no-cache bash git openssh
@@ -11,6 +11,6 @@ ENTRYPOINT ["gitleaks"]
 # How to use me :
 
 
 # docker build -t gitleaks .
-# docker run --rm --name=gitleaks gitleaks https://github.com/zricethezav/gitleaks
+# docker run --rm --name=gitleaks gitleaks --repo=https://github.com/zricethezav/gitleaks
 
 
 # This will check for secrets in https://github.com/zricethezav/gitleaks

+ 0 - 266
Gopkg.lock

@@ -1,266 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/BurntSushi/toml"
-  packages = ["."]
-  revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
-  version = "v0.3.1"
-
-[[projects]]
-  name = "github.com/emirpasic/gods"
-  packages = [
-    "containers",
-    "lists",
-    "lists/arraylist",
-    "trees",
-    "trees/binaryheap",
-    "utils"
-  ]
-  revision = "1615341f118ae12f353cc8a983f35b584342c9b3"
-  version = "v1.12.0"
-
-[[projects]]
-  name = "github.com/franela/goblin"
-  packages = ["."]
-  revision = "cd5d08fb4ede9eaac1812fdb513552e7404eae2e"
-  version = "0.0.2"
-
-[[projects]]
-  name = "github.com/golang/protobuf"
-  packages = ["proto"]
-  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
-  version = "v1.2.0"
-
-[[projects]]
-  name = "github.com/google/go-github"
-  packages = ["github"]
-  revision = "e48060a28fac52d0f1cb758bc8b87c07bac4a87d"
-  version = "v15.0.0"
-
-[[projects]]
-  name = "github.com/google/go-querystring"
-  packages = ["query"]
-  revision = "44c6ddd0a2342c386950e880b658017258da92fc"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/hako/durafmt"
-  packages = ["."]
-  revision = "7b7ae1e72eade09dbc9c2cfba3e6c4bae7b8bcac"
-  version = "1.0.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/jbenet/go-context"
-  packages = ["io"]
-  revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
-
-[[projects]]
-  name = "github.com/jessevdk/go-flags"
-  packages = ["."]
-  revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e"
-  version = "v1.4.0"
-
-[[projects]]
-  name = "github.com/kevinburke/ssh_config"
-  packages = ["."]
-  revision = "81db2a75821ed34e682567d48be488a1c3121088"
-  version = "0.5"
-
-[[projects]]
-  name = "github.com/mitchellh/go-homedir"
-  packages = ["."]
-  revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/pelletier/go-buffruneio"
-  packages = ["."]
-  revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
-  version = "v0.2.0"
-
-[[projects]]
-  name = "github.com/sergi/go-diff"
-  packages = ["diffmatchpatch"]
-  revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
-  version = "v1.0.0"
-
-[[projects]]
-  name = "github.com/sirupsen/logrus"
-  packages = ["."]
-  revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
-  version = "v1.0.6"
-
-[[projects]]
-  name = "github.com/src-d/gcfg"
-  packages = [
-    ".",
-    "scanner",
-    "token",
-    "types"
-  ]
-  revision = "f187355171c936ac84a82793659ebb4936bc1c23"
-  version = "v1.3.0"
-
-[[projects]]
-  name = "github.com/xanzy/go-gitlab"
-  packages = ["."]
-  revision = "183a80bb43ec9746d72f7cb37116e756d075fb91"
-  version = "v0.11.3"
-
-[[projects]]
-  name = "github.com/xanzy/ssh-agent"
-  packages = ["."]
-  revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
-  version = "v0.2.0"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-  packages = [
-    "cast5",
-    "curve25519",
-    "ed25519",
-    "ed25519/internal/edwards25519",
-    "internal/chacha20",
-    "internal/subtle",
-    "openpgp",
-    "openpgp/armor",
-    "openpgp/elgamal",
-    "openpgp/errors",
-    "openpgp/packet",
-    "openpgp/s2k",
-    "poly1305",
-    "ssh",
-    "ssh/agent",
-    "ssh/knownhosts",
-    "ssh/terminal"
-  ]
-  revision = "0e37d006457bf46f9e6692014ba72ef82c33022c"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = [
-    "context",
-    "context/ctxhttp"
-  ]
-  revision = "f04abc6bdfa7a0171a8a0c9fd2ada9391044d056"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-  packages = [
-    ".",
-    "internal"
-  ]
-  revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = [
-    "unix",
-    "windows"
-  ]
-  revision = "b09afc3d579e346c4a7e4705953acaf6f9e551bd"
-
-[[projects]]
-  name = "golang.org/x/text"
-  packages = [
-    "internal/gen",
-    "internal/triegen",
-    "internal/ucd",
-    "transform",
-    "unicode/cldr",
-    "unicode/norm"
-  ]
-  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
-  version = "v0.3.0"
-
-[[projects]]
-  name = "google.golang.org/appengine"
-  packages = [
-    "internal",
-    "internal/base",
-    "internal/datastore",
-    "internal/log",
-    "internal/remote_api",
-    "internal/urlfetch",
-    "urlfetch"
-  ]
-  revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
-  version = "v1.2.0"
-
-[[projects]]
-  name = "gopkg.in/src-d/go-billy.v4"
-  packages = [
-    ".",
-    "helper/chroot",
-    "helper/polyfill",
-    "osfs",
-    "util"
-  ]
-  revision = "982626487c60a5252e7d0b695ca23fb0fa2fd670"
-  version = "v4.3.0"
-
-[[projects]]
-  name = "gopkg.in/src-d/go-git.v4"
-  packages = [
-    ".",
-    "config",
-    "internal/revision",
-    "plumbing",
-    "plumbing/cache",
-    "plumbing/filemode",
-    "plumbing/format/config",
-    "plumbing/format/diff",
-    "plumbing/format/gitignore",
-    "plumbing/format/idxfile",
-    "plumbing/format/index",
-    "plumbing/format/objfile",
-    "plumbing/format/packfile",
-    "plumbing/format/pktline",
-    "plumbing/object",
-    "plumbing/protocol/packp",
-    "plumbing/protocol/packp/capability",
-    "plumbing/protocol/packp/sideband",
-    "plumbing/revlist",
-    "plumbing/storer",
-    "plumbing/transport",
-    "plumbing/transport/client",
-    "plumbing/transport/file",
-    "plumbing/transport/git",
-    "plumbing/transport/http",
-    "plumbing/transport/internal/common",
-    "plumbing/transport/server",
-    "plumbing/transport/ssh",
-    "storage",
-    "storage/filesystem",
-    "storage/filesystem/dotgit",
-    "storage/memory",
-    "utils/binary",
-    "utils/diff",
-    "utils/ioutil",
-    "utils/merkletrie",
-    "utils/merkletrie/filesystem",
-    "utils/merkletrie/index",
-    "utils/merkletrie/internal/frame",
-    "utils/merkletrie/noder"
-  ]
-  revision = "a1f6ef44dfed1253ef7f3bc049f66b15f8fc2ab2"
-  version = "v4.9.1"
-
-[[projects]]
-  name = "gopkg.in/warnings.v0"
-  packages = ["."]
-  revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
-  version = "v0.1.2"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "94efaa76ee4e1f9c4d3a38138dc1ceebf7e8f50c8cdc56721c2dc122f810b8c5"
-  solver-name = "gps-cdcl"
-  solver-version = 1

+ 0 - 62
Gopkg.toml

@@ -1,62 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-#
-# [prune]
-#   non-go = false
-#   go-tests = true
-#   unused-packages = true
-
-
-[[constraint]]
-  name = "github.com/BurntSushi/toml"
-  version = "0.3.0"
-
-[[constraint]]
-  name = "github.com/franela/goblin"
-  version = "0.0.2"
-
-[[constraint]]
-  name = "github.com/google/go-github"
-  version = "15.0.0"
-
-[[constraint]]
-  name = "github.com/jessevdk/go-flags"
-  version = "1.4.0"
-
-[[constraint]]
-  name = "github.com/sirupsen/logrus"
-  version = "1.0.5"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-
-[[constraint]]
-  name = "gopkg.in/src-d/go-git.v4"
-  version = "4.9.1"
-
-[prune]
-  go-tests = true
-  unused-packages = true
-
-[[constraint]]
-  name = "github.com/xanzy/go-gitlab"
-  version = "0.11.3"

+ 1 - 1
Makefile

@@ -4,7 +4,7 @@ test:
 	go get golang.org/x/lint/golint
 	go fmt
 	golint
-	go test --race --cover -run=Test$
+	go test --race --cover github.com/zricethezav/gitleaks/src -v
 deploy:
 	@echo "$(DOCKER_PASSWORD)" | docker login -u "$(DOCKER_USERNAME)" --password-stdin
 	docker build -f Dockerfile -t $(REPO):$(TAG) .

+ 1 - 0
README.md

@@ -84,6 +84,7 @@ Application Options:
   -e, --entropy=        Include entropy checks during audit. Entropy scale: 0.0(no entropy) - 8.0(max entropy)
       --noise-reduction Reduce the number of finds when entropy checks are enabled
       --repo-config     Load config from target repo. Config file must be ".gitleaks.toml"
+      --branch=         Branch to audit
   -l, --log=            log level
   -v, --verbose         Show verbose output from gitleaks audit
       --report=         path to write report file
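
Usage note, assembled from this PR's changes rather than quoted from the repository: combining the restored `--branch=` option above with the `--repo=` form shown in the updated Dockerfile comment gives an invocation along the lines of

    gitleaks --repo=https://github.com/zricethezav/gitleaks --branch=master --report=leaks.json

where "master" and "leaks.json" are placeholder values, not taken from the diff.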

+ 26 - 0
go.mod

@@ -0,0 +1,26 @@
+module github.com/zricethezav/gitleaks
+
+require (
+	github.com/BurntSushi/toml v0.3.1
+	github.com/emirpasic/gods v1.12.0 // indirect
+	github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727
+	github.com/google/go-github v15.0.0+incompatible
+	github.com/google/go-querystring v1.0.0 // indirect
+	github.com/hako/durafmt v0.0.0-20180520121703-7b7ae1e72ead
+	github.com/ipfs/go-ipfs v0.4.19 // indirect
+	github.com/jessevdk/go-flags v1.4.0
+	github.com/onsi/ginkgo v1.8.0 // indirect
+	github.com/onsi/gomega v1.5.0 // indirect
+	github.com/sirupsen/logrus v1.0.6
+	github.com/xanzy/go-gitlab v0.11.3
+	golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b // indirect
+	golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7 // indirect
+	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
+	golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 // indirect
+	golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e // indirect
+	google.golang.org/appengine v1.2.0 // indirect
+	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
+	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
+	gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect
+	gopkg.in/src-d/go-git.v4 v4.9.1
+)
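
With Gopkg.lock and Gopkg.toml removed, this go.mod/go.sum pair now drives dependency resolution. For reference, the module-aware invocations used elsewhere in this PR are:

    env GO111MODULE=on make test                                  # CI test step (.travis.yml)
    env GO111MODULE=on go build                                   # CI build step (.travis.yml)
    GO111MODULE=on CGO_ENABLED=0 go build -o bin/gitleaks *.go    # image build step (Dockerfile)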

+ 111 - 0
go.sum

@@ -0,0 +1,111 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727 h1:eouy4stZdUKn7n98c1+rdUTxWMg+jvhP+oHt0K8fiug=
+github.com/franela/goblin v0.0.0-20181003173013-ead4ad1d2727/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-github v15.0.0+incompatible h1:jlPg2Cpsxb/FyEV/MFiIE9tW/2RAevQNZDPeHbf5a94=
+github.com/google/go-github v15.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/hako/durafmt v0.0.0-20180520121703-7b7ae1e72ead h1:Y9WOGZY2nw5ksbEf5AIpk+vK52Tdg/VN/rHFRfEeeGQ=
+github.com/hako/durafmt v0.0.0-20180520121703-7b7ae1e72ead/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ipfs/go-ipfs v0.4.19 h1:ioUvuv1L3Zb9XgPi6S8selFMMlTa5AntQzVGvaoYHXM=
+github.com/ipfs/go-ipfs v0.4.19/go.mod h1:iXzbK+Wa6eePj3jQg/uY6Uoq5iOwY+GToD/bgaRadto=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
+github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
+github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/xanzy/go-gitlab v0.11.3 h1:gSYcSb+pCx3fco6/O3w784/omQVTcrgxRzyf14SBvUQ=
+github.com/xanzy/go-gitlab v0.11.3/go.mod h1:CRKHkvFWNU6C3AEfqLWjnCNnAs4nj8Zk95rX2S3X6Mw=
+github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
+github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ=
+golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7 h1:zKzVgSQ8WOSHzD7I4k8LQjrHUUCNOlBsgc0PcYLVNnY=
+golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e h1:LSlw/Dbj0MkNvPYAAkGinYmGliq+aqS7eKPYlE4oWC4=
+golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
+gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
+gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-git.v4 v4.9.1 h1:0oKHJZY8tM7B71378cfTg2c5jmWyNlXvestTT6WfY+4=
+gopkg.in/src-d/go-git.v4 v4.9.1/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

+ 7 - 1070
main.go

@@ -1,1086 +1,23 @@
 package main
 
 
 import (
-	"crypto/md5"
-	"encoding/csv"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"math"
-	"net"
-	"net/url"
 	"os"
 	"os"
-	"os/user"
-	"path"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
 
 
-	"gopkg.in/src-d/go-git.v4/plumbing"
-
-	diffType "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
-	"gopkg.in/src-d/go-git.v4/plumbing/object"
-	"gopkg.in/src-d/go-git.v4/plumbing/storer"
-	"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
-
-	"github.com/BurntSushi/toml"
-	"github.com/google/go-github/github"
-	"github.com/hako/durafmt"
-	"github.com/jessevdk/go-flags"
 	log "github.com/sirupsen/logrus"
-	"gopkg.in/src-d/go-git.v4"
-)
-
-// Leak represents a leaked secret or regex match.
-type Leak struct {
-	Line     string    `json:"line"`
-	Commit   string    `json:"commit"`
-	Offender string    `json:"offender"`
-	Type     string    `json:"reason"`
-	Message  string    `json:"commitMsg"`
-	Author   string    `json:"author"`
-	File     string    `json:"file"`
-	Repo     string    `json:"repo"`
-	Date     time.Time `json:"date"`
-}
-
-// RepoDescriptor contains a src-d git repository and other data about the repo
-type RepoDescriptor struct {
-	path       string
-	url        string
-	name       string
-	repository *git.Repository
-	err        error
-}
-
-// Options for gitleaks
-type Options struct {
-	// remote target options
-	Repo       string `short:"r" long:"repo" description:"Repo url to audit"`
-	GithubUser string `long:"github-user" description:"Github user to audit"`
-	GithubOrg  string `long:"github-org" description:"Github organization to audit"`
-	GithubURL  string `long:"github-url" default:"https://api.github.com/" description:"GitHub API Base URL, use for GitHub Enterprise. Example: https://github.example.com/api/v3/"`
-	GithubPR   string `long:"github-pr" description:"Github PR url to audit. This does not clone the repo. GITHUB_TOKEN must be set"`
-
-	GitLabUser string `long:"gitlab-user" description:"GitLab user ID to audit"`
-	GitLabOrg  string `long:"gitlab-org" description:"GitLab group ID to audit"`
-
-	CommitStop string `long:"commit-stop" description:"sha of commit to stop at"`
-	Commit     string `long:"commit" description:"sha of commit to audit"`
-	Depth      int64  `long:"depth" description:"maximum commit depth"`
-
-	// local target option
-	RepoPath  string `long:"repo-path" description:"Path to repo"`
-	OwnerPath string `long:"owner-path" description:"Path to owner directory (repos discovered)"`
-
-	// Process options
-	Threads        int     `long:"threads" description:"Maximum number of threads gitleaks spawns"`
-	Disk           bool    `long:"disk" description:"Clones repo(s) to disk"`
-	SingleSearch   string  `long:"single-search" description:"single regular expression to search for"`
-	ConfigPath     string  `long:"config" description:"path to gitleaks config"`
-	SSHKey         string  `long:"ssh-key" description:"path to ssh key"`
-	ExcludeForks   bool    `long:"exclude-forks" description:"exclude forks for organization/user audits"`
-	Entropy        float64 `long:"entropy" short:"e" description:"Include entropy checks during audit. Entropy scale: 0.0(no entropy) - 8.0(max entropy)"`
-	NoiseReduction bool    `long:"noise-reduction" description:"Reduce the number of finds when entropy checks are enabled"`
-	RepoConfig     bool    `long:"repo-config" description:"Load config from target repo. Config file must be \".gitleaks.toml\""`
-	// TODO: IncludeMessages  string `long:"messages" description:"include commit messages in audit"`
-
-	// Output options
-	Log          string `short:"l" long:"log" description:"log level"`
-	Verbose      bool   `short:"v" long:"verbose" description:"Show verbose output from gitleaks audit"`
-	Report       string `long:"report" description:"path to write report file. Needs to be csv or json"`
-	Redact       bool   `long:"redact" description:"redact secrets from log messages and report"`
-	Version      bool   `long:"version" description:"version number"`
-	SampleConfig bool   `long:"sample-config" description:"prints a sample config file"`
-}
-
-// Config struct for regexes matching and whitelisting
-type Config struct {
-	Regexes []struct {
-		Description string
-		Regex       string
-	}
-	Entropy struct {
-		LineRegexes []string
-	}
-	Whitelist struct {
-		Files   []string
-		Regexes []string
-		Commits []string
-		Repos   []string
-	}
-	Misc struct {
-		Entropy []string
-	}
-}
-
-type gitDiff struct {
-	content      string
-	commit       *object.Commit
-	filePath     string
-	repoName     string
-	githubCommit *github.RepositoryCommit
-	sha          string
-	message      string
-	author       string
-	date         time.Time
-}
-
-type entropyRange struct {
-	v1 float64
-	v2 float64
-}
-
-const defaultGithubURL = "https://api.github.com/"
-const version = "1.24.0"
-const errExit = 2
-const leakExit = 1
-const defaultConfig = `
-# This is a sample config file for gitleaks. You can configure gitleaks what to search for and what to whitelist.
-# The output you are seeing here is the default gitleaks config. If GITLEAKS_CONFIG environment variable
-# is set, gitleaks will load configurations from that path. If option --config-path is set, gitleaks will load
-# configurations from that path. Gitleaks does not whitelist anything by default.
-
-title = "gitleaks config"
-# add regexes to the regex table
-[[regexes]]
-description = "AWS"
-regex = '''AKIA[0-9A-Z]{16}'''
-[[regexes]]
-description = "PKCS8"
-regex = '''-----BEGIN PRIVATE KEY-----'''
-[[regexes]]
-description = "RSA"
-regex = '''-----BEGIN RSA PRIVATE KEY-----'''
-[[regexes]]
-description = "SSH"
-regex = '''-----BEGIN OPENSSH PRIVATE KEY-----'''
-[[regexes]]
-description = "PGP"
-regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----'''
-[[regexes]]
-description = "Facebook"
-regex = '''(?i)facebook(.{0,4})?['\"][0-9a-f]{32}['\"]'''
-[[regexes]]
-description = "Twitter"
-regex = '''(?i)twitter(.{0,4})?['\"][0-9a-zA-Z]{35,44}['\"]'''
-[[regexes]]
-description = "Github"
-regex = '''(?i)github(.{0,4})?['\"][0-9a-zA-Z]{35,40}['\"]'''
-[[regexes]]
-description = "Slack"
-regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?'''
-
-[entropy]
-lineregexes = [
-	"api",
-	"key",
-	"signature",
-	"secret",
-	"password",
-	"pass",
-	"pwd",
-	"token",
-	"curl",
-	"wget",
-	"https?",
-]
-
-[whitelist]
-files = [
-  "(.*?)(jpg|gif|doc|pdf|bin)$"
-]
-#commits = [
-#  "BADHA5H1",
-#  "BADHA5H2",
-#]
-#repos = [
-#	"mygoodrepo"
-#]
-[misc]
-#entropy = [
-#	"3.3-4.30"
-#	"6.0-8.0
-#]
-`
-
-var (
-	opts              Options
-	regexes           map[string]*regexp.Regexp
-	singleSearchRegex *regexp.Regexp
-	whiteListRegexes  []*regexp.Regexp
-	whiteListFiles    []*regexp.Regexp
-	whiteListCommits  map[string]bool
-	whiteListRepos    []*regexp.Regexp
-	entropyRanges     []entropyRange
-	entropyRegexes    []*regexp.Regexp
-	fileDiffRegex     *regexp.Regexp
-	sshAuth           *ssh.PublicKeys
-	dir               string
-	threads           int
-	totalCommits      int64
-	commitMap         = make(map[string]bool)
-	cMutex            = &sync.Mutex{}
-	auditDone         bool
+	"github.com/zricethezav/gitleaks/src"
 )
 
 
-func init() {
-	log.SetOutput(os.Stdout)
-	// threads = runtime.GOMAXPROCS(0) / 2
-	threads = 1
-	regexes = make(map[string]*regexp.Regexp)
-	whiteListCommits = make(map[string]bool)
-}
-
 func main() {
-	parser := flags.NewParser(&opts, flags.Default)
-	_, err := parser.Parse()
-
-	if err != nil {
-		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
-			os.Exit(0)
-		}
-	}
-
-	if len(os.Args) == 1 {
-		parser.WriteHelp(os.Stdout)
-		os.Exit(0)
-	}
-
-	if opts.Version {
-		fmt.Println(version)
-		os.Exit(0)
-	}
-	if opts.SampleConfig {
-		fmt.Println(defaultConfig)
-		os.Exit(0)
-	}
-	now := time.Now()
-	leaks, err := run()
+	report, err := gitleaks.Run(gitleaks.ParseOpts())
 	if err != nil {
-		if strings.Contains(err.Error(), "whitelisted") {
-			log.Info(err.Error())
-			os.Exit(0)
-		}
 		log.Error(err)
-		os.Exit(errExit)
-	}
-	if opts.Report != "" {
-		writeReport(leaks)
-	}
-	if len(leaks) != 0 {
-		log.Warnf("%d leaks detected. %d commits inspected in %s", len(leaks), totalCommits, durafmt.Parse(time.Now().Sub(now)).String())
-		os.Exit(leakExit)
-	} else {
-		log.Infof("%d leaks detected. %d commits inspected in %s", len(leaks), totalCommits, durafmt.Parse(time.Now().Sub(now)).String())
+		os.Exit(gitleaks.ErrExit)
 	}
-}
 
 
-// run parses options and kicks off the audit
-func run() ([]Leak, error) {
-	var leaks []Leak
-	setLogs()
-	err := optsGuard()
-	if err != nil {
-		return nil, err
-	}
-	err = loadToml()
-	if err != nil {
-		return nil, err
-	}
-	sshAuth, err = getSSHAuth()
-	if err != nil {
-		return leaks, err
-	}
-
-	if opts.Disk {
-		// temporary directory where all the gitleaks plain clones will reside
-		dir, err = ioutil.TempDir("", "gitleaks")
-		defer os.RemoveAll(dir)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// start audits
-	if opts.Repo != "" || opts.RepoPath != "" {
-		// Audit a single remote repo or a local repo.
-		repo, err := cloneRepo()
-		if err != nil {
-			return leaks, err
-		}
-		return auditGitRepo(repo)
-	} else if opts.OwnerPath != "" {
-		// Audit local repos. Gitleaks will look for all child directories of OwnerPath for
-		// git repos and perform an audit on said repos.
-		repos, err := discoverRepos(opts.OwnerPath)
-		if err != nil {
-			return leaks, err
-		}
-		for _, repo := range repos {
-			leaksFromRepo, err := auditGitRepo(repo)
-			if err != nil {
-				return leaks, err
-			}
-			leaks = append(leaksFromRepo, leaks...)
-		}
-	} else if opts.GithubOrg != "" || opts.GithubUser != "" {
-		// Audit a github owner -- a user or organization.
-		leaks, err = auditGithubRepos()
-		if err != nil {
-			return leaks, err
-		}
-	} else if opts.GitLabOrg != "" || opts.GitLabUser != "" {
-		leaks, err = auditGitlabRepos()
-		if err != nil {
-			return leaks, err
-		}
-	} else if opts.GithubPR != "" {
-		return auditGithubPR()
-	}
-	return leaks, nil
-}
-
-// writeReport writes a report to a file specified in the --report= option.
-// Default format for report is JSON. You can use the --csv option to write the report as a csv
-func writeReport(leaks []Leak) error {
-	if len(leaks) == 0 {
-		return nil
-	}
-
-	var err error
-	log.Infof("writing report to %s", opts.Report)
-	if strings.HasSuffix(opts.Report, ".csv") {
-		f, err := os.Create(opts.Report)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-		w := csv.NewWriter(f)
-		w.Write([]string{"repo", "line", "commit", "offender", "reason", "commitMsg", "author", "file", "date"})
-		for _, leak := range leaks {
-			w.Write([]string{leak.Repo, leak.Line, leak.Commit, leak.Offender, leak.Type, leak.Message, leak.Author, leak.File, leak.Date.Format(time.RFC3339)})
-		}
-		w.Flush()
-	} else {
-		var (
-			f       *os.File
-			encoder *json.Encoder
-		)
-		f, err := os.Create(opts.Report)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-		encoder = json.NewEncoder(f)
-		encoder.SetIndent("", "\t")
-		if _, err := f.WriteString("[\n"); err != nil {
-			return err
-		}
-		for i := 0; i < len(leaks); i++ {
-			if err := encoder.Encode(leaks[i]); err != nil {
-				return err
-			}
-			// for all but the last leak, seek back and overwrite the newline appended by Encode() with comma & newline
-			if i+1 < len(leaks) {
-				if _, err := f.Seek(-1, 1); err != nil {
-					return err
-				}
-				if _, err := f.WriteString(",\n"); err != nil {
-					return err
-				}
-			}
-		}
-		if _, err := f.WriteString("]"); err != nil {
-			return err
-		}
-		if err := f.Sync(); err != nil {
-			log.Error(err)
-			return err
-		}
-	}
-	return err
-}
-
-// cloneRepo clones a repo to memory(default) or to disk if the --disk option is set.
-func cloneRepo() (*RepoDescriptor, error) {
-	var (
-		err  error
-		repo *git.Repository
-	)
-	// check if whitelist
-	for _, re := range whiteListRepos {
-		if re.FindString(opts.Repo) != "" {
-			return nil, fmt.Errorf("skipping %s, whitelisted", opts.Repo)
-		}
-	}
-	if opts.Disk {
-		log.Infof("cloning %s", opts.Repo)
-		cloneTarget := fmt.Sprintf("%s/%x", dir, md5.Sum([]byte(fmt.Sprintf("%s%s", opts.GithubUser, opts.Repo))))
-		if strings.HasPrefix(opts.Repo, "git") {
-			repo, err = git.PlainClone(cloneTarget, false, &git.CloneOptions{
-				URL:      opts.Repo,
-				Progress: os.Stdout,
-				Auth:     sshAuth,
-			})
-		} else {
-			repo, err = git.PlainClone(cloneTarget, false, &git.CloneOptions{
-				URL:      opts.Repo,
-				Progress: os.Stdout,
-			})
-		}
-	} else if opts.RepoPath != "" {
-		log.Infof("opening %s", opts.RepoPath)
-		repo, err = git.PlainOpen(opts.RepoPath)
+	if len(report.Leaks) != 0 {
+		log.Warnf("%d leaks detected. %d commits inspected in %s", len(report.Leaks), report.Commits, report.Duration)
+		os.Exit(gitleaks.LeakExit)
 	} else {
-		log.Infof("cloning %s", opts.Repo)
-		if strings.HasPrefix(opts.Repo, "git") {
-			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
-				URL:      opts.Repo,
-				Progress: os.Stdout,
-				Auth:     sshAuth,
-			})
-		} else {
-			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
-				URL:      opts.Repo,
-				Progress: os.Stdout,
-			})
-		}
-	}
-	return &RepoDescriptor{
-		repository: repo,
-		path:       opts.RepoPath,
-		url:        opts.Repo,
-		name:       filepath.Base(opts.Repo),
-		err:        err,
-	}, nil
-}
-
-// auditGitRepo beings an audit on a git repository
-func auditGitRepo(repo *RepoDescriptor) ([]Leak, error) {
-	var (
-		err   error
-		leaks []Leak
-	)
-	for _, re := range whiteListRepos {
-		if re.FindString(repo.name) != "" {
-			return leaks, fmt.Errorf("skipping %s, whitelisted", repo.name)
-		}
-	}
-
-	// check if target contains an external gitleaks toml
-	if opts.RepoConfig {
-		err := externalConfig(repo)
-		if err != nil {
-			return leaks, nil
-		}
-	}
-
-	// clear commit cache
-	commitMap = make(map[string]bool)
-
-	refs, err := repo.repository.Storer.IterReferences()
-	if err != nil {
-		return leaks, err
+		log.Infof("%d leaks detected. %d commits inspected in %s", len(report.Leaks), report.Commits, report.Duration)
 	}
-	err = refs.ForEach(func(ref *plumbing.Reference) error {
-		if ref.Name().IsTag() {
-			return nil
-		}
-		branchLeaks := auditGitReference(repo, ref)
-		for _, leak := range branchLeaks {
-			leaks = append(leaks, leak)
-		}
-		return nil
-	})
-	return leaks, err
-}
-
-// auditGitReference beings the audit for a git reference. This function will
-// traverse the git reference and audit each line of each diff.
-func auditGitReference(repo *RepoDescriptor, ref *plumbing.Reference) []Leak {
-	var (
-		err         error
-		repoName    string
-		leaks       []Leak
-		commitCount int64
-		commitWg    sync.WaitGroup
-		mutex       = &sync.Mutex{}
-		semaphore   chan bool
-	)
-	if auditDone {
-		return nil
-	}
-	repoName = repo.name
-	if opts.Threads != 0 {
-		threads = opts.Threads
-	}
-	if opts.RepoPath != "" {
-		threads = 1
-	}
-	semaphore = make(chan bool, threads)
-
-	cIter, err := repo.repository.Log(&git.LogOptions{From: ref.Hash()})
-	if err != nil {
-		return nil
-	}
-	err = cIter.ForEach(func(c *object.Commit) error {
-		if c == nil || (opts.Depth != 0 && commitCount == opts.Depth) || auditDone {
-			if commitCount == opts.Depth {
-				auditDone = true
-			}
-			return storer.ErrStop
-		}
-		commitCount = commitCount + 1
-		if whiteListCommits[c.Hash.String()] {
-			log.Infof("skipping commit: %s\n", c.Hash.String())
-			return nil
-		}
-
-		// commits w/o parent (root of git the git ref) or option for single commit is not empty str
-		if len(c.ParentHashes) == 0 || opts.Commit == c.Hash.String() {
-			if commitMap[c.Hash.String()] {
-				return nil
-			}
-
-			if opts.Commit == c.Hash.String() {
-				auditDone = true
-			}
-
-			cMutex.Lock()
-			commitMap[c.Hash.String()] = true
-			cMutex.Unlock()
-			totalCommits = totalCommits + 1
-
-			fIter, err := c.Files()
-			if err != nil {
-				return nil
-			}
-			err = fIter.ForEach(func(f *object.File) error {
-				bin, err := f.IsBinary()
-				if bin || err != nil {
-					return nil
-				}
-				for _, re := range whiteListFiles {
-					if re.FindString(f.Name) != "" {
-						log.Debugf("skipping whitelisted file (matched regex '%s'): %s", re.String(), f.Name)
-						return nil
-					}
-				}
-				content, err := f.Contents()
-				if err != nil {
-					return nil
-				}
-				diff := gitDiff{
-					repoName: repoName,
-					filePath: f.Name,
-					content:  content,
-					sha:      c.Hash.String(),
-					author:   c.Author.String(),
-					message:  strings.Replace(c.Message, "\n", " ", -1),
-					date:     c.Author.When,
-				}
-				fileLeaks := inspect(diff)
-				mutex.Lock()
-				leaks = append(leaks, fileLeaks...)
-				mutex.Unlock()
-				return nil
-			})
-			return nil
-		}
-
-		// single commit
-		if opts.Commit != "" {
-			return nil
-		}
-
-		skipCount := false
-		err = c.Parents().ForEach(func(parent *object.Commit) error {
-			// check if we've seen this diff before
-			if commitMap[c.Hash.String()+parent.Hash.String()] {
-				return nil
-			}
-			cMutex.Lock()
-			commitMap[c.Hash.String()+parent.Hash.String()] = true
-			cMutex.Unlock()
-
-			if !skipCount {
-				totalCommits = totalCommits + 1
-				skipCount = true
-			}
-
-			commitWg.Add(1)
-			semaphore <- true
-			go func(c *object.Commit, parent *object.Commit) {
-				var (
-					filePath string
-					skipFile bool
-				)
-				defer func() {
-					commitWg.Done()
-					<-semaphore
-					if r := recover(); r != nil {
-						log.Warnf("recovering from panic on commit %s, likely large diff causing panic", c.Hash.String())
-					}
-				}()
-				patch, err := c.Patch(parent)
-				if err != nil {
-					log.Warnf("problem generating patch for commit: %s\n", c.Hash.String())
-					return
-				}
-				for _, f := range patch.FilePatches() {
-					if f.IsBinary() {
-						continue
-					}
-					skipFile = false
-					from, to := f.Files()
-					filePath = "???"
-					if from != nil {
-						filePath = from.Path()
-					} else if to != nil {
-						filePath = to.Path()
-					}
-					for _, re := range whiteListFiles {
-						if re.FindString(filePath) != "" {
-							log.Debugf("skipping whitelisted file (matched regex '%s'): %s", re.String(), filePath)
-							skipFile = true
-							break
-						}
-					}
-					if skipFile {
-						continue
-					}
-					chunks := f.Chunks()
-					for _, chunk := range chunks {
-						if chunk.Type() == diffType.Add || chunk.Type() == diffType.Delete {
-							diff := gitDiff{
-								repoName: repoName,
-								filePath: filePath,
-								content:  chunk.Content(),
-								sha:      c.Hash.String(),
-								author:   c.Author.String(),
-								message:  strings.Replace(c.Message, "\n", " ", -1),
-								date:     c.Author.When,
-							}
-							chunkLeaks := inspect(diff)
-							for _, leak := range chunkLeaks {
-								mutex.Lock()
-								leaks = append(leaks, leak)
-								mutex.Unlock()
-							}
-						}
-					}
-				}
-			}(c, parent)
-
-			return nil
-		})
-
-		// stop audit if we are at commitStop
-		if c.Hash.String() == opts.CommitStop {
-			auditDone = true
-			return storer.ErrStop
-		}
-
-		return nil
-	})
-	commitWg.Wait()
-	return leaks
-}
-
-// inspect will parse each line of the git diff's content against a set of regexes or
-// a set of regexes set by the config (see gitleaks.toml for example). This function
-// will skip lines that include a whitelisted regex. A list of leaks is returned.
-// If verbose mode (-v/--verbose) is set, then checkDiff will log leaks as they are discovered.
-func inspect(diff gitDiff) []Leak {
-	var (
-		leaks    []Leak
-		skipLine bool
-	)
-
-	lines := strings.Split(diff.content, "\n")
-
-	for _, line := range lines {
-		skipLine = false
-		for leakType, re := range regexes {
-			match := re.FindString(line)
-			if match == "" {
-				continue
-			}
-			if skipLine = isLineWhitelisted(line); skipLine {
-				break
-			}
-			leaks = addLeak(leaks, line, match, leakType, diff)
-		}
-
-		if !skipLine && (opts.Entropy > 0 || len(entropyRanges) != 0) {
-			words := strings.Fields(line)
-			for _, word := range words {
-				entropy := getShannonEntropy(word)
-				// Only check entropyRegexes and whiteListRegexes once per line, and only if an entropy leak type
-				// was found above, since regex checks are expensive.
-				if !entropyIsHighEnough(entropy) {
-					continue
-				}
-				// If either the line is whitelisted or the line fails the noiseReduction check (when enabled),
-				// then we can skip checking the rest of the line for high entropy words.
-				if skipLine = !highEntropyLineIsALeak(line) || isLineWhitelisted(line); skipLine {
-					break
-				}
-				leaks = addLeak(leaks, line, word, fmt.Sprintf("Entropy: %.2f", entropy), diff)
-			}
-		}
-	}
-	return leaks
-}
-
-// isLineWhitelisted returns true iff the line is matched by at least one of the whiteListRegexes.
-func isLineWhitelisted(line string) bool {
-	for _, wRe := range whiteListRegexes {
-		whitelistMatch := wRe.FindString(line)
-		if whitelistMatch != "" {
-			return true
-		}
-	}
-	return false
-}
-
-// addLeak is helper for func inspect() to append leaks if found during a diff check.
-func addLeak(leaks []Leak, line string, offender string, leakType string, diff gitDiff) []Leak {
-	leak := Leak{
-		Line:     line,
-		Commit:   diff.sha,
-		Offender: offender,
-		Type:     leakType,
-		Author:   diff.author,
-		File:     diff.filePath,
-		Repo:     diff.repoName,
-		Message:  diff.message,
-		Date:     diff.date,
-	}
-	if opts.Redact {
-		leak.Offender = "REDACTED"
-		leak.Line = strings.Replace(line, offender, "REDACTED", -1)
-	}
-
-	if opts.Verbose {
-		leak.log()
-	}
-
-	leaks = append(leaks, leak)
-	return leaks
-}
-
-// getShannonEntropy https://en.wiktionary.org/wiki/Shannon_entropy
-func getShannonEntropy(data string) (entropy float64) {
-	if data == "" {
-		return 0
-	}
-
-	charCounts := make(map[rune]int)
-	for _, char := range data {
-		charCounts[char]++
-	}
-
-	invLength := 1.0 / float64(len(data))
-	for _, count := range charCounts {
-		freq := float64(count) * invLength
-		entropy -= freq * math.Log2(freq)
-	}
-
-	return entropy
-}
-
-func entropyIsHighEnough(entropy float64) bool {
-	if entropy >= opts.Entropy && len(entropyRanges) == 0 {
-		return true
-	}
-
-	if len(entropyRanges) != 0 {
-		for _, eR := range entropyRanges {
-			if entropy > eR.v1 && entropy < eR.v2 {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-func highEntropyLineIsALeak(line string) bool {
-	if !opts.NoiseReduction {
-		return true
-	}
-
-	for _, re := range entropyRegexes {
-		if re.FindString(line) != "" {
-			return true
-		}
-	}
-
-	return false
-}
-
-// discoverRepos walks all the children of `path`. If a child directory
-// contain a .git file then that repo will be added to the list of repos returned
-func discoverRepos(ownerPath string) ([]*RepoDescriptor, error) {
-	var (
-		err   error
-		repos []*RepoDescriptor
-	)
-	files, err := ioutil.ReadDir(ownerPath)
-	if err != nil {
-		return repos, err
-	}
-	for _, f := range files {
-		if f.IsDir() {
-			repoPath := path.Join(ownerPath, f.Name())
-			r, err := git.PlainOpen(repoPath)
-			if err != nil {
-				continue
-			}
-			repos = append(repos, &RepoDescriptor{
-				repository: r,
-				name:       f.Name(),
-				path:       repoPath,
-			})
-		}
-	}
-	return repos, err
-}
-
-// externalConfig will attempt to load a pinned ".gitleaks.toml" configuration file
-// from a remote or local repo. Use the --repo-config option to trigger this.
-func externalConfig(repo *RepoDescriptor) error {
-	var config Config
-	wt, err := repo.repository.Worktree()
-	if err != nil {
-		return err
-	}
-	f, err := wt.Filesystem.Open(".gitleaks.toml")
-	if err != nil {
-		return err
-	}
-	if _, err := toml.DecodeReader(f, &config); err != nil {
-		return fmt.Errorf("problem loading config: %v", err)
-	}
-	f.Close()
-	if err != nil {
-		return err
-	}
-	updateConfig(config)
-	return nil
-}
-
-// setLogLevel sets log level for gitleaks. Default is Warning
-func setLogs() {
-	switch opts.Log {
-	case "info":
-		log.SetLevel(log.InfoLevel)
-	case "debug":
-		log.SetLevel(log.DebugLevel)
-	case "warn":
-		log.SetLevel(log.WarnLevel)
-	default:
-		log.SetLevel(log.InfoLevel)
-	}
-	log.SetFormatter(&log.TextFormatter{
-		FullTimestamp: true,
-	})
-}
-
-// optsGuard prevents invalid options
-func optsGuard() error {
-	var err error
-	if opts.GithubOrg != "" && opts.GithubUser != "" {
-		return fmt.Errorf("github user and organization set")
-	} else if opts.GithubOrg != "" && opts.OwnerPath != "" {
-		return fmt.Errorf("github organization set and local owner path")
-	} else if opts.GithubUser != "" && opts.OwnerPath != "" {
-		return fmt.Errorf("github user set and local owner path")
-	}
-
-	if opts.Threads > runtime.GOMAXPROCS(0) {
-		return fmt.Errorf("%d available threads", runtime.GOMAXPROCS(0))
-	}
-
-	// do the URL Parse and error checking here, so we can skip it later
-	// empty string is OK, it will default to the public github URL.
-	if opts.GithubURL != "" && opts.GithubURL != defaultGithubURL {
-		if !strings.HasSuffix(opts.GithubURL, "/") {
-			opts.GithubURL += "/"
-		}
-		ghURL, err := url.Parse(opts.GithubURL)
-		if err != nil {
-			return err
-		}
-		tcpPort := "443"
-		if ghURL.Scheme == "http" {
-			tcpPort = "80"
-		}
-		timeout := time.Duration(1 * time.Second)
-		_, err = net.DialTimeout("tcp", ghURL.Host+":"+tcpPort, timeout)
-		if err != nil {
-			return fmt.Errorf("%s unreachable, error: %s", ghURL.Host, err)
-		}
-	}
-
-	if opts.SingleSearch != "" {
-		singleSearchRegex, err = regexp.Compile(opts.SingleSearch)
-		if err != nil {
-			return fmt.Errorf("unable to compile regex: %s, %v", opts.SingleSearch, err)
-		}
-	}
-
-	if opts.Entropy > 8 {
-		return fmt.Errorf("The maximum level of entropy is 8")
-	}
-	if opts.Report != "" {
-		if !strings.HasSuffix(opts.Report, ".json") && !strings.HasSuffix(opts.Report, ".csv") {
-			return fmt.Errorf("Report should be a .json or .csv file")
-		}
-		dirPath := filepath.Dir(opts.Report)
-		if _, err := os.Stat(dirPath); os.IsNotExist(err) {
-			return fmt.Errorf("%s does not exist", dirPath)
-		}
-	}
-
-	return nil
-}
-
-// loadToml loads of the toml config containing regexes and whitelists.
-// This function will first look if the configPath is set and load the config
-// from that file. Otherwise will then look for the path set by the GITHLEAKS_CONIFG
-// env var. If that is not set, then gitleaks will continue with the default configs
-// specified by the const var at the top `defaultConfig`
-func loadToml() error {
-	var (
-		config     Config
-		configPath string
-	)
-	if opts.ConfigPath != "" {
-		configPath = opts.ConfigPath
-		_, err := os.Stat(configPath)
-		if err != nil {
-			return fmt.Errorf("no gitleaks config at %s", configPath)
-		}
-	} else {
-		configPath = os.Getenv("GITLEAKS_CONFIG")
-	}
-
-	if configPath != "" {
-		if _, err := toml.DecodeFile(configPath, &config); err != nil {
-			return fmt.Errorf("problem loading config: %v", err)
-		}
-	} else {
-		_, err := toml.Decode(defaultConfig, &config)
-		if err != nil {
-			return fmt.Errorf("problem loading default config: %v", err)
-		}
-	}
-
-	return updateConfig(config)
-}
-
-// updateConfig will update a the global config values
-func updateConfig(config Config) error {
-	if len(config.Misc.Entropy) != 0 {
-		err := entropyLimits(config.Misc.Entropy)
-		if err != nil {
-			return err
-		}
-	}
-
-	for _, regex := range config.Entropy.LineRegexes {
-		entropyRegexes = append(entropyRegexes, regexp.MustCompile(regex))
-	}
-
-	if singleSearchRegex != nil {
-		regexes["singleSearch"] = singleSearchRegex
-	} else {
-		for _, regex := range config.Regexes {
-			regexes[regex.Description] = regexp.MustCompile(regex.Regex)
-		}
-	}
-	whiteListCommits = make(map[string]bool)
-	for _, commit := range config.Whitelist.Commits {
-		whiteListCommits[commit] = true
-	}
-	for _, regex := range config.Whitelist.Files {
-		whiteListFiles = append(whiteListFiles, regexp.MustCompile(regex))
-	}
-	for _, regex := range config.Whitelist.Regexes {
-		whiteListRegexes = append(whiteListRegexes, regexp.MustCompile(regex))
-	}
-	for _, regex := range config.Whitelist.Repos {
-		whiteListRepos = append(whiteListRepos, regexp.MustCompile(regex))
-	}
-
-	return nil
-
-}
-
-// entropyLimits hydrates entropyRanges which allows for fine tuning entropy checking
-func entropyLimits(entropyLimitStr []string) error {
-	for _, span := range entropyLimitStr {
-		split := strings.Split(span, "-")
-		v1, err := strconv.ParseFloat(split[0], 64)
-		if err != nil {
-			return err
-		}
-		v2, err := strconv.ParseFloat(split[1], 64)
-		if err != nil {
-			return err
-		}
-		if v1 > v2 {
-			return fmt.Errorf("entropy range must be ascending")
-		}
-		r := entropyRange{
-			v1: v1,
-			v2: v2,
-		}
-		if r.v1 > 8.0 || r.v1 < 0.0 || r.v2 > 8.0 || r.v2 < 0.0 {
-			return fmt.Errorf("invalid entropy ranges, must be within 0.0-8.0")
-		}
-		entropyRanges = append(entropyRanges, r)
-	}
-	return nil
-}
-
-// getSSHAuth return an ssh auth use by go-git to clone repos behind authentication.
-// If --ssh-key is set then it will attempt to load the key from that path. If not,
-// gitleaks will use the default $HOME/.ssh/id_rsa key
-func getSSHAuth() (*ssh.PublicKeys, error) {
-	var (
-		sshKeyPath string
-	)
-	if opts.SSHKey != "" {
-		sshKeyPath = opts.SSHKey
-	} else {
-		// try grabbing default
-		c, err := user.Current()
-		if err != nil {
-			return nil, nil
-		}
-		sshKeyPath = fmt.Sprintf("%s/.ssh/id_rsa", c.HomeDir)
-	}
-	sshAuth, err := ssh.NewPublicKeysFromFile("git", sshKeyPath, "")
-	if err != nil {
-		if strings.HasPrefix(opts.Repo, "git") {
-			// if you are attempting to clone a git repo via ssh and supply a bad ssh key,
-			// the clone will fail.
-			return nil, fmt.Errorf("unable to generate ssh key: %v", err)
-		}
-	}
-	return sshAuth, nil
-}
-
-func (leak Leak) log() {
-	b, _ := json.MarshalIndent(leak, "", "   ")
-	fmt.Println(string(b))
 }
 }

+ 224 - 0
src/config.go

@@ -0,0 +1,224 @@
+package gitleaks
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/BurntSushi/toml"
+	"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
+)
+
+type entropyRange struct {
+	v1 float64
+	v2 float64
+}
+
+type Regex struct {
+	description string
+	regex       *regexp.Regexp
+}
+
+// TomlConfig is used for loading gitleaks configs from a toml file
+type TomlConfig struct {
+	Regexes []struct {
+		Description string
+		Regex       string
+	}
+	Entropy struct {
+		LineRegexes []string
+		Ranges      []string
+	}
+	Whitelist struct {
+		Files   []string
+		Regexes []string
+		Commits []string
+		Repos   []string
+	}
+}
+
+// Config contains gitleaks config
+type Config struct {
+	Regexes   []Regex
+	WhiteList struct {
+		regexes []*regexp.Regexp
+		files   []*regexp.Regexp
+		commits map[string]bool
+		repos   []*regexp.Regexp
+	}
+	Entropy struct {
+		entropyRanges []*entropyRange
+		regexes       []*regexp.Regexp
+	}
+	sshAuth *ssh.PublicKeys
+}
+
+// newConfig builds the Config of regexes and whitelists that gitleaks audits with.
+// It first checks whether --config is set and loads the toml config from that file.
+// Otherwise it looks for a path set by the GITLEAKS_CONFIG env var. If neither is
+// set, gitleaks falls back to the default config specified by the `defaultConfig`
+// constant in constants.go. SSH auth is also resolved here via getSSHAuth.
+func newConfig() (*Config, error) {
+	var (
+		tomlConfig TomlConfig
+		configPath string
+		config     Config
+	)
+
+	if opts.ConfigPath != "" {
+		configPath = opts.ConfigPath
+		_, err := os.Stat(configPath)
+		if err != nil {
+			return nil, fmt.Errorf("no gitleaks config at %s", configPath)
+		}
+	} else {
+		configPath = os.Getenv("GITLEAKS_CONFIG")
+	}
+
+	if configPath != "" {
+		if _, err := toml.DecodeFile(configPath, &tomlConfig); err != nil {
+			return nil, fmt.Errorf("problem loading config: %v", err)
+		}
+	} else {
+		_, err := toml.Decode(defaultConfig, &tomlConfig)
+		if err != nil {
+			return nil, fmt.Errorf("problem loading default config: %v", err)
+		}
+	}
+
+	sshAuth, err := getSSHAuth()
+	if err != nil {
+		return nil, err
+	}
+	config.sshAuth = sshAuth
+
+	err = config.update(tomlConfig)
+	if err != nil {
+		return nil, err
+	}
+	return &config, err
+}
+
+// update populates the Config from the values parsed into a TomlConfig
+func (config *Config) update(tomlConfig TomlConfig) error {
+	if len(tomlConfig.Entropy.Ranges) != 0 {
+		err := config.updateEntropyRanges(tomlConfig.Entropy.Ranges)
+		if err != nil {
+			return err
+		}
+	}
+
+	for _, regex := range tomlConfig.Entropy.LineRegexes {
+		config.Entropy.regexes = append(config.Entropy.regexes, regexp.MustCompile(regex))
+	}
+
+	if singleSearchRegex != nil {
+		config.Regexes = append(config.Regexes, Regex{
+			description: "single search",
+			regex:       singleSearchRegex,
+		})
+	} else {
+		for _, regex := range tomlConfig.Regexes {
+			config.Regexes = append(config.Regexes, Regex{
+				description: regex.Description,
+				regex:       regexp.MustCompile(regex.Regex),
+			})
+		}
+	}
+
+	config.WhiteList.commits = make(map[string]bool)
+	for _, commit := range tomlConfig.Whitelist.Commits {
+		config.WhiteList.commits[commit] = true
+	}
+	for _, regex := range tomlConfig.Whitelist.Files {
+		config.WhiteList.files = append(config.WhiteList.files, regexp.MustCompile(regex))
+	}
+	for _, regex := range tomlConfig.Whitelist.Regexes {
+		config.WhiteList.regexes = append(config.WhiteList.regexes, regexp.MustCompile(regex))
+	}
+	for _, regex := range tomlConfig.Whitelist.Repos {
+		config.WhiteList.repos = append(config.WhiteList.repos, regexp.MustCompile(regex))
+	}
+
+	return nil
+}
+
+// updateEntropyRanges parses "low-high" spans into entropy ranges, allowing fine-tuned entropy checking
+func (config *Config) updateEntropyRanges(entropyLimitStr []string) error {
+	for _, span := range entropyLimitStr {
+		split := strings.Split(span, "-")
+		v1, err := strconv.ParseFloat(split[0], 64)
+		if err != nil {
+			return err
+		}
+		v2, err := strconv.ParseFloat(split[1], 64)
+		if err != nil {
+			return err
+		}
+		if v1 > v2 {
+			return fmt.Errorf("entropy range must be ascending")
+		}
+		r := &entropyRange{
+			v1: v1,
+			v2: v2,
+		}
+		if r.v1 > 8.0 || r.v1 < 0.0 || r.v2 > 8.0 || r.v2 < 0.0 {
+			return fmt.Errorf("invalid entropy ranges, must be within 0.0-8.0")
+		}
+		config.Entropy.entropyRanges = append(config.Entropy.entropyRanges, r)
+	}
+	return nil
+}
+
+// updateFromRepo attempts to load a pinned ".gitleaks.toml" configuration file
+// from the target repo's worktree. Use the --repo-config option to trigger this.
+func (config *Config) updateFromRepo(repo *RepoInfo) error {
+	var tomlConfig TomlConfig
+	wt, err := repo.repository.Worktree()
+	if err != nil {
+		return err
+	}
+	f, err := wt.Filesystem.Open(".gitleaks.toml")
+	if err != nil {
+		return err
+	}
+	if _, err := toml.DecodeReader(f, &tomlConfig); err != nil {
+		return fmt.Errorf("problem loading config: %v", err)
+	}
+	f.Close()
+	return config.update(tomlConfig)
+}
+
+// getSSHAuth returns the ssh auth used by go-git to clone repos behind authentication.
+// If --ssh-key is set then it will attempt to load the key from that path. If not,
+// gitleaks will use the default $HOME/.ssh/id_rsa key
+func getSSHAuth() (*ssh.PublicKeys, error) {
+	var (
+		sshKeyPath string
+	)
+	if opts.SSHKey != "" {
+		sshKeyPath = opts.SSHKey
+	} else {
+		// try grabbing default
+		c, err := user.Current()
+		if err != nil {
+			return nil, nil
+		}
+		sshKeyPath = fmt.Sprintf("%s/.ssh/id_rsa", c.HomeDir)
+	}
+	sshAuth, err := ssh.NewPublicKeysFromFile("git", sshKeyPath, "")
+	if err != nil {
+		if strings.HasPrefix(opts.Repo, "git") {
+			// if you are attempting to clone a git repo via ssh and supply a bad ssh key,
+			// the clone will fail.
+			return nil, fmt.Errorf("unable to generate ssh key: %v", err)
+		}
+	}
+	return sshAuth, nil
+}
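
The "low-high" spans accepted by updateEntropyRanges can be exercised on their own. The sketch below is an editorial illustration (parseRange is a hypothetical helper, not part of this change) that mirrors the same split, parse, and validate steps:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange mirrors updateEntropyRanges for a single "low-high" span.
func parseRange(span string) (low, high float64, err error) {
	parts := strings.Split(span, "-")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("expected low-high, got %q", span)
	}
	if low, err = strconv.ParseFloat(parts[0], 64); err != nil {
		return 0, 0, err
	}
	if high, err = strconv.ParseFloat(parts[1], 64); err != nil {
		return 0, 0, err
	}
	if low > high || low < 0.0 || high > 8.0 {
		return 0, 0, fmt.Errorf("entropy range must be ascending and within 0.0-8.0")
	}
	return low, high, nil
}

func main() {
	low, high, err := parseRange("3.3-4.3")
	fmt.Println(low, high, err) // 3.3 4.3 <nil>
	_, _, err = parseRange("8.0-3.0")
	fmt.Println(err) // entropy range must be ascending and within 0.0-8.0
}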

+ 77 - 0
src/constants.go

@@ -0,0 +1,77 @@
+package gitleaks
+
+const version = "1.25.0"
+
+const defaultGithubURL = "https://api.github.com/"
+const defaultThreadNum = 1
+// ErrExit is the exit code used when gitleaks hits an error
+const ErrExit = 2
+
+// LeakExit is the exit code used when gitleaks finds leaks
+const LeakExit = 1
+
+const defaultConfig = `
+# This is a sample config file for gitleaks. It controls what gitleaks searches for and what it whitelists.
+# What you see here is the default gitleaks config. If the --config option is set, gitleaks loads its
+# configuration from that path. Otherwise, if the GITLEAKS_CONFIG environment variable is set, gitleaks loads
+# its configuration from that path. By default only common binary and document file types are whitelisted.
+
+title = "gitleaks config"
+# add regexes to the regex table
+[[regexes]]
+description = "AWS"
+regex = '''AKIA[0-9A-Z]{16}'''
+[[regexes]]
+description = "PKCS8"
+regex = '''-----BEGIN PRIVATE KEY-----'''
+[[regexes]]
+description = "RSA"
+regex = '''-----BEGIN RSA PRIVATE KEY-----'''
+[[regexes]]
+description = "SSH"
+regex = '''-----BEGIN OPENSSH PRIVATE KEY-----'''
+[[regexes]]
+description = "PGP"
+regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----'''
+[[regexes]]
+description = "Facebook"
+regex = '''(?i)facebook(.{0,4})?['\"][0-9a-f]{32}['\"]'''
+[[regexes]]
+description = "Twitter"
+regex = '''(?i)twitter(.{0,4})?['\"][0-9a-zA-Z]{35,44}['\"]'''
+[[regexes]]
+description = "Github"
+regex = '''(?i)github(.{0,4})?['\"][0-9a-zA-Z]{35,40}['\"]'''
+[[regexes]]
+description = "Slack"
+regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?'''
+
+[entropy]
+lineregexes = [
+	"api",
+	"key",
+	"signature",
+	"secret",
+	"password",
+	"pass",
+	"pwd",
+	"token",
+	"curl",
+	"wget",
+	"https?",
+]
+
+[whitelist]
+files = [
+  "(.*?)(jpg|gif|doc|pdf|bin)$"
+]
+#commits = [
+#  "BADHA5H1",
+#  "BADHA5H2",
+#]
+#repos = [
+#	"mygoodrepo"
+#]
+# Entropy ranges belong under the [entropy] table above, e.g.
+#ranges = [
+#	"3.3-4.30",
+#	"6.0-8.0",
+#]
+`
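
To get a feel for how a user-supplied config decodes, the standalone sketch below (an illustration only; miniConfig is a hypothetical stand-in for the relevant parts of TomlConfig) runs BurntSushi/toml over a small config that uses the same table and key names as the default above:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// miniConfig is a hypothetical stand-in for the relevant parts of TomlConfig.
type miniConfig struct {
	Regexes []struct {
		Description string
		Regex       string
	}
	Entropy struct {
		LineRegexes []string
		Ranges      []string
	}
}

const userConfig = `
[[regexes]]
description = "AWS"
regex = '''AKIA[0-9A-Z]{16}'''

[entropy]
lineregexes = ["api", "secret"]
ranges = ["4.5-8.0"]
`

func main() {
	var c miniConfig
	if _, err := toml.Decode(userConfig, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Regexes[0].Description, c.Entropy.Ranges) // AWS [4.5-8.0]
}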

+ 117 - 0
src/core.go

@@ -0,0 +1,117 @@
+package gitleaks
+
+import (
+	"io/ioutil"
+	"os"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hako/durafmt"
+	log "github.com/sirupsen/logrus"
+)
+
+var (
+	opts              *Options
+	config            *Config
+	singleSearchRegex *regexp.Regexp
+	dir               string
+	threads           int
+	totalCommits      int64
+	commitMap         = make(map[string]bool)
+	auditDone         bool
+	mutex             = &sync.Mutex{}
+)
+
+func init() {
+	log.SetOutput(os.Stdout)
+	threads = defaultThreadNum
+}
+
+// Report summarizes an audit: the leaks found, the audit duration, and the number of commits scanned
+type Report struct {
+	Leaks    []Leak
+	Duration string
+	Commits  int64
+}
+
+// Run is the entry point for gitleaks
+func Run(optsL *Options) (*Report, error) {
+	var (
+		leaks []Leak
+		err   error
+	)
+
+	now := time.Now()
+	opts = optsL
+	config, err = newConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Disk {
+		// temporary directory where all the gitleaks plain clones will reside
+		dir, err = ioutil.TempDir("", "gitleaks")
+		defer os.RemoveAll(dir)
+		if err != nil {
+			goto postAudit
+		}
+	}
+
+	// start audits
+	if opts.Repo != "" || opts.RepoPath != "" {
+		repoInfo, err := newRepoInfo()
+		if err != nil {
+			goto postAudit
+		}
+		err = repoInfo.clone()
+		if err != nil {
+			goto postAudit
+		}
+		leaks, err = repoInfo.audit()
+	} else if opts.OwnerPath != "" {
+		repoDs, err := discoverRepos(opts.OwnerPath)
+		if err != nil {
+			goto postAudit
+		}
+		for _, repoInfo := range repoDs {
+			err = repoInfo.clone()
+			if err != nil {
+				continue
+			}
+			leaksFromRepo, err := repoInfo.audit()
+
+			if err != nil {
+				log.Warnf("error occurred auditing repo: %s, continuing", repoInfo.name)
+			}
+			leaks = append(leaksFromRepo, leaks...)
+		}
+	} else if opts.GithubOrg != "" || opts.GithubUser != "" {
+		leaks, err = auditGithubRepos()
+	} else if opts.GitLabOrg != "" || opts.GitLabUser != "" {
+		leaks, err = auditGitlabRepos()
+	} else if opts.GithubPR != "" {
+		leaks, err = auditGithubPR()
+	}
+
+postAudit:
+	if err != nil {
+		if strings.Contains(err.Error(), "whitelisted") {
+			log.Info(err.Error())
+			os.Exit(0)
+		}
+		log.Error(err)
+		os.Exit(ErrExit)
+	}
+
+	if opts.Report != "" {
+		writeReport(leaks)
+	}
+
+	return &Report{
+		Leaks:    leaks,
+		Duration: durafmt.Parse(time.Now().Sub(now)).String(),
+		Commits:  totalCommits,
+	}, err
+}
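
Run is the library entry point: callers parse options first, then inspect the returned Report. A plausible, minimal driver is sketched below; the import path is an assumption and this is not the actual main.go from this change:

package main

import (
	"fmt"
	"os"

	gitleaks "github.com/zricethezav/gitleaks/src" // import path is an assumption
)

func main() {
	opts := gitleaks.ParseOpts()
	report, err := gitleaks.Run(opts)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(gitleaks.ErrExit)
	}
	fmt.Printf("audited %d commits in %s, found %d leaks\n",
		report.Commits, report.Duration, len(report.Leaks))
	if len(report.Leaks) != 0 {
		os.Exit(gitleaks.LeakExit)
	}
}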

+ 52 - 0
src/entropy.go

@@ -0,0 +1,52 @@
+package gitleaks
+
+import (
+	"math"
+)
+
+// getShannonEntropy https://en.wiktionary.org/wiki/Shannon_entropy
+func getShannonEntropy(data string) (entropy float64) {
+	if data == "" {
+		return 0
+	}
+
+	charCounts := make(map[rune]int)
+	for _, char := range data {
+		charCounts[char]++
+	}
+
+	invLength := 1.0 / float64(len(data))
+	for _, count := range charCounts {
+		freq := float64(count) * invLength
+		entropy -= freq * math.Log2(freq)
+	}
+
+	return entropy
+}
+
+func entropyIsHighEnough(entropy float64) bool {
+	if entropy >= opts.Entropy && len(config.Entropy.entropyRanges) == 0 {
+		return true
+	}
+
+	for _, eR := range config.Entropy.entropyRanges {
+		if entropy > eR.v1 && entropy < eR.v2 {
+			return true
+		}
+	}
+
+	return false
+}
+
+func highEntropyLineIsALeak(line string) bool {
+	if !opts.NoiseReduction {
+		return true
+	}
+	for _, re := range config.Entropy.regexes {
+		if re.FindString(line) != "" {
+			return true
+		}
+	}
+
+	return false
+}
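
As a quick sanity check on the 0.0-8.0 entropy scale used by the --entropy option and the config ranges, the standalone sketch below reimplements the same Shannon entropy calculation and prints a few reference values:

package main

import (
	"fmt"
	"math"
)

// shannon reimplements getShannonEntropy for a quick, standalone check.
func shannon(data string) (entropy float64) {
	if data == "" {
		return 0
	}
	counts := make(map[rune]int)
	for _, r := range data {
		counts[r]++
	}
	inv := 1.0 / float64(len(data))
	for _, c := range counts {
		freq := float64(c) * inv
		entropy -= freq * math.Log2(freq)
	}
	return entropy
}

func main() {
	fmt.Printf("%.3f\n", shannon("aaaa"))                 // 0.000 -- one repeated symbol
	fmt.Printf("%.3f\n", shannon("abcd"))                 // 2.000 -- four equally likely symbols
	fmt.Printf("%.3f\n", shannon("AKIAIOSFODNN7EXAMPLE")) // 3.684 -- a mixed-character fake key
}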

+ 24 - 26
github.go → src/github.go

@@ -1,4 +1,4 @@
-package main
+package gitleaks
 
 
 import (
 import (
 	"context"
 	"context"
@@ -43,19 +43,19 @@ func auditGithubPR() ([]Leak, error) {
 			return leaks, err
 			return leaks, err
 		}
 		}
 
 
-		for _, commit := range commits {
+		for _, c := range commits {
 			totalCommits = totalCommits + 1
 			totalCommits = totalCommits + 1
-			commit, _, err := githubClient.Repositories.GetCommit(ctx, owner, repo, *commit.SHA)
+			c, _, err := githubClient.Repositories.GetCommit(ctx, owner, repo, *c.SHA)
 			if err != nil {
 			if err != nil {
 				continue
 				continue
 			}
 			}
-			files := commit.Files
+			files := c.Files
 			for _, f := range files {
 			for _, f := range files {
 				skipFile := false
 				skipFile := false
 				if f.Patch == nil || f.Filename == nil {
 				if f.Patch == nil || f.Filename == nil {
 					continue
 					continue
 				}
 				}
-				for _, re := range whiteListFiles {
+				for _, re := range config.WhiteList.files {
 					if re.FindString(f.GetFilename()) != "" {
 					if re.FindString(f.GetFilename()) != "" {
 						log.Infof("skipping whitelisted file (matched regex '%s'): %s", re.String(), f.GetFilename())
 						log.Infof("skipping whitelisted file (matched regex '%s'): %s", re.String(), f.GetFilename())
 						skipFile = true
 						skipFile = true
@@ -66,17 +66,16 @@ func auditGithubPR() ([]Leak, error) {
 					continue
 					continue
 				}
 				}
 
 
-				diff := gitDiff{
-					sha:          commit.GetSHA(),
-					content:      *f.Patch,
-					filePath:     *f.Filename,
-					repoName:     repo,
-					githubCommit: commit,
-					author:       commit.GetCommitter().GetLogin(),
-					message:      *commit.Commit.Message,
-					date:         *commit.Commit.Committer.Date,
+				commit := commitInfo{
+					sha:      c.GetSHA(),
+					content:  *f.Patch,
+					filePath: *f.Filename,
+					repoName: repo,
+					author:   c.GetCommitter().GetLogin(),
+					message:  *c.Commit.Message,
+					date:     *c.Commit.Committer.Date,
 				}
 				}
-				leaks = append(leaks, inspect(diff)...)
+				leaks = append(leaks, inspect(commit)...)
 			}
 			}
 		}
 		}
 		page = resp.NextPage
 		page = resp.NextPage
@@ -164,12 +163,12 @@ func auditGithubRepos() ([]Leak, error) {
 		ownerDir, _ = ioutil.TempDir(dir, opts.GithubUser)
 		ownerDir, _ = ioutil.TempDir(dir, opts.GithubUser)
 	}
 	}
 	for _, githubRepo := range githubRepos {
 	for _, githubRepo := range githubRepos {
-		repo, err := cloneGithubRepo(githubRepo)
+		repoD, err := cloneGithubRepo(githubRepo)
 		if err != nil {
 		if err != nil {
 			log.Warn(err)
 			log.Warn(err)
 			continue
 			continue
 		}
 		}
-		leaksFromRepo, err := auditGitRepo(repo)
+		leaksFromRepo, err := repoD.audit()
 		if opts.Disk {
 		if opts.Disk {
 			os.RemoveAll(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name))
 			os.RemoveAll(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name))
 		}
 		}
@@ -187,9 +186,8 @@ func auditGithubRepos() ([]Leak, error) {
 }
 }
 
 
 // cloneGithubRepo clones a repo from the url parsed from a github repo. The repo
 // cloneGithubRepo clones a repo from the url parsed from a github repo. The repo
-// will be cloned to disk if --disk is set. If the repo is private, you must include the
-// --private/-p option. After the repo is clone, an audit will begin.
-func cloneGithubRepo(githubRepo *github.Repository) (*RepoDescriptor, error) {
+// will be cloned to disk if --disk is set.
+func cloneGithubRepo(githubRepo *github.Repository) (*RepoInfo, error) {
 	var (
 	var (
 		repo *git.Repository
 		repo *git.Repository
 		err  error
 		err  error
@@ -198,7 +196,7 @@ func cloneGithubRepo(githubRepo *github.Repository) (*RepoDescriptor, error) {
 	if opts.ExcludeForks && githubRepo.GetFork() {
 	if opts.ExcludeForks && githubRepo.GetFork() {
 		return nil, fmt.Errorf("skipping %s, excluding forks", *githubRepo.Name)
 		return nil, fmt.Errorf("skipping %s, excluding forks", *githubRepo.Name)
 	}
 	}
-	for _, re := range whiteListRepos {
+	for _, re := range config.WhiteList.repos {
 		if re.FindString(*githubRepo.Name) != "" {
 		if re.FindString(*githubRepo.Name) != "" {
 			return nil, fmt.Errorf("skipping %s, whitelisted", *githubRepo.Name)
 			return nil, fmt.Errorf("skipping %s, whitelisted", *githubRepo.Name)
 		}
 		}
@@ -209,10 +207,10 @@ func cloneGithubRepo(githubRepo *github.Repository) (*RepoDescriptor, error) {
 		if err != nil {
 		if err != nil {
 			return nil, fmt.Errorf("unable to generater owner temp dir: %v", err)
 			return nil, fmt.Errorf("unable to generater owner temp dir: %v", err)
 		}
 		}
-		if sshAuth != nil && githubToken == "" {
+		if config.sshAuth != nil && githubToken == "" {
 			repo, err = git.PlainClone(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name), false, &git.CloneOptions{
 			repo, err = git.PlainClone(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name), false, &git.CloneOptions{
 				URL:  *githubRepo.SSHURL,
 				URL:  *githubRepo.SSHURL,
-				Auth: sshAuth,
+				Auth: config.sshAuth,
 			})
 			})
 		} else if githubToken != "" {
 		} else if githubToken != "" {
 			repo, err = git.PlainClone(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name), false, &git.CloneOptions{
 			repo, err = git.PlainClone(fmt.Sprintf("%s/%s", ownerDir, *githubRepo.Name), false, &git.CloneOptions{
@@ -228,10 +226,10 @@ func cloneGithubRepo(githubRepo *github.Repository) (*RepoDescriptor, error) {
 			})
 			})
 		}
 		}
 	} else {
 	} else {
-		if sshAuth != nil && githubToken == "" {
+		if config.sshAuth != nil && githubToken == "" {
 			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 				URL:  *githubRepo.SSHURL,
 				URL:  *githubRepo.SSHURL,
-				Auth: sshAuth,
+				Auth: config.sshAuth,
 			})
 			})
 		} else if githubToken != "" {
 		} else if githubToken != "" {
 			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
@@ -250,7 +248,7 @@ func cloneGithubRepo(githubRepo *github.Repository) (*RepoDescriptor, error) {
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return &RepoDescriptor{
+	return &RepoInfo{
 		repository: repo,
 		repository: repo,
 		name:       *githubRepo.Name,
 		name:       *githubRepo.Name,
 	}, nil
 	}, nil
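
cloneGithubRepo chooses between SSH auth, token auth, and anonymous cloning, and between on-disk and in-memory storage. The sketch below isolates only the token-plus-in-memory case with go-git; it assumes GITHUB_TOKEN is set and is an illustration rather than a copy of the gitleaks code:

package main

import (
	"fmt"
	"os"

	git "gopkg.in/src-d/go-git.v4"
	githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	// Token auth with an in-memory clone, roughly the branch taken when
	// GITHUB_TOKEN is set and --disk is not.
	auth := &githttp.BasicAuth{
		Username: "gitleaks", // any non-empty username works with a token
		Password: os.Getenv("GITHUB_TOKEN"),
	}
	repo, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL:  "https://github.com/gitleakstest/gronit",
		Auth: auth,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	head, err := repo.Head()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("cloned, HEAD at", head.Hash())
}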

+ 12 - 10
gitlab.go → src/gitlab.go

@@ -1,4 +1,4 @@
-package main
+package gitleaks
 
 
 import (
 import (
 	"fmt"
 	"fmt"
@@ -57,13 +57,15 @@ func auditGitlabRepos() ([]Leak, error) {
 		}
 		}
 
 
 		if err != nil {
 		if err != nil {
-			log.Fatal("error listing projects: ", err) // exit when can't make API call
+			// exit when can't make API call
+			log.Fatal("error listing projects: ", err)
 		}
 		}
 
 
 		repos = append(repos, ps...)
 		repos = append(repos, ps...)
 
 
 		if page >= resp.TotalPages {
 		if page >= resp.TotalPages {
-			break // exit when we've seen all pages
+			// exit when we've seen all pages
+			break
 		}
 		}
 
 
 		page = resp.NextPage
 		page = resp.NextPage
@@ -78,13 +80,13 @@ func auditGitlabRepos() ([]Leak, error) {
 	}
 	}
 
 
 	for _, p := range repos {
 	for _, p := range repos {
-		repo, err := cloneGitlabRepo(tempDir, p)
+		repoInfo, err := cloneGitlabRepo(tempDir, p)
 		if err != nil {
 		if err != nil {
 			log.Warn(err)
 			log.Warn(err)
 			continue
 			continue
 		}
 		}
 
 
-		leaksFromRepo, err := auditGitRepo(repo)
+		leaksFromRepo, err := repoInfo.audit()
 		if err != nil {
 		if err != nil {
 			log.Warn(err)
 			log.Warn(err)
 		}
 		}
@@ -121,7 +123,7 @@ func createGitlabTempDir() (string, error) {
 	return ownerDir, nil
 	return ownerDir, nil
 }
 }
 
 
-func cloneGitlabRepo(tempDir string, p *gitlab.Project) (*RepoDescriptor, error) {
+func cloneGitlabRepo(tempDir string, p *gitlab.Project) (*RepoInfo, error) {
 	var (
 	var (
 		repo *git.Repository
 		repo *git.Repository
 		err  error
 		err  error
@@ -130,7 +132,7 @@ func cloneGitlabRepo(tempDir string, p *gitlab.Project) (*RepoDescriptor, error)
 		return nil, fmt.Errorf("skipping %s, excluding forks", p.Name)
 		return nil, fmt.Errorf("skipping %s, excluding forks", p.Name)
 	}
 	}
 
 
-	for _, re := range whiteListRepos {
+	for _, re := range config.WhiteList.repos {
 		if re.FindString(p.Name) != "" {
 		if re.FindString(p.Name) != "" {
 			return nil, fmt.Errorf("skipping %s, whitelisted", p.Name)
 			return nil, fmt.Errorf("skipping %s, whitelisted", p.Name)
 		}
 		}
@@ -140,9 +142,9 @@ func cloneGitlabRepo(tempDir string, p *gitlab.Project) (*RepoDescriptor, error)
 		URL: p.HTTPURLToRepo,
 		URL: p.HTTPURLToRepo,
 	}
 	}
 
 
-	if sshAuth != nil {
+	if config.sshAuth != nil {
 		opt.URL = p.SSHURLToRepo
 		opt.URL = p.SSHURLToRepo
-		opt.Auth = sshAuth
+		opt.Auth = config.sshAuth
 	}
 	}
 
 
 	log.Infof("cloning: %s", p.Name)
 	log.Infof("cloning: %s", p.Name)
@@ -157,7 +159,7 @@ func cloneGitlabRepo(tempDir string, p *gitlab.Project) (*RepoDescriptor, error)
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	return &RepoDescriptor{
+	return &RepoInfo{
 		repository: repo,
 		repository: repo,
 		name:       p.Name,
 		name:       p.Name,
 	}, nil
 	}, nil
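
Both the GitHub and GitLab paths share the same repo whitelist check: a repo is skipped when any whitelist regex matches its name. A minimal standalone sketch of that filter (skipRepo is a hypothetical helper, not a gitleaks function):

package main

import (
	"fmt"
	"regexp"
)

// skipRepo mirrors the whitelist check used in cloneGithubRepo and cloneGitlabRepo.
func skipRepo(whitelist []*regexp.Regexp, name string) bool {
	for _, re := range whitelist {
		if re.FindString(name) != "" {
			return true
		}
	}
	return false
}

func main() {
	whitelist := []*regexp.Regexp{regexp.MustCompile("gronit")}
	for _, name := range []string{"gronit", "h1domains"} {
		fmt.Println(name, "whitelisted:", skipRepo(whitelist, name))
	}
	// Output:
	// gronit whitelisted: true
	// h1domains whitelisted: false
}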

+ 149 - 137
gitleaks_test.go → src/gitleaks_test.go

@@ -1,4 +1,4 @@
-package main
+package gitleaks
 
 
 import (
 import (
 	"fmt"
 	"fmt"
@@ -11,6 +11,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/franela/goblin"
 	"github.com/franela/goblin"
+	log "github.com/sirupsen/logrus"
 	git "gopkg.in/src-d/go-git.v4"
 	git "gopkg.in/src-d/go-git.v4"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
 )
 )
@@ -59,21 +60,34 @@ repos = [
 `
 `
 
 
 const testEntropyRange = `
 const testEntropyRange = `
-[misc]
-entropy = [
+[entropy]
+ranges = [
   "7.5-8.0",
   "7.5-8.0",
-  "3.3-3.4",
+  "3.2-3.4",
+]
+lineregexes = [
+	"(?i)api",
+	"(?i)key",
+	"signature",
+	"secret",
+	"password",
+	"pass",
+	"pwd",
+	"token",
+	"curl",
+	"wget",
+	"https?",
 ]
 ]
 `
 `
 const testBadEntropyRange = `
 const testBadEntropyRange = `
-[misc]
-entropy = [
+[entropy]
+ranges = [
   "8.0-3.0",
   "8.0-3.0",
 ]
 ]
 `
 `
 const testBadEntropyRange2 = `
 const testBadEntropyRange2 = `
-[misc]
-entropy = [
+[entropy]
+ranges = [
   "8.0-8.9",
   "8.0-8.9",
 ]
 ]
 `
 `
@@ -94,19 +108,19 @@ func TestGetRepo(t *testing.T) {
 	}
 	}
 
 
 	var tests = []struct {
 	var tests = []struct {
-		testOpts       Options
+		testOpts       *Options
 		description    string
 		description    string
 		expectedErrMsg string
 		expectedErrMsg string
 	}{
 	}{
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/gronit",
 				Repo: "https://github.com/gitleakstest/gronit",
 			},
 			},
 			description:    "test plain clone remote repo",
 			description:    "test plain clone remote repo",
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/gronit",
 				Repo: "https://github.com/gitleakstest/gronit",
 				Disk: true,
 				Disk: true,
 			},
 			},
@@ -114,33 +128,33 @@ func TestGetRepo(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				RepoPath: dir,
 				RepoPath: dir,
 			},
 			},
 			description:    "test local clone repo",
 			description:    "test local clone repo",
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/nope",
 				Repo: "https://github.com/gitleakstest/nope",
 			},
 			},
 			description:    "test no repo",
 			description:    "test no repo",
 			expectedErrMsg: "authentication required",
 			expectedErrMsg: "authentication required",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/private",
 				Repo: "https://github.com/gitleakstest/private",
 			},
 			},
 			description:    "test private repo",
 			description:    "test private repo",
-			expectedErrMsg: "invalid auth method",
+			expectedErrMsg: "authentication required",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/private",
 				Repo: "https://github.com/gitleakstest/private",
 				Disk: true,
 				Disk: true,
 			},
 			},
 			description:    "test private repo",
 			description:    "test private repo",
-			expectedErrMsg: "invalid auth method",
+			expectedErrMsg: "authentication required",
 		},
 		},
 	}
 	}
 	g := goblin.Goblin(t)
 	g := goblin.Goblin(t)
@@ -148,7 +162,12 @@ func TestGetRepo(t *testing.T) {
 		g.Describe("TestGetRepo", func() {
 		g.Describe("TestGetRepo", func() {
 			g.It(test.description, func() {
 			g.It(test.description, func() {
 				opts = test.testOpts
 				opts = test.testOpts
-				_, err := cloneRepo()
+				config, err = newConfig()
+				if err != nil {
+					log.Fatal(err)
+				}
+				repo, _ := newRepoInfo()
+				err := repo.clone()
 				if err != nil {
 				if err != nil {
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 				}
 				}
@@ -156,6 +175,7 @@ func TestGetRepo(t *testing.T) {
 		})
 		})
 	}
 	}
 }
 }
+
 func TestRun(t *testing.T) {
 func TestRun(t *testing.T) {
 	var err error
 	var err error
 	configsDir := testTomlLoader()
 	configsDir := testTomlLoader()
@@ -172,7 +192,7 @@ func TestRun(t *testing.T) {
 		URL: "https://github.com/gitleakstest/h1domains",
 		URL: "https://github.com/gitleakstest/h1domains",
 	})
 	})
 	var tests = []struct {
 	var tests = []struct {
-		testOpts       Options
+		testOpts       *Options
 		description    string
 		description    string
 		expectedErrMsg string
 		expectedErrMsg string
 		whiteListRepos []string
 		whiteListRepos []string
@@ -182,15 +202,15 @@ func TestRun(t *testing.T) {
 		commitPerPage  int
 		commitPerPage  int
 	}{
 	}{
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GitLabUser: "gitleakstest",
 				GitLabUser: "gitleakstest",
 			},
 			},
-			description:    "test github user",
+			description:    "test gitlab user",
 			numLeaks:       2,
 			numLeaks:       2,
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser: "gitleakstest",
 				GithubUser: "gitleakstest",
 			},
 			},
 			description:    "test github user",
 			description:    "test github user",
@@ -198,7 +218,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser: "gitleakstest",
 				GithubUser: "gitleakstest",
 				Disk:       true,
 				Disk:       true,
 			},
 			},
@@ -207,7 +227,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg: "gitleakstestorg",
 				GithubOrg: "gitleakstestorg",
 			},
 			},
 			description:    "test github org",
 			description:    "test github org",
@@ -215,7 +235,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg: "gitleakstestorg",
 				GithubOrg: "gitleakstestorg",
 				Disk:      true,
 				Disk:      true,
 			},
 			},
@@ -224,7 +244,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				OwnerPath: dir,
 				OwnerPath: dir,
 			},
 			},
 			description:    "test owner path",
 			description:    "test owner path",
@@ -232,7 +252,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo:   "git@github.com:gitleakstest/gronit.git",
 				Repo:   "git@github.com:gitleakstest/gronit.git",
 				SSHKey: "trash",
 				SSHKey: "trash",
 			},
 			},
@@ -241,7 +261,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "unable to generate ssh key: open trash: no such file or directory",
 			expectedErrMsg: "unable to generate ssh key: open trash: no such file or directory",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/gronit.git",
 				Repo: "https://github.com/gitleakstest/gronit.git",
 			},
 			},
 			description:    "test leak",
 			description:    "test leak",
@@ -249,7 +269,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/h1domains.git",
 				Repo: "https://github.com/gitleakstest/h1domains.git",
 			},
 			},
 			description:    "test clean",
 			description:    "test clean",
@@ -257,7 +277,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				Repo: "https://github.com/gitleakstest/empty.git",
 				Repo: "https://github.com/gitleakstest/empty.git",
 			},
 			},
 			description:    "test empty",
 			description:    "test empty",
@@ -265,7 +285,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "reference not found",
 			expectedErrMsg: "reference not found",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg: "gitleakstestorg",
 				GithubOrg: "gitleakstestorg",
 			},
 			},
 			description:    "test github org, whitelist repo",
 			description:    "test github org, whitelist repo",
@@ -274,7 +294,7 @@ func TestRun(t *testing.T) {
 			configPath:     path.Join(configsDir, "repo"),
 			configPath:     path.Join(configsDir, "repo"),
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg:    "gitleakstestorg",
 				GithubOrg:    "gitleakstestorg",
 				ExcludeForks: true,
 				ExcludeForks: true,
 			},
 			},
@@ -283,7 +303,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 			},
 			},
 			description:    "test github pr",
 			description:    "test github pr",
@@ -291,7 +311,7 @@ func TestRun(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 			},
 			},
 			description:    "test github pr",
 			description:    "test github pr",
@@ -300,19 +320,17 @@ func TestRun(t *testing.T) {
 			commitPerPage:  1,
 			commitPerPage:  1,
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/1",
 			},
 			},
 			description:    "test github pr with whitelisted files",
 			description:    "test github pr with whitelisted files",
 			numLeaks:       0,
 			numLeaks:       0,
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 			commitPerPage:  1,
 			commitPerPage:  1,
-			whiteListFiles: []*regexp.Regexp{
-				regexp.MustCompile("main.go"),
-			},
+			configPath:     path.Join(configsDir, "file"),
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/2",
 				GithubPR: "https://github.com/gitleakstest/gronit/pull/2",
 			},
 			},
 			description:    "test github pr with commits without patch info",
 			description:    "test github pr with commits without patch info",
@@ -331,17 +349,12 @@ func TestRun(t *testing.T) {
 				if test.commitPerPage != 0 {
 				if test.commitPerPage != 0 {
 					githubPages = test.commitPerPage
 					githubPages = test.commitPerPage
 				}
 				}
-				if test.whiteListFiles != nil {
-					whiteListFiles = test.whiteListFiles
-				} else {
-					whiteListFiles = nil
-				}
-				opts = test.testOpts
-				leaks, err := run()
+				report, err := Run(test.testOpts)
 				if err != nil {
 				if err != nil {
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
+				} else {
+					g.Assert(len(report.Leaks)).Equal(test.numLeaks)
 				}
 				}
-				g.Assert(len(leaks)).Equal(test.numLeaks)
 				githubPages = 100
 				githubPages = 100
 			})
 			})
 		})
 		})
@@ -419,8 +432,8 @@ func TestWriteReport(t *testing.T) {
 	for _, test := range tests {
 	for _, test := range tests {
 		g.Describe("TestWriteReport", func() {
 		g.Describe("TestWriteReport", func() {
 			g.It(test.description, func() {
 			g.It(test.description, func() {
-				opts = test.testOpts
-				err := optsGuard()
+				opts = &(test.testOpts)
+				err := opts.guard()
 				if err != nil {
 				if err != nil {
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 				} else {
 				} else {
@@ -448,20 +461,16 @@ func testTomlLoader() string {
 
 
 func TestAuditRepo(t *testing.T) {
 func TestAuditRepo(t *testing.T) {
 	var leaks []Leak
 	var leaks []Leak
-	err := loadToml()
 	configsDir := testTomlLoader()
 	configsDir := testTomlLoader()
 	defer os.RemoveAll(configsDir)
 	defer os.RemoveAll(configsDir)
 
 
-	if err != nil {
-		panic(err)
-	}
 	leaksR, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 	leaksR, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 		URL: "https://github.com/gitleakstest/gronit.git",
 		URL: "https://github.com/gitleakstest/gronit.git",
 	})
 	})
 	if err != nil {
 	if err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
-	leaksRepo := &RepoDescriptor{
+	leaksRepo := &RepoInfo{
 		repository: leaksR,
 		repository: leaksR,
 		name:       "gronit",
 		name:       "gronit",
 	}
 	}
@@ -472,17 +481,17 @@ func TestAuditRepo(t *testing.T) {
 	if err != nil {
 	if err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
-	cleanRepo := &RepoDescriptor{
+	cleanRepo := &RepoInfo{
 		repository: cleanR,
 		repository: cleanR,
 		name:       "h1domains",
 		name:       "h1domains",
 	}
 	}
 
 
 	var tests = []struct {
 	var tests = []struct {
-		testOpts         Options
+		testOpts         *Options
 		description      string
 		description      string
 		expectedErrMsg   string
 		expectedErrMsg   string
 		numLeaks         int
 		numLeaks         int
-		repo             *RepoDescriptor
+		repo             *RepoInfo
 		whiteListFiles   []*regexp.Regexp
 		whiteListFiles   []*regexp.Regexp
 		whiteListCommits map[string]bool
 		whiteListCommits map[string]bool
 		whiteListRepos   []*regexp.Regexp
 		whiteListRepos   []*regexp.Regexp
@@ -493,7 +502,7 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "pinned config",
 			description: "pinned config",
 			numLeaks:    0,
 			numLeaks:    0,
-			testOpts: Options{
+			testOpts: &Options{
 				RepoConfig: true,
 				RepoConfig: true,
 			},
 			},
 		},
 		},
@@ -501,7 +510,7 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "commit depth = 1, one leak",
 			description: "commit depth = 1, one leak",
 			numLeaks:    1,
 			numLeaks:    1,
-			testOpts: Options{
+			testOpts: &Options{
 				Depth: 1,
 				Depth: 1,
 			},
 			},
 		},
 		},
@@ -509,53 +518,63 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present",
 			description: "two leaks present",
 			numLeaks:    2,
 			numLeaks:    2,
+			testOpts:    &Options{},
+		},
+		{
+			repo:        leaksRepo,
+			description: "no leaks present on branch",
+			numLeaks:    0,
+			testOpts: &Options{
+				Branch: "dev",
+			},
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present limit goroutines",
 			description: "two leaks present limit goroutines",
 			numLeaks:    2,
 			numLeaks:    2,
-			testOpts: Options{
+			testOpts: &Options{
 				Threads: 4,
 				Threads: 4,
 			},
 			},
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present whitelist AWS.. no leaks",
 			description: "two leaks present whitelist AWS.. no leaks",
-			whiteListRegexes: []*regexp.Regexp{
-				regexp.MustCompile("AKIA"),
-			},
-			numLeaks: 0,
+			testOpts:    &Options{},
+			configPath:  path.Join(configsDir, "regex"),
+			numLeaks:    0,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present limit goroutines",
 			description: "two leaks present limit goroutines",
-			numLeaks:    2,
+			testOpts: &Options{
+				Threads: 2,
+			},
+			numLeaks: 2,
 		},
 		},
 		{
 		{
 			repo:        cleanRepo,
 			repo:        cleanRepo,
 			description: "no leaks present",
 			description: "no leaks present",
+			testOpts:    &Options{},
 			numLeaks:    0,
 			numLeaks:    0,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present whitelist go files",
 			description: "two leaks present whitelist go files",
-			whiteListFiles: []*regexp.Regexp{
-				regexp.MustCompile(".go"),
-			},
-			numLeaks: 0,
+			testOpts:    &Options{},
+			configPath:  path.Join(configsDir, "file"),
+			numLeaks:    0,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "two leaks present whitelist bad commit",
 			description: "two leaks present whitelist bad commit",
-			whiteListCommits: map[string]bool{
-				"eaeffdc65b4c73ccb67e75d96bd8743be2c85973": true,
-			},
-			numLeaks: 1,
+			configPath:  path.Join(configsDir, "commit"),
+			testOpts:    &Options{},
+			numLeaks:    1,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "redact",
 			description: "redact",
-			testOpts: Options{
+			testOpts: &Options{
 				Redact: true,
 				Redact: true,
 			},
 			},
 			numLeaks: 2,
 			numLeaks: 2,
@@ -564,7 +583,7 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "Audit a specific commit",
 			description: "Audit a specific commit",
 			numLeaks:    1,
 			numLeaks:    1,
-			testOpts: Options{
+			testOpts: &Options{
 				Commit: "cb5599aeed261b2c038aa4729e2d53ca050a4988",
 				Commit: "cb5599aeed261b2c038aa4729e2d53ca050a4988",
 			},
 			},
 		},
 		},
@@ -572,7 +591,7 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "Audit a specific commit no leaks",
 			description: "Audit a specific commit no leaks",
 			numLeaks:    0,
 			numLeaks:    0,
-			testOpts: Options{
+			testOpts: &Options{
 				Commit: "2b033e012eee364fc41b4ab7c5db1497399b8e67",
 				Commit: "2b033e012eee364fc41b4ab7c5db1497399b8e67",
 			},
 			},
 		},
 		},
@@ -580,38 +599,41 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "toml whitelist regex",
 			description: "toml whitelist regex",
 			configPath:  path.Join(configsDir, "regex"),
 			configPath:  path.Join(configsDir, "regex"),
+			testOpts:    &Options{},
 			numLeaks:    0,
 			numLeaks:    0,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "toml whitelist file",
 			description: "toml whitelist file",
 			configPath:  path.Join(configsDir, "file"),
 			configPath:  path.Join(configsDir, "file"),
+			testOpts:    &Options{},
 			numLeaks:    0,
 			numLeaks:    0,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "toml whitelist commit",
 			description: "toml whitelist commit",
 			configPath:  path.Join(configsDir, "commit"),
 			configPath:  path.Join(configsDir, "commit"),
+			testOpts:    &Options{},
 			numLeaks:    1,
 			numLeaks:    1,
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "audit whitelist repo",
 			description: "audit whitelist repo",
 			numLeaks:    0,
 			numLeaks:    0,
-			whiteListRepos: []*regexp.Regexp{
-				regexp.MustCompile("gronit"),
-			},
+			testOpts:    &Options{},
+			configPath:  path.Join(configsDir, "repo"),
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "toml whitelist repo",
 			description: "toml whitelist repo",
 			numLeaks:    0,
 			numLeaks:    0,
+			testOpts:    &Options{},
 			configPath:  path.Join(configsDir, "repo"),
 			configPath:  path.Join(configsDir, "repo"),
 		},
 		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "leaks present with entropy",
 			description: "leaks present with entropy",
-			testOpts: Options{
+			testOpts: &Options{
 				Entropy: 4.7,
 				Entropy: 4.7,
 			},
 			},
 			numLeaks: 6,
 			numLeaks: 6,
@@ -619,7 +641,7 @@ func TestAuditRepo(t *testing.T) {
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "leaks present with entropy",
 			description: "leaks present with entropy",
-			testOpts: Options{
+			testOpts: &Options{
 				Entropy:        4.7,
 				Entropy:        4.7,
 				NoiseReduction: true,
 				NoiseReduction: true,
 			},
 			},
@@ -629,7 +651,7 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "Audit until specific commit",
 			description: "Audit until specific commit",
 			numLeaks:    2,
 			numLeaks:    2,
-			testOpts: Options{
+			testOpts: &Options{
 				CommitStop: "f6839959b7bbdcd23008f1fb16f797f35bcd3a0c",
 				CommitStop: "f6839959b7bbdcd23008f1fb16f797f35bcd3a0c",
 			},
 			},
 		},
 		},
@@ -637,29 +659,39 @@ func TestAuditRepo(t *testing.T) {
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "commit depth = 2, two leaks",
 			description: "commit depth = 2, two leaks",
 			numLeaks:    2,
 			numLeaks:    2,
-			testOpts: Options{
+			testOpts: &Options{
 				Depth: 2,
 				Depth: 2,
 			},
 			},
 		},
 		},
+		{
+			repo:        leaksRepo,
+			description: "toml entropy range from opts",
+			numLeaks:    454,
+			testOpts: &Options{
+				ConfigPath: path.Join(configsDir, "entropy"),
+			},
+		},
 		{
 		{
 			repo:        leaksRepo,
 			repo:        leaksRepo,
 			description: "toml entropy range",
 			description: "toml entropy range",
-			numLeaks:    298,
+			numLeaks:    454,
+			testOpts:    &Options{},
 			configPath:  path.Join(configsDir, "entropy"),
 			configPath:  path.Join(configsDir, "entropy"),
 		},
 		},
 		{
 		{
 			repo: leaksRepo,
 			repo: leaksRepo,
-			testOpts: Options{
+			testOpts: &Options{
 				NoiseReduction: true,
 				NoiseReduction: true,
 			},
 			},
-			description: "toml entropy range",
-			numLeaks:    58,
+			description: "toml entropy noise reduction range",
+			numLeaks:    64,
 			configPath:  path.Join(configsDir, "entropy"),
 			configPath:  path.Join(configsDir, "entropy"),
 		},
 		},
 		{
 		{
 			repo:           leaksRepo,
 			repo:           leaksRepo,
 			description:    "toml bad entropy range",
 			description:    "toml bad entropy range",
 			numLeaks:       0,
 			numLeaks:       0,
+			testOpts:       &Options{},
 			configPath:     path.Join(configsDir, "badEntropy"),
 			configPath:     path.Join(configsDir, "badEntropy"),
 			expectedErrMsg: "entropy range must be ascending",
 			expectedErrMsg: "entropy range must be ascending",
 		},
 		},
@@ -667,60 +699,40 @@ func TestAuditRepo(t *testing.T) {
 			repo:           leaksRepo,
 			repo:           leaksRepo,
 			description:    "toml bad entropy2 range",
 			description:    "toml bad entropy2 range",
 			numLeaks:       0,
 			numLeaks:       0,
+			testOpts:       &Options{},
 			configPath:     path.Join(configsDir, "badEntropy2"),
 			configPath:     path.Join(configsDir, "badEntropy2"),
 			expectedErrMsg: "invalid entropy ranges, must be within 0.0-8.0",
 			expectedErrMsg: "invalid entropy ranges, must be within 0.0-8.0",
 		},
 		},
 	}
 	}
-	whiteListCommits = make(map[string]bool)
 	g := goblin.Goblin(t)
 	g := goblin.Goblin(t)
 	for _, test := range tests {
 	for _, test := range tests {
 		g.Describe("TestAuditRepo", func() {
 		g.Describe("TestAuditRepo", func() {
 			g.It(test.description, func() {
 			g.It(test.description, func() {
 				auditDone = false
 				auditDone = false
 				opts = test.testOpts
 				opts = test.testOpts
-				// settin da globs
-				if test.whiteListFiles != nil {
-					whiteListFiles = test.whiteListFiles
-				} else {
-					whiteListFiles = nil
-				}
-				if test.whiteListCommits != nil {
-					whiteListCommits = test.whiteListCommits
-				} else {
-					whiteListCommits = nil
-				}
-				if test.whiteListRegexes != nil {
-					whiteListRegexes = test.whiteListRegexes
-				} else {
-					whiteListRegexes = nil
-				}
-				if test.whiteListRepos != nil {
-					whiteListRepos = test.whiteListRepos
-				} else {
-					whiteListRepos = nil
-				}
-				skip := false
 				totalCommits = 0
 				totalCommits = 0
+
+				config, err = newConfig()
 				// config paths
 				// config paths
 				if test.configPath != "" {
 				if test.configPath != "" {
 					os.Setenv("GITLEAKS_CONFIG", test.configPath)
 					os.Setenv("GITLEAKS_CONFIG", test.configPath)
-					err := loadToml()
+					config, err = newConfig()
 					if err != nil {
 					if err != nil {
 						g.Assert(err.Error()).Equal(test.expectedErrMsg)
 						g.Assert(err.Error()).Equal(test.expectedErrMsg)
-						skip = true
+						goto next
 					}
 					}
 				}
 				}
-				if !skip {
-					leaks, err = auditGitRepo(test.repo)
-					if test.testOpts.Depth != 0 {
-						g.Assert(totalCommits).Equal(test.testOpts.Depth)
-					} else {
-						if opts.Redact {
-							g.Assert(leaks[0].Offender).Equal("REDACTED")
-						}
-						g.Assert(len(leaks)).Equal(test.numLeaks)
+				leaks, err = test.repo.audit()
+				if test.testOpts.Depth != 0 {
+					g.Assert(totalCommits).Equal(test.testOpts.Depth)
+				} else {
+					if opts.Redact {
+						g.Assert(leaks[0].Offender).Equal("REDACTED")
 					}
 					}
+					g.Assert(len(leaks)).Equal(test.numLeaks)
 				}
 				}
+			next:
+				os.Setenv("GITLEAKS_CONFIG", "")
 			})
 			})
 		})
 		})
 	}
 	}
@@ -728,19 +740,19 @@ func TestAuditRepo(t *testing.T) {
 
 
 func TestOptionGuard(t *testing.T) {
 func TestOptionGuard(t *testing.T) {
 	var tests = []struct {
 	var tests = []struct {
-		testOpts            Options
+		testOpts            *Options
 		githubToken         bool
 		githubToken         bool
 		description         string
 		description         string
 		expectedErrMsg      string
 		expectedErrMsg      string
 		expectedErrMsgFuzzy string
 		expectedErrMsgFuzzy string
 	}{
 	}{
 		{
 		{
-			testOpts:       Options{},
+			testOpts:       &Options{},
 			description:    "default no opts",
 			description:    "default no opts",
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser: "fakeUser",
 				GithubUser: "fakeUser",
 				GithubOrg:  "fakeOrg",
 				GithubOrg:  "fakeOrg",
 			},
 			},
@@ -748,7 +760,7 @@ func TestOptionGuard(t *testing.T) {
 			expectedErrMsg: "github user and organization set",
 			expectedErrMsg: "github user and organization set",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg: "fakeOrg",
 				GithubOrg: "fakeOrg",
 				OwnerPath: "/dev/null",
 				OwnerPath: "/dev/null",
 			},
 			},
@@ -756,7 +768,7 @@ func TestOptionGuard(t *testing.T) {
 			expectedErrMsg: "github organization set and local owner path",
 			expectedErrMsg: "github organization set and local owner path",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser: "fakeUser",
 				GithubUser: "fakeUser",
 				OwnerPath:  "/dev/null",
 				OwnerPath:  "/dev/null",
 			},
 			},
@@ -764,7 +776,7 @@ func TestOptionGuard(t *testing.T) {
 			expectedErrMsg: "github user set and local owner path",
 			expectedErrMsg: "github user set and local owner path",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser:   "fakeUser",
 				GithubUser:   "fakeUser",
 				SingleSearch: "*/./....",
 				SingleSearch: "*/./....",
 			},
 			},
@@ -772,7 +784,7 @@ func TestOptionGuard(t *testing.T) {
 			expectedErrMsgFuzzy: "unable to compile regex: */./...., ",
 			expectedErrMsgFuzzy: "unable to compile regex: */./...., ",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubUser:   "fakeUser",
 				GithubUser:   "fakeUser",
 				SingleSearch: "mystring",
 				SingleSearch: "mystring",
 			},
 			},
@@ -780,7 +792,7 @@ func TestOptionGuard(t *testing.T) {
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				GithubOrg: "fakeOrg",
 				GithubOrg: "fakeOrg",
 				Entropy:   9,
 				Entropy:   9,
 			},
 			},
@@ -797,7 +809,7 @@ func TestOptionGuard(t *testing.T) {
 				if test.githubToken {
 				if test.githubToken {
 					os.Setenv("GITHUB_TOKEN", "fakeToken")
 					os.Setenv("GITHUB_TOKEN", "fakeToken")
 				}
 				}
-				err := optsGuard()
+				err := opts.guard()
 				if err != nil {
 				if err != nil {
 					if test.expectedErrMsgFuzzy != "" {
 					if test.expectedErrMsgFuzzy != "" {
 						g.Assert(strings.Contains(err.Error(), test.expectedErrMsgFuzzy)).Equal(true)
 						g.Assert(strings.Contains(err.Error(), test.expectedErrMsgFuzzy)).Equal(true)
@@ -825,38 +837,38 @@ func TestLoadToml(t *testing.T) {
 	noConfigPath := path.Join(tmpDir, "gitleaksConfigNope")
 	noConfigPath := path.Join(tmpDir, "gitleaksConfigNope")
 
 
 	var tests = []struct {
 	var tests = []struct {
-		testOpts       Options
+		testOpts       *Options
 		description    string
 		description    string
 		configPath     string
 		configPath     string
 		expectedErrMsg string
 		expectedErrMsg string
 		singleSearch   bool
 		singleSearch   bool
 	}{
 	}{
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				ConfigPath: configPath,
 				ConfigPath: configPath,
 			},
 			},
 			description: "path to config",
 			description: "path to config",
 		},
 		},
 		{
 		{
-			testOpts:     Options{},
+			testOpts:     &Options{},
 			description:  "env var path to no config",
 			description:  "env var path to no config",
 			singleSearch: true,
 			singleSearch: true,
 		},
 		},
 		{
 		{
-			testOpts: Options{
+			testOpts: &Options{
 				ConfigPath: noConfigPath,
 				ConfigPath: noConfigPath,
 			},
 			},
 			description:    "no path to config",
 			description:    "no path to config",
 			expectedErrMsg: fmt.Sprintf("no gitleaks config at %s", noConfigPath),
 			expectedErrMsg: fmt.Sprintf("no gitleaks config at %s", noConfigPath),
 		},
 		},
 		{
 		{
-			testOpts:       Options{},
+			testOpts:       &Options{},
 			description:    "env var path to config",
 			description:    "env var path to config",
 			configPath:     configPath,
 			configPath:     configPath,
 			expectedErrMsg: "",
 			expectedErrMsg: "",
 		},
 		},
 		{
 		{
-			testOpts:       Options{},
+			testOpts:       &Options{},
 			description:    "env var path to no config",
 			description:    "env var path to no config",
 			configPath:     noConfigPath,
 			configPath:     noConfigPath,
 			expectedErrMsg: fmt.Sprintf("problem loading config: open %s: no such file or directory", noConfigPath),
 			expectedErrMsg: fmt.Sprintf("problem loading config: open %s: no such file or directory", noConfigPath),
@@ -878,7 +890,7 @@ func TestLoadToml(t *testing.T) {
 				} else {
 				} else {
 					os.Clearenv()
 					os.Clearenv()
 				}
 				}
-				err := loadToml()
+				_, err = newConfig()
 				if err != nil {
 				if err != nil {
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 					g.Assert(err.Error()).Equal(test.expectedErrMsg)
 				} else {
 				} else {
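
The tests above follow a table-driven pattern with goblin: build a slice of cases, then wrap each case in g.Describe/g.It and assert with g.Assert. A hypothetical extra test in the same style (not part of this change) could exercise updateEntropyRanges directly:

package gitleaks

import (
	"testing"

	"github.com/franela/goblin"
)

func TestEntropyRangeParsing(t *testing.T) {
	var tests = []struct {
		span           string
		expectedErrMsg string
	}{
		{span: "3.3-4.3", expectedErrMsg: ""},
		{span: "8.0-3.0", expectedErrMsg: "entropy range must be ascending"},
	}
	g := goblin.Goblin(t)
	for _, test := range tests {
		g.Describe("entropy ranges", func() {
			g.It(test.span, func() {
				c := &Config{}
				err := c.updateEntropyRanges([]string{test.span})
				if err != nil {
					g.Assert(err.Error()).Equal(test.expectedErrMsg)
				}
			})
		})
	}
}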

+ 170 - 0
src/options.go

@@ -0,0 +1,170 @@
+package gitleaks
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/jessevdk/go-flags"
+	log "github.com/sirupsen/logrus"
+)
+
+// Options for gitleaks
+type Options struct {
+	// remote target options
+	Repo       string `short:"r" long:"repo" description:"Repo url to audit"`
+	GithubUser string `long:"github-user" description:"Github user to audit"`
+	GithubOrg  string `long:"github-org" description:"Github organization to audit"`
+	GithubURL  string `long:"github-url" default:"https://api.github.com/" description:"GitHub API Base URL, use for GitHub Enterprise. Example: https://github.example.com/api/v3/"`
+	GithubPR   string `long:"github-pr" description:"Github PR url to audit. This does not clone the repo. GITHUB_TOKEN must be set"`
+
+	GitLabUser string `long:"gitlab-user" description:"GitLab user ID to audit"`
+	GitLabOrg  string `long:"gitlab-org" description:"GitLab group ID to audit"`
+
+	CommitStop string `long:"commit-stop" description:"sha of commit to stop at"`
+	Commit     string `long:"commit" description:"sha of commit to audit"`
+	Depth      int64  `long:"depth" description:"maximum commit depth"`
+
+	// local target option
+	RepoPath  string `long:"repo-path" description:"Path to repo"`
+	OwnerPath string `long:"owner-path" description:"Path to owner directory (repos discovered)"`
+
+	// Process options
+	Threads        int     `long:"threads" description:"Maximum number of threads gitleaks spawns"`
+	Disk           bool    `long:"disk" description:"Clones repo(s) to disk"`
+	SingleSearch   string  `long:"single-search" description:"single regular expression to search for"`
+	ConfigPath     string  `long:"config" description:"path to gitleaks config"`
+	SSHKey         string  `long:"ssh-key" description:"path to ssh key"`
+	ExcludeForks   bool    `long:"exclude-forks" description:"exclude forks for organization/user audits"`
+	Entropy        float64 `long:"entropy" short:"e" description:"Include entropy checks during audit. Entropy scale: 0.0(no entropy) - 8.0(max entropy)"`
+	NoiseReduction bool    `long:"noise-reduction" description:"Reduce the number of finds when entropy checks are enabled"`
+	RepoConfig     bool    `long:"repo-config" description:"Load config from target repo. Config file must be \".gitleaks.toml\""`
+	Branch         string  `long:"branch" description:"Branch to audit"`
+	// TODO: IncludeMessages  string `long:"messages" description:"include commit messages in audit"`
+
+	// Output options
+	Log          string `short:"l" long:"log" description:"log level"`
+	Verbose      bool   `short:"v" long:"verbose" description:"Show verbose output from gitleaks audit"`
+	Report       string `long:"report" description:"path to write report file. Needs to be csv or json"`
+	Redact       bool   `long:"redact" description:"redact secrets from log messages and report"`
+	Version      bool   `long:"version" description:"version number"`
+	SampleConfig bool   `long:"sample-config" description:"prints a sample config file"`
+}
+
+// ParseOpts parses the options
+func ParseOpts() *Options {
+	var opts Options
+	parser := flags.NewParser(&opts, flags.Default)
+	_, err := parser.Parse()
+
+	if err != nil {
+		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
+			os.Exit(0)
+		}
+	}
+
+	if len(os.Args) == 1 {
+		// TODO: this will be a feature, check locally
+		parser.WriteHelp(os.Stdout)
+		os.Exit(0)
+	}
+
+	if opts.Version {
+		fmt.Println(version)
+		os.Exit(0)
+	}
+	if opts.SampleConfig {
+		fmt.Println(defaultConfig)
+		os.Exit(0)
+	}
+
+	opts.setLogs()
+
+	err = opts.guard()
+	if err != nil {
+		log.Fatal(err)
+	}
+	return &opts
+}
+
+// guard prevents invalid option combinations
+func (opts *Options) guard() error {
+	var err error
+	if opts.GithubOrg != "" && opts.GithubUser != "" {
+		return fmt.Errorf("github user and organization set")
+	} else if opts.GithubOrg != "" && opts.OwnerPath != "" {
+		return fmt.Errorf("github organization set and local owner path")
+	} else if opts.GithubUser != "" && opts.OwnerPath != "" {
+		return fmt.Errorf("github user set and local owner path")
+	}
+
+	if opts.Threads > runtime.GOMAXPROCS(0) {
+		return fmt.Errorf("%d threads requested but only %d available", opts.Threads, runtime.GOMAXPROCS(0))
+	}
+
+	// do the URL Parse and error checking here, so we can skip it later
+	// empty string is OK, it will default to the public github URL.
+	if opts.GithubURL != "" && opts.GithubURL != defaultGithubURL {
+		if !strings.HasSuffix(opts.GithubURL, "/") {
+			opts.GithubURL += "/"
+		}
+		ghURL, err := url.Parse(opts.GithubURL)
+		if err != nil {
+			return err
+		}
+		tcpPort := "443"
+		if ghURL.Scheme == "http" {
+			tcpPort = "80"
+		}
+		timeout := 1 * time.Second
+		conn, err := net.DialTimeout("tcp", ghURL.Host+":"+tcpPort, timeout)
+		if err != nil {
+			return fmt.Errorf("%s unreachable, error: %s", ghURL.Host, err)
+		}
+		conn.Close()
+	}
+
+	if opts.SingleSearch != "" {
+		singleSearchRegex, err = regexp.Compile(opts.SingleSearch)
+		if err != nil {
+			return fmt.Errorf("unable to compile regex: %s, %v", opts.SingleSearch, err)
+		}
+	}
+
+	if opts.Entropy > 8 {
+		return fmt.Errorf("The maximum level of entropy is 8")
+	}
+	if opts.Report != "" {
+		if !strings.HasSuffix(opts.Report, ".json") && !strings.HasSuffix(opts.Report, ".csv") {
+			return fmt.Errorf("Report should be a .json or .csv file")
+		}
+		dirPath := filepath.Dir(opts.Report)
+		if _, err := os.Stat(dirPath); os.IsNotExist(err) {
+			return fmt.Errorf("%s does not exist", dirPath)
+		}
+	}
+
+	return nil
+}
+
+// setLogs sets the log level for gitleaks. Default is Info.
+func (opts *Options) setLogs() {
+	switch opts.Log {
+	case "info":
+		log.SetLevel(log.InfoLevel)
+	case "debug":
+		log.SetLevel(log.DebugLevel)
+	case "warn":
+		log.SetLevel(log.WarnLevel)
+	default:
+		log.SetLevel(log.InfoLevel)
+	}
+	log.SetFormatter(&log.TextFormatter{
+		FullTimestamp: true,
+	})
+}
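
A quick, self-contained sketch of the reachability pre-flight that `guard()` performs for a GitHub Enterprise base URL: parse the URL, pick a TCP port from the scheme, and dial with a short timeout so a bad `--github-url` fails before any cloning starts. The host `github.example.com` is only the placeholder from the flag's help text, and `reachable` is a made-up helper name, not part of the package.

```go
package main

import (
	"fmt"
	"net"
	"net/url"
	"time"
)

// reachable mirrors the pre-flight check in Options.guard: parse the base URL,
// choose 443 or 80 from the scheme, and dial with a 1s timeout so an
// unreachable host fails fast instead of mid-audit.
func reachable(rawURL string) error {
	u, err := url.Parse(rawURL)
	if err != nil {
		return err
	}
	port := "443"
	if u.Scheme == "http" {
		port = "80"
	}
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(u.Hostname(), port), 1*time.Second)
	if err != nil {
		return fmt.Errorf("%s unreachable: %v", u.Host, err)
	}
	return conn.Close()
}

func main() {
	// Hypothetical GitHub Enterprise endpoint, used only for illustration.
	fmt.Println(reachable("https://github.example.com/api/v3/"))
}
```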

+ 318 - 0
src/repo.go

@@ -0,0 +1,318 @@
+package gitleaks
+
+import (
+	"crypto/md5"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+	git "gopkg.in/src-d/go-git.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	diffType "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
+	"gopkg.in/src-d/go-git.v4/plumbing/object"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+// Leak represents a leaked secret or regex match.
+type Leak struct {
+	Line     string    `json:"line"`
+	Commit   string    `json:"commit"`
+	Offender string    `json:"offender"`
+	Type     string    `json:"reason"`
+	Message  string    `json:"commitMsg"`
+	Author   string    `json:"author"`
+	Email    string    `json:"email"`
+	File     string    `json:"file"`
+	Repo     string    `json:"repo"`
+	Date     time.Time `json:"date"`
+}
+
+// RepoInfo contains a src-d git repository and other data about the repo
+type RepoInfo struct {
+	path       string
+	url        string
+	name       string
+	repository *git.Repository
+	err        error
+}
+
+func newRepoInfo() (*RepoInfo, error) {
+	for _, re := range config.WhiteList.repos {
+		if re.FindString(opts.Repo) != "" {
+			return nil, fmt.Errorf("skipping %s, whitelisted", opts.Repo)
+		}
+	}
+	return &RepoInfo{
+		path: opts.RepoPath,
+		url:  opts.Repo,
+		name: filepath.Base(opts.Repo),
+	}, nil
+}
+
+// clone clones the target repo to disk or into memory, or opens it locally when --repo-path is set
+func (repoInfo *RepoInfo) clone() error {
+	var (
+		err  error
+		repo *git.Repository
+	)
+
+	// check if cloning to disk
+	if opts.Disk {
+		log.Infof("cloning %s to disk", opts.Repo)
+		cloneTarget := fmt.Sprintf("%s/%x", dir, md5.Sum([]byte(fmt.Sprintf("%s%s", opts.GithubUser, opts.Repo))))
+		if strings.HasPrefix(opts.Repo, "git") {
+			// private
+			repo, err = git.PlainClone(cloneTarget, false, &git.CloneOptions{
+				URL:      opts.Repo,
+				Progress: os.Stdout,
+				Auth:     config.sshAuth,
+			})
+		} else {
+			// public
+			repo, err = git.PlainClone(cloneTarget, false, &git.CloneOptions{
+				URL:      opts.Repo,
+				Progress: os.Stdout,
+			})
+		}
+	} else if repoInfo.path != "" {
+		log.Infof("opening %s", opts.RepoPath)
+		repo, err = git.PlainOpen(repoInfo.path)
+	} else {
+		// cloning to memory
+		log.Infof("cloning %s", opts.Repo)
+		if strings.HasPrefix(opts.Repo, "git") {
+			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
+				URL:      opts.Repo,
+				Progress: os.Stdout,
+				Auth:     config.sshAuth,
+			})
+		} else {
+			repo, err = git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
+				URL:      opts.Repo,
+				Progress: os.Stdout,
+			})
+		}
+	}
+	repoInfo.repository = repo
+	repoInfo.err = err
+	return err
+}
+
+// audit walks the repo's commit history and returns any leaks found
+func (repoInfo *RepoInfo) audit() ([]Leak, error) {
+	var (
+		err         error
+		leaks       []Leak
+		commitCount int64
+		commitWg    sync.WaitGroup
+		semaphore   chan bool
+		logOpts     git.LogOptions
+	)
+	for _, re := range config.WhiteList.repos {
+		if re.FindString(repoInfo.name) != "" {
+			return leaks, fmt.Errorf("skipping %s, whitelisted", repoInfo.name)
+		}
+	}
+
+	// check if target contains an external gitleaks toml
+	if opts.RepoConfig {
+		err := config.updateFromRepo(repoInfo)
+		if err != nil {
+			return leaks, err
+		}
+	}
+
+	if opts.Branch != "" {
+		refs, err := repoInfo.repository.Storer.IterReferences()
+		if err != nil {
+			return leaks, err
+		}
+		err = refs.ForEach(func(ref *plumbing.Reference) error {
+			if ref.Name().IsTag() {
+				return nil
+			}
+			// check heads first
+			if ref.Name().String() == "refs/heads/"+opts.Branch {
+				logOpts = git.LogOptions{
+					From: ref.Hash(),
+				}
+				return nil
+			} else if ref.Name().String() == "refs/remotes/origin/"+opts.Branch {
+				logOpts = git.LogOptions{
+					From: ref.Hash(),
+				}
+				return nil
+			}
+			return nil
+		})
+	} else {
+		logOpts = git.LogOptions{
+			All: true,
+		}
+	}
+
+	// iterate through all commits
+	cIter, err := repoInfo.repository.Log(&logOpts)
+	if err != nil {
+		return leaks, err
+	}
+
+	if opts.Threads != 0 {
+		threads = opts.Threads
+	}
+	if opts.RepoPath != "" {
+		threads = 1
+	}
+	semaphore = make(chan bool, threads)
+
+	err = cIter.ForEach(func(c *object.Commit) error {
+		if c == nil || (opts.Depth != 0 && commitCount == opts.Depth) {
+			return storer.ErrStop
+		}
+
+		if config.WhiteList.commits[c.Hash.String()] {
+			log.Infof("skipping commit: %s\n", c.Hash.String())
+			return nil
+		}
+
+		commitCount = commitCount + 1
+		totalCommits = totalCommits + 1
+
+		// commits without a parent (the root of the ref), or the commit matches the --commit option
+		if len(c.ParentHashes) == 0 || opts.Commit == c.Hash.String() {
+			leaksFromSingleCommit := repoInfo.auditSingleCommit(c)
+			mutex.Lock()
+			leaks = append(leaksFromSingleCommit, leaks...)
+			mutex.Unlock()
+			if opts.Commit == c.Hash.String() {
+				return storer.ErrStop
+			}
+			return nil
+		}
+
+		if opts.Commit != "" {
+			return nil
+		}
+
+		// regular commit audit
+		err = c.Parents().ForEach(func(parent *object.Commit) error {
+			commitWg.Add(1)
+			semaphore <- true
+			go func(c *object.Commit, parent *object.Commit) {
+				var (
+					filePath string
+					skipFile bool
+				)
+				defer func() {
+					commitWg.Done()
+					<-semaphore
+					if r := recover(); r != nil {
+						log.Warnf("recovering from panic on commit %s, likely large diff causing panic", c.Hash.String())
+					}
+				}()
+				patch, err := c.Patch(parent)
+				if err != nil {
+					log.Warnf("problem generating patch for commit: %s\n", c.Hash.String())
+					return
+				}
+				for _, f := range patch.FilePatches() {
+					if f.IsBinary() {
+						continue
+					}
+					skipFile = false
+					from, to := f.Files()
+					filePath = "???"
+					if from != nil {
+						filePath = from.Path()
+					} else if to != nil {
+						filePath = to.Path()
+					}
+					for _, re := range config.WhiteList.files {
+						if re.FindString(filePath) != "" {
+							log.Debugf("skipping whitelisted file (matched regex '%s'): %s", re.String(), filePath)
+							skipFile = true
+							break
+						}
+					}
+					if skipFile {
+						continue
+					}
+					chunks := f.Chunks()
+					for _, chunk := range chunks {
+						if chunk.Type() == diffType.Add || chunk.Type() == diffType.Delete {
+							diff := commitInfo{
+								repoName: repoInfo.name,
+								filePath: filePath,
+								content:  chunk.Content(),
+								sha:      c.Hash.String(),
+								author:   c.Author.Name,
+								email:    c.Author.Email,
+								message:  strings.Replace(c.Message, "\n", " ", -1),
+								date:     c.Author.When,
+							}
+							chunkLeaks := inspect(diff)
+							for _, leak := range chunkLeaks {
+								mutex.Lock()
+								leaks = append(leaks, leak)
+								mutex.Unlock()
+							}
+						}
+					}
+				}
+			}(c, parent)
+
+			return nil
+		})
+
+		return nil
+	})
+
+	commitWg.Wait()
+	return leaks, nil
+}
+
+func (repoInfo *RepoInfo) auditSingleCommit(c *object.Commit) []Leak {
+	var leaks []Leak
+	fIter, err := c.Files()
+	if err != nil {
+		return nil
+	}
+	err = fIter.ForEach(func(f *object.File) error {
+		bin, err := f.IsBinary()
+		if bin || err != nil {
+			return nil
+		}
+		for _, re := range config.WhiteList.files {
+			if re.FindString(f.Name) != "" {
+				log.Debugf("skipping whitelisted file (matched regex '%s'): %s", re.String(), f.Name)
+				return nil
+			}
+		}
+		content, err := f.Contents()
+		if err != nil {
+			return nil
+		}
+		diff := commitInfo{
+			repoName: repoInfo.name,
+			filePath: f.Name,
+			content:  content,
+			sha:      c.Hash.String(),
+			author:   c.Author.Name,
+			email:    c.Author.Email,
+			message:  strings.Replace(c.Message, "\n", " ", -1),
+			date:     c.Author.When,
+		}
+		fileLeaks := inspect(diff)
+		mutex.Lock()
+		leaks = append(leaks, fileLeaks...)
+		mutex.Unlock()
+		return nil
+	})
+	return leaks
+}
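
For orientation, here is a minimal, single-threaded sketch of the go-git walk that `audit()` builds on: clone into memory, iterate every commit, and diff it against each parent. It leaves out the whitelists, the worker semaphore, the depth/branch handling, and the entropy checks; the clone URL is just an example target and the printed output is illustrative only.

```go
package main

import (
	"fmt"
	"log"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	// Clone into memory, the same default path audit() takes when --disk is not set.
	repo, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL: "https://github.com/zricethezav/gitleaks",
	})
	if err != nil {
		log.Fatal(err)
	}

	cIter, err := repo.Log(&git.LogOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}

	// Walk every commit and diff it against each parent, which is the unit of
	// work audit() hands to its worker goroutines.
	err = cIter.ForEach(func(c *object.Commit) error {
		return c.Parents().ForEach(func(parent *object.Commit) error {
			patch, err := c.Patch(parent)
			if err != nil {
				return nil // skip commits whose patch cannot be generated
			}
			for _, fp := range patch.FilePatches() {
				if fp.IsBinary() {
					continue
				}
				from, to := fp.Files()
				name := "???"
				if from != nil {
					name = from.Path()
				} else if to != nil {
					name = to.Path()
				}
				fmt.Printf("%s touches %s\n", c.Hash.String()[:7], name)
			}
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}
```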

+ 201 - 0
src/utils.go

@@ -0,0 +1,201 @@
+package gitleaks
+
+import (
+	"encoding/csv"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+	"gopkg.in/src-d/go-git.v4/plumbing/object"
+)
+
+type commitInfo struct {
+	content  string
+	commit   *object.Commit
+	filePath string
+	repoName string
+	sha      string
+	message  string
+	author   string
+	email    string
+	date     time.Time
+}
+
+// writeReport writes a report to the file specified by the --report option.
+// The output format is inferred from the file extension: a .csv path produces CSV,
+// anything else (typically .json) produces JSON.
+func writeReport(leaks []Leak) error {
+	if len(leaks) == 0 {
+		return nil
+	}
+
+	log.Infof("writing report to %s", opts.Report)
+	if strings.HasSuffix(opts.Report, ".csv") {
+		f, err := os.Create(opts.Report)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		w := csv.NewWriter(f)
+		w.Write([]string{"repo", "line", "commit", "offender", "reason", "commitMsg", "author", "email", "file", "date"})
+		for _, leak := range leaks {
+			w.Write([]string{leak.Repo, leak.Line, leak.Commit, leak.Offender, leak.Type, leak.Message, leak.Author, leak.Email, leak.File, leak.Date.Format(time.RFC3339)})
+		}
+		w.Flush()
+	} else {
+		f, err := os.Create(opts.Report)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		encoder := json.NewEncoder(f)
+		encoder.SetIndent("", "\t")
+		if _, err := f.WriteString("[\n"); err != nil {
+			return err
+		}
+		for i := 0; i < len(leaks); i++ {
+			if err := encoder.Encode(leaks[i]); err != nil {
+				return err
+			}
+			// for all but the last leak, seek back and overwrite the newline appended by Encode() with comma & newline
+			if i+1 < len(leaks) {
+				if _, err := f.Seek(-1, 1); err != nil {
+					return err
+				}
+				if _, err := f.WriteString(",\n"); err != nil {
+					return err
+				}
+			}
+		}
+		if _, err := f.WriteString("]"); err != nil {
+			return err
+		}
+		if err := f.Sync(); err != nil {
+			log.Error(err)
+			return err
+		}
+	}
+	return nil
+}
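
The JSON branch above relies on one slightly subtle step: `json.Encoder.Encode` always appends a newline, so for every element except the last the writer seeks back one byte and overwrites that newline with `",\n"`, producing a valid JSON array without buffering every element in memory. A self-contained sketch of the same technique, with a placeholder file name and payload:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

// streamJSONArray writes items as a JSON array, one encoded value at a time,
// rewriting Encode's trailing newline into ",\n" for all but the last element.
func streamJSONArray(path string, items []map[string]string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	enc := json.NewEncoder(f)
	enc.SetIndent("", "\t")
	if _, err := f.WriteString("[\n"); err != nil {
		return err
	}
	for i, item := range items {
		if err := enc.Encode(item); err != nil {
			return err
		}
		if i+1 < len(items) {
			if _, err := f.Seek(-1, 1); err != nil { // whence 1 == io.SeekCurrent
				return err
			}
			if _, err := f.WriteString(",\n"); err != nil {
				return err
			}
		}
	}
	_, err = f.WriteString("]")
	return err
}

func main() {
	items := []map[string]string{{"offender": "REDACTED"}, {"offender": "REDACTED"}}
	if err := streamJSONArray("report.json", items); err != nil {
		log.Fatal(err)
	}
}
```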
+
+// inspect checks each line of a commit's diff content against the regexes defined
+// in the config (see gitleaks.toml for an example), skipping lines that match a
+// whitelist regex, and returns a list of leaks.
+// If verbose mode (-v/--verbose) is set, inspect logs leaks as they are discovered.
+func inspect(commit commitInfo) []Leak {
+	var (
+		leaks    []Leak
+		skipLine bool
+	)
+	lines := strings.Split(commit.content, "\n")
+
+	for _, line := range lines {
+		skipLine = false
+		for _, re := range config.Regexes {
+			match := re.regex.FindString(line)
+			if match == "" {
+				continue
+			}
+			if skipLine = isLineWhitelisted(line); skipLine {
+				break
+			}
+			leaks = addLeak(leaks, line, match, re.description, commit)
+		}
+
+		if !skipLine && (opts.Entropy > 0 || len(config.Entropy.entropyRanges) != 0) {
+			words := strings.Fields(line)
+			for _, word := range words {
+				entropy := getShannonEntropy(word)
+				// Regex checks are expensive, so only run the noise-reduction and whitelist
+				// regexes for words whose entropy clears the configured threshold.
+				if !entropyIsHighEnough(entropy) {
+					continue
+				}
+				// If either the line is whitelisted or the line fails the noiseReduction check (when enabled),
+				// then we can skip checking the rest of the line for high entropy words.
+				if skipLine = !highEntropyLineIsALeak(line) || isLineWhitelisted(line); skipLine {
+					break
+				}
+				leaks = addLeak(leaks, line, word, fmt.Sprintf("Entropy: %.2f", entropy), commit)
+			}
+		}
+	}
+	return leaks
+}
+
+// isLineWhitelisted returns true iff the line is matched by at least one of the whiteListRegexes.
+func isLineWhitelisted(line string) bool {
+	for _, wRe := range config.WhiteList.regexes {
+		whitelistMatch := wRe.FindString(line)
+		if whitelistMatch != "" {
+			return true
+		}
+	}
+	return false
+}
+
+// addLeak is helper for func inspect() to append leaks if found during a diff check.
+func addLeak(leaks []Leak, line string, offender string, leakType string, commit commitInfo) []Leak {
+	leak := Leak{
+		Line:     line,
+		Commit:   commit.sha,
+		Offender: offender,
+		Type:     leakType,
+		Author:   commit.author,
+		Email:    commit.email,
+		File:     commit.filePath,
+		Repo:     commit.repoName,
+		Message:  commit.message,
+		Date:     commit.date,
+	}
+	if opts.Redact {
+		leak.Offender = "REDACTED"
+		leak.Line = strings.Replace(line, offender, "REDACTED", -1)
+	}
+
+	if opts.Verbose {
+		leak.log()
+	}
+
+	leaks = append(leaks, leak)
+	return leaks
+}
+
+// discoverRepos walks the children of ownerPath. If a child directory
+// contains a .git subdirectory, that repo is added to the list of repos returned.
+func discoverRepos(ownerPath string) ([]*RepoInfo, error) {
+	var (
+		err    error
+		repoDs []*RepoInfo
+	)
+	files, err := ioutil.ReadDir(ownerPath)
+	if err != nil {
+		return repoDs, err
+	}
+	for _, f := range files {
+		repoPath := path.Join(ownerPath, f.Name())
+		if f.IsDir() && containsGit(repoPath) {
+			repoDs = append(repoDs, &RepoInfo{
+				name: f.Name(),
+				path: repoPath,
+			})
+		}
+	}
+	return repoDs, err
+}
+
+func (leak Leak) log() {
+	b, _ := json.MarshalIndent(leak, "", "   ")
+	fmt.Println(string(b))
+}
+
+func containsGit(repoPath string) bool {
+	if _, err := os.Stat(path.Join(repoPath, ".git")); os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
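
The entropy scoring itself lives in src/entropy.go, which is not shown in this hunk. As a rough illustration of what `getShannonEntropy` is assumed to compute, here is a standard Shannon-entropy calculation over a word's characters, scored in bits on the 0.0-8.0 scale that the --entropy flag describes; the sample strings below are made up.

```go
package main

import (
	"fmt"
	"math"
	"strings"
)

// shannonEntropy is an illustrative stand-in for the scoring used by inspect():
// it measures how evenly a word's characters are distributed, from 0.0 for a
// single repeated character toward 8.0 for maximally random byte content.
func shannonEntropy(word string) float64 {
	if word == "" {
		return 0
	}
	freq := make(map[rune]float64)
	for _, r := range word {
		freq[r]++
	}
	var entropy float64
	n := float64(len([]rune(word)))
	for _, count := range freq {
		p := count / n
		entropy -= p * math.Log2(p)
	}
	return entropy
}

func main() {
	// A low-entropy word next to a random-looking, made-up "secret".
	for _, word := range strings.Fields("password aws_secret=A8hf3kPq0zT7xWc2LmN9rYb4VdQ1sJe6") {
		fmt.Printf("%.2f  %s\n", shannonEntropy(word), word)
	}
}
```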

+ 0 - 5
vendor/github.com/BurntSushi/toml/.gitignore

@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test

+ 0 - 15
vendor/github.com/BurntSushi/toml/.travis.yml

@@ -1,15 +0,0 @@
-language: go
-go:
-  - 1.1
-  - 1.2
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go install ./...
-  - go get github.com/BurntSushi/toml-test
-script:
-  - export PATH="$PATH:$HOME/gopath/bin"
-  - make test

+ 0 - 3
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-

+ 0 - 21
vendor/github.com/BurntSushi/toml/COPYING

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 19
vendor/github.com/BurntSushi/toml/Makefile

@@ -1,19 +0,0 @@
-install:
-	go install ./...
-
-test: install
-	go test -v
-	toml-test toml-test-decoder
-	toml-test -encoder toml-test-encoder
-
-fmt:
-	gofmt -w *.go */*.go
-	colcheck *.go */*.go
-
-tags:
-	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
-	git push origin master
-	git push github master
-

+ 0 - 218
vendor/github.com/BurntSushi/toml/README.md

@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
-  Age int
-  Cats []string
-  Pi float64
-  Perfection []int
-  DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-  // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
-  ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
-  Name     string
-  Duration duration
-}
-type songs struct {
-  Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-  log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
-  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
-	time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
-	var err error
-	d.Duration, err = time.ParseDuration(string(text))
-	return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-  "alpha",
-  "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title string
-	Owner ownerInfo
-	DB database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-
-type ownerInfo struct {
-	Name string
-	Org string `toml:"organization"`
-	Bio string
-	DOB time.Time
-}
-
-type database struct {
-	Server string
-	Ports []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data [][]interface{}
-	Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.

+ 0 - 21
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 21
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 21
vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 509
vendor/github.com/BurntSushi/toml/decode.go

@@ -1,509 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math"
-	"reflect"
-	"strings"
-	"time"
-)
-
-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
-	UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
-	_, err := Decode(string(p), v)
-	return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
-	undecoded interface{}
-	context   Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md := MetaData{decoded: make(map[string]bool)}
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md.context = primValue.context
-	defer func() { md.context = nil }()
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
-	rv := reflect.ValueOf(v)
-	if rv.Kind() != reflect.Ptr {
-		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
-	}
-	if rv.IsNil() {
-		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
-	}
-	p, err := parse(data)
-	if err != nil {
-		return MetaData{}, err
-	}
-	md := MetaData{
-		p.mapping, p.types, p.ordered,
-		make(map[string]bool, len(p.ordered)), nil,
-	}
-	return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadFile(fpath)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadAll(r)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
-	// Special case. Look for a `Primitive` value.
-	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
-		// Save the undecoded data and the key context into the primitive
-		// value.
-		context := make(Key, len(md.context))
-		copy(context, md.context)
-		rv.Set(reflect.ValueOf(Primitive{
-			undecoded: data,
-			context:   context,
-		}))
-		return nil
-	}
-
-	// Special case. Unmarshaler Interface support.
-	if rv.CanAddr() {
-		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
-			return v.UnmarshalTOML(data)
-		}
-	}
-
-	// Special case. Handle time.Time values specifically.
-	// TODO: Remove this code when we decide to drop support for Go 1.1.
-	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
-	// interfaces.
-	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
-		return md.unifyDatetime(data, rv)
-	}
-
-	// Special case. Look for a value satisfying the TextUnmarshaler interface.
-	if v, ok := rv.Interface().(TextUnmarshaler); ok {
-		return md.unifyText(data, v)
-	}
-	// BUG(burntsushi)
-	// The behavior here is incorrect whenever a Go type satisfies the
-	// encoding.TextUnmarshaler interface but also corresponds to a TOML
-	// hash or array. In particular, the unmarshaler should only be applied
-	// to primitive TOML values. But at this point, it will be applied to
-	// all kinds of values and produce an incorrect error whenever those values
-	// are hashes or arrays (including arrays of tables).
-
-	k := rv.Kind()
-
-	// laziness
-	if k >= reflect.Int && k <= reflect.Uint64 {
-		return md.unifyInt(data, rv)
-	}
-	switch k {
-	case reflect.Ptr:
-		elem := reflect.New(rv.Type().Elem())
-		err := md.unify(data, reflect.Indirect(elem))
-		if err != nil {
-			return err
-		}
-		rv.Set(elem)
-		return nil
-	case reflect.Struct:
-		return md.unifyStruct(data, rv)
-	case reflect.Map:
-		return md.unifyMap(data, rv)
-	case reflect.Array:
-		return md.unifyArray(data, rv)
-	case reflect.Slice:
-		return md.unifySlice(data, rv)
-	case reflect.String:
-		return md.unifyString(data, rv)
-	case reflect.Bool:
-		return md.unifyBool(data, rv)
-	case reflect.Interface:
-		// we only support empty interfaces.
-		if rv.NumMethod() > 0 {
-			return e("unsupported type %s", rv.Type())
-		}
-		return md.unifyAnything(data, rv)
-	case reflect.Float32:
-		fallthrough
-	case reflect.Float64:
-		return md.unifyFloat64(data, rv)
-	}
-	return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
-	if !ok {
-		if mapping == nil {
-			return nil
-		}
-		return e("type mismatch for %s: expected table but found %T",
-			rv.Type().String(), mapping)
-	}
-
-	for key, datum := range tmap {
-		var f *field
-		fields := cachedTypeFields(rv.Type())
-		for i := range fields {
-			ff := &fields[i]
-			if ff.name == key {
-				f = ff
-				break
-			}
-			if f == nil && strings.EqualFold(ff.name, key) {
-				f = ff
-			}
-		}
-		if f != nil {
-			subv := rv
-			for _, i := range f.index {
-				subv = indirect(subv.Field(i))
-			}
-			if isUnifiable(subv) {
-				md.decoded[md.context.add(key).String()] = true
-				md.context = append(md.context, key)
-				if err := md.unify(datum, subv); err != nil {
-					return err
-				}
-				md.context = md.context[0 : len(md.context)-1]
-			} else if f.name != "" {
-				// Bad user! No soup for you!
-				return e("cannot write unexported field %s.%s",
-					rv.Type().String(), f.name)
-			}
-		}
-	}
-	return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
-	if !ok {
-		if tmap == nil {
-			return nil
-		}
-		return badtype("map", mapping)
-	}
-	if rv.IsNil() {
-		rv.Set(reflect.MakeMap(rv.Type()))
-	}
-	for k, v := range tmap {
-		md.decoded[md.context.add(k).String()] = true
-		md.context = append(md.context, k)
-
-		rvkey := indirect(reflect.New(rv.Type().Key()))
-		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
-		if err := md.unify(v, rvval); err != nil {
-			return err
-		}
-		md.context = md.context[0 : len(md.context)-1]
-
-		rvkey.SetString(k)
-		rv.SetMapIndex(rvkey, rvval)
-	}
-	return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
-	datav := reflect.ValueOf(data)
-	if datav.Kind() != reflect.Slice {
-		if !datav.IsValid() {
-			return nil
-		}
-		return badtype("slice", data)
-	}
-	sliceLen := datav.Len()
-	if sliceLen != rv.Len() {
-		return e("expected array length %d; got TOML array of length %d",
-			rv.Len(), sliceLen)
-	}
-	return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
-	datav := reflect.ValueOf(data)
-	if datav.Kind() != reflect.Slice {
-		if !datav.IsValid() {
-			return nil
-		}
-		return badtype("slice", data)
-	}
-	n := datav.Len()
-	if rv.IsNil() || rv.Cap() < n {
-		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
-	}
-	rv.SetLen(n)
-	return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
-	sliceLen := data.Len()
-	for i := 0; i < sliceLen; i++ {
-		v := data.Index(i).Interface()
-		sliceval := indirect(rv.Index(i))
-		if err := md.unify(v, sliceval); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
-	if _, ok := data.(time.Time); ok {
-		rv.Set(reflect.ValueOf(data))
-		return nil
-	}
-	return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
-	if s, ok := data.(string); ok {
-		rv.SetString(s)
-		return nil
-	}
-	return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
-	if num, ok := data.(float64); ok {
-		switch rv.Kind() {
-		case reflect.Float32:
-			fallthrough
-		case reflect.Float64:
-			rv.SetFloat(num)
-		default:
-			panic("bug")
-		}
-		return nil
-	}
-	return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
-	if num, ok := data.(int64); ok {
-		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
-			switch rv.Kind() {
-			case reflect.Int, reflect.Int64:
-				// No bounds checking necessary.
-			case reflect.Int8:
-				if num < math.MinInt8 || num > math.MaxInt8 {
-					return e("value %d is out of range for int8", num)
-				}
-			case reflect.Int16:
-				if num < math.MinInt16 || num > math.MaxInt16 {
-					return e("value %d is out of range for int16", num)
-				}
-			case reflect.Int32:
-				if num < math.MinInt32 || num > math.MaxInt32 {
-					return e("value %d is out of range for int32", num)
-				}
-			}
-			rv.SetInt(num)
-		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
-			unum := uint64(num)
-			switch rv.Kind() {
-			case reflect.Uint, reflect.Uint64:
-				// No bounds checking necessary.
-			case reflect.Uint8:
-				if num < 0 || unum > math.MaxUint8 {
-					return e("value %d is out of range for uint8", num)
-				}
-			case reflect.Uint16:
-				if num < 0 || unum > math.MaxUint16 {
-					return e("value %d is out of range for uint16", num)
-				}
-			case reflect.Uint32:
-				if num < 0 || unum > math.MaxUint32 {
-					return e("value %d is out of range for uint32", num)
-				}
-			}
-			rv.SetUint(unum)
-		} else {
-			panic("unreachable")
-		}
-		return nil
-	}
-	return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
-	if b, ok := data.(bool); ok {
-		rv.SetBool(b)
-		return nil
-	}
-	return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
-	rv.Set(reflect.ValueOf(data))
-	return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
-	var s string
-	switch sdata := data.(type) {
-	case TextMarshaler:
-		text, err := sdata.MarshalText()
-		if err != nil {
-			return err
-		}
-		s = string(text)
-	case fmt.Stringer:
-		s = sdata.String()
-	case string:
-		s = sdata
-	case bool:
-		s = fmt.Sprintf("%v", sdata)
-	case int64:
-		s = fmt.Sprintf("%d", sdata)
-	case float64:
-		s = fmt.Sprintf("%f", sdata)
-	default:
-		return badtype("primitive (string-like)", data)
-	}
-	if err := v.UnmarshalText([]byte(s)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
-	return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
-	if v.Kind() != reflect.Ptr {
-		if v.CanSet() {
-			pv := v.Addr()
-			if _, ok := pv.Interface().(TextUnmarshaler); ok {
-				return pv
-			}
-		}
-		return v
-	}
-	if v.IsNil() {
-		v.Set(reflect.New(v.Type().Elem()))
-	}
-	return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
-	if rv.CanSet() {
-		return true
-	}
-	if _, ok := rv.Interface().(TextUnmarshaler); ok {
-		return true
-	}
-	return false
-}
-
-func badtype(expected string, data interface{}) error {
-	return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}

+ 0 - 121
vendor/github.com/BurntSushi/toml/decode_meta.go

@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
-	mapping map[string]interface{}
-	types   map[string]tomlType
-	keys    []Key
-	decoded map[string]bool
-	context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-//	// access the TOML key 'a.b.c'
-//	IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
-	if len(key) == 0 {
-		return false
-	}
-
-	var hash map[string]interface{}
-	var ok bool
-	var hashOrVal interface{} = md.mapping
-	for _, k := range key {
-		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
-			return false
-		}
-		if hashOrVal, ok = hash[k]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
-	fullkey := strings.Join(key, ".")
-	if typ, ok := md.types[fullkey]; ok {
-		return typ.typeString()
-	}
-	return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
-	return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
-	var ss []string
-	for i := range k {
-		ss = append(ss, k.maybeQuoted(i))
-	}
-	return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
-	quote := false
-	for _, c := range k[i] {
-		if !isBareKeyChar(c) {
-			quote = true
-			break
-		}
-	}
-	if quote {
-		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
-	}
-	return k[i]
-}
-
-func (k Key) add(piece string) Key {
-	newKey := make(Key, len(k)+1)
-	copy(newKey, k)
-	newKey[len(k)] = piece
-	return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
-	return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
-	undecoded := make([]Key, 0, len(md.keys))
-	for _, key := range md.keys {
-		if !md.decoded[key.String()] {
-			undecoded = append(undecoded, key)
-		}
-	}
-	return undecoded
-}

+ 0 - 27
vendor/github.com/BurntSushi/toml/doc.go

@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml

+ 0 - 568
vendor/github.com/BurntSushi/toml/encode.go

@@ -1,568 +0,0 @@
-package toml
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
-	errArrayMixedElementTypes = errors.New(
-		"toml: cannot encode array with mixed element types")
-	errArrayNilElement = errors.New(
-		"toml: cannot encode array with nil element")
-	errNonString = errors.New(
-		"toml: cannot encode a map with non-string key type")
-	errAnonNonStruct = errors.New(
-		"toml: cannot encode an anonymous field that is not a struct")
-	errArrayNoTable = errors.New(
-		"toml: TOML array element cannot contain a table")
-	errNoKey = errors.New(
-		"toml: top-level values must be Go maps or structs")
-	errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
-	"\t", "\\t",
-	"\n", "\\n",
-	"\r", "\\r",
-	"\"", "\\\"",
-	"\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
-	// A single indentation level. By default it is two spaces.
-	Indent string
-
-	// hasWritten is whether we have written any output to w yet.
-	hasWritten bool
-	w          *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		w:      bufio.NewWriter(w),
-		Indent: "  ",
-	}
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
-	rv := eindirect(reflect.ValueOf(v))
-	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
-		return err
-	}
-	return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			if terr, ok := r.(tomlEncodeError); ok {
-				err = terr.error
-				return
-			}
-			panic(r)
-		}
-	}()
-	enc.encode(key, rv)
-	return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
-	// Special case. Time needs to be in ISO8601 format.
-	// Special case. If we can marshal the type to text, then we used that.
-	// Basically, this prevents the encoder for handling these types as
-	// generic structs (or whatever the underlying type of a TextMarshaler is).
-	switch rv.Interface().(type) {
-	case time.Time, TextMarshaler:
-		enc.keyEqElement(key, rv)
-		return
-	}
-
-	k := rv.Kind()
-	switch k {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-		reflect.Uint64,
-		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
-		enc.keyEqElement(key, rv)
-	case reflect.Array, reflect.Slice:
-		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
-			enc.eArrayOfTables(key, rv)
-		} else {
-			enc.keyEqElement(key, rv)
-		}
-	case reflect.Interface:
-		if rv.IsNil() {
-			return
-		}
-		enc.encode(key, rv.Elem())
-	case reflect.Map:
-		if rv.IsNil() {
-			return
-		}
-		enc.eTable(key, rv)
-	case reflect.Ptr:
-		if rv.IsNil() {
-			return
-		}
-		enc.encode(key, rv.Elem())
-	case reflect.Struct:
-		enc.eTable(key, rv)
-	default:
-		panic(e("unsupported type for key '%s': %s", key, k))
-	}
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
-	switch v := rv.Interface().(type) {
-	case time.Time:
-		// Special case time.Time as a primitive. Has to come before
-		// TextMarshaler below because time.Time implements
-		// encoding.TextMarshaler, but we need to always use UTC.
-		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
-		return
-	case TextMarshaler:
-		// Special case. Use text marshaler if it's available for this value.
-		if s, err := v.MarshalText(); err != nil {
-			encPanic(err)
-		} else {
-			enc.writeQuoted(string(s))
-		}
-		return
-	}
-	switch rv.Kind() {
-	case reflect.Bool:
-		enc.wf(strconv.FormatBool(rv.Bool()))
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64:
-		enc.wf(strconv.FormatInt(rv.Int(), 10))
-	case reflect.Uint, reflect.Uint8, reflect.Uint16,
-		reflect.Uint32, reflect.Uint64:
-		enc.wf(strconv.FormatUint(rv.Uint(), 10))
-	case reflect.Float32:
-		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
-	case reflect.Float64:
-		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
-	case reflect.Array, reflect.Slice:
-		enc.eArrayOrSliceElement(rv)
-	case reflect.Interface:
-		enc.eElement(rv.Elem())
-	case reflect.String:
-		enc.writeQuoted(rv.String())
-	default:
-		panic(e("unexpected primitive type: %s", rv.Kind()))
-	}
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
-	if !strings.Contains(fstr, ".") {
-		return fstr + ".0"
-	}
-	return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
-	enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
-	length := rv.Len()
-	enc.wf("[")
-	for i := 0; i < length; i++ {
-		elem := rv.Index(i)
-		enc.eElement(elem)
-		if i != length-1 {
-			enc.wf(", ")
-		}
-	}
-	enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
-	if len(key) == 0 {
-		encPanic(errNoKey)
-	}
-	for i := 0; i < rv.Len(); i++ {
-		trv := rv.Index(i)
-		if isNil(trv) {
-			continue
-		}
-		panicIfInvalidKey(key)
-		enc.newline()
-		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
-		enc.newline()
-		enc.eMapOrStruct(key, trv)
-	}
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
-	panicIfInvalidKey(key)
-	if len(key) == 1 {
-		// Output an extra newline between top-level tables.
-		// (The newline isn't written if nothing else has been written though.)
-		enc.newline()
-	}
-	if len(key) > 0 {
-		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
-		enc.newline()
-	}
-	enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
-	switch rv := eindirect(rv); rv.Kind() {
-	case reflect.Map:
-		enc.eMap(key, rv)
-	case reflect.Struct:
-		enc.eStruct(key, rv)
-	default:
-		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
-	}
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
-	rt := rv.Type()
-	if rt.Key().Kind() != reflect.String {
-		encPanic(errNonString)
-	}
-
-	// Sort keys so that we have deterministic output. And write keys directly
-	// underneath this key first, before writing sub-structs or sub-maps.
-	var mapKeysDirect, mapKeysSub []string
-	for _, mapKey := range rv.MapKeys() {
-		k := mapKey.String()
-		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
-			mapKeysSub = append(mapKeysSub, k)
-		} else {
-			mapKeysDirect = append(mapKeysDirect, k)
-		}
-	}
-
-	var writeMapKeys = func(mapKeys []string) {
-		sort.Strings(mapKeys)
-		for _, mapKey := range mapKeys {
-			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
-			if isNil(mrv) {
-				// Don't write anything for nil fields.
-				continue
-			}
-			enc.encode(key.add(mapKey), mrv)
-		}
-	}
-	writeMapKeys(mapKeysDirect)
-	writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
-	// Write keys for fields directly under this key first, because if we write
-	// a field that creates a new table, then all keys under it will be in that
-	// table (not the one we're writing here).
-	rt := rv.Type()
-	var fieldsDirect, fieldsSub [][]int
-	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
-	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
-		for i := 0; i < rt.NumField(); i++ {
-			f := rt.Field(i)
-			// skip unexported fields
-			if f.PkgPath != "" && !f.Anonymous {
-				continue
-			}
-			frv := rv.Field(i)
-			if f.Anonymous {
-				t := f.Type
-				switch t.Kind() {
-				case reflect.Struct:
-					// Treat anonymous struct fields with
-					// tag names as though they are not
-					// anonymous, like encoding/json does.
-					if getOptions(f.Tag).name == "" {
-						addFields(t, frv, f.Index)
-						continue
-					}
-				case reflect.Ptr:
-					if t.Elem().Kind() == reflect.Struct &&
-						getOptions(f.Tag).name == "" {
-						if !frv.IsNil() {
-							addFields(t.Elem(), frv.Elem(), f.Index)
-						}
-						continue
-					}
-					// Fall through to the normal field encoding logic below
-					// for non-struct anonymous fields.
-				}
-			}
-
-			if typeIsHash(tomlTypeOfGo(frv)) {
-				fieldsSub = append(fieldsSub, append(start, f.Index...))
-			} else {
-				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
-			}
-		}
-	}
-	addFields(rt, rv, nil)
-
-	var writeFields = func(fields [][]int) {
-		for _, fieldIndex := range fields {
-			sft := rt.FieldByIndex(fieldIndex)
-			sf := rv.FieldByIndex(fieldIndex)
-			if isNil(sf) {
-				// Don't write anything for nil fields.
-				continue
-			}
-
-			opts := getOptions(sft.Tag)
-			if opts.skip {
-				continue
-			}
-			keyName := sft.Name
-			if opts.name != "" {
-				keyName = opts.name
-			}
-			if opts.omitempty && isEmpty(sf) {
-				continue
-			}
-			if opts.omitzero && isZero(sf) {
-				continue
-			}
-
-			enc.encode(key.add(keyName), sf)
-		}
-	}
-	writeFields(fieldsDirect)
-	writeFields(fieldsSub)
-}
-
-// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
-// which means no concrete TOML type could be found. It is also used to
-// determine whether the types of array elements are mixed (which is
-// forbidden); a nil Go value is illegal as an array element.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() {
-		return nil
-	}
-	switch rv.Kind() {
-	case reflect.Bool:
-		return tomlBool
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-		reflect.Uint64:
-		return tomlInteger
-	case reflect.Float32, reflect.Float64:
-		return tomlFloat
-	case reflect.Array, reflect.Slice:
-		if typeEqual(tomlHash, tomlArrayType(rv)) {
-			return tomlArrayHash
-		}
-		return tomlArray
-	case reflect.Ptr, reflect.Interface:
-		return tomlTypeOfGo(rv.Elem())
-	case reflect.String:
-		return tomlString
-	case reflect.Map:
-		return tomlHash
-	case reflect.Struct:
-		switch rv.Interface().(type) {
-		case time.Time:
-			return tomlDatetime
-		case TextMarshaler:
-			return tomlString
-		default:
-			return tomlHash
-		}
-	default:
-		panic("unexpected reflect.Kind: " + rv.Kind().String())
-	}
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
-// slice). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
-		return nil
-	}
-	firstType := tomlTypeOfGo(rv.Index(0))
-	if firstType == nil {
-		encPanic(errArrayNilElement)
-	}
-
-	rvlen := rv.Len()
-	for i := 1; i < rvlen; i++ {
-		elem := rv.Index(i)
-		switch elemType := tomlTypeOfGo(elem); {
-		case elemType == nil:
-			encPanic(errArrayNilElement)
-		case !typeEqual(firstType, elemType):
-			encPanic(errArrayMixedElementTypes)
-		}
-	}
-	// If we have a nested array, then we must make sure that the nested
-	// array contains ONLY primitives.
-	// This checks arbitrarily nested arrays.
-	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
-		nest := tomlArrayType(eindirect(rv.Index(0)))
-		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
-			encPanic(errArrayNoTable)
-		}
-	}
-	return firstType
-}
-
-type tagOptions struct {
-	skip      bool // "-"
-	name      string
-	omitempty bool
-	omitzero  bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
-	t := tag.Get("toml")
-	if t == "-" {
-		return tagOptions{skip: true}
-	}
-	var opts tagOptions
-	parts := strings.Split(t, ",")
-	opts.name = parts[0]
-	for _, s := range parts[1:] {
-		switch s {
-		case "omitempty":
-			opts.omitempty = true
-		case "omitzero":
-			opts.omitzero = true
-		}
-	}
-	return opts
-}
-
-func isZero(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return rv.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return rv.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return rv.Float() == 0.0
-	}
-	return false
-}
-
-func isEmpty(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
-		return rv.Len() == 0
-	case reflect.Bool:
-		return !rv.Bool()
-	}
-	return false
-}
-
-func (enc *Encoder) newline() {
-	if enc.hasWritten {
-		enc.wf("\n")
-	}
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
-	if len(key) == 0 {
-		encPanic(errNoKey)
-	}
-	panicIfInvalidKey(key)
-	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
-	enc.eElement(val)
-	enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
-	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
-		encPanic(err)
-	}
-	enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
-	return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
-	panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
-	switch v.Kind() {
-	case reflect.Ptr, reflect.Interface:
-		return eindirect(v.Elem())
-	default:
-		return v
-	}
-}
-
-func isNil(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return rv.IsNil()
-	default:
-		return false
-	}
-}
-
-func panicIfInvalidKey(key Key) {
-	for _, k := range key {
-		if len(k) == 0 {
-			encPanic(e("Key '%s' is not a valid table name. Key names "+
-				"cannot be empty.", key.maybeQuotedAll()))
-		}
-	}
-}
-
-func isValidKeyName(s string) bool {
-	return len(s) != 0
-}
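
The encoder removed above is driven entirely by `toml` struct tags (renaming, `omitempty`, `omitzero`, skipping with `-`). As a quick orientation for readers who only see the deletion here, this is a minimal sketch of how a caller exercises those options, assuming the library is still consumed as an ordinary module dependency; the Config struct and its values are illustrative, not taken from this repository.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Config is illustrative; the tag options used here are the ones the
// removed encode.go handles.
type Config struct {
	Host string   `toml:"host"`
	Port int      `toml:"port,omitzero"`
	Tags []string `toml:"tags,omitempty"`
}

func main() {
	var buf bytes.Buffer
	cfg := Config{Host: "example.com", Tags: []string{"audit", "ci"}}
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	// Port is dropped by omitzero because it is the zero value; an empty
	// Tags slice would likewise be dropped by omitempty.
	fmt.Print(buf.String())
}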

+ 0 - 19
vendor/github.com/BurntSushi/toml/encoding_types.go

@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
-	"encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler

+ 0 - 18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go

@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
-	MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
-	UnmarshalText(text []byte) error
-}

+ 0 - 953
vendor/github.com/BurntSushi/toml/lex.go

@@ -1,953 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-type itemType int
-
-const (
-	itemError itemType = iota
-	itemNIL            // used in the parser to indicate no type
-	itemEOF
-	itemText
-	itemString
-	itemRawString
-	itemMultilineString
-	itemRawMultilineString
-	itemBool
-	itemInteger
-	itemFloat
-	itemDatetime
-	itemArray // the start of an array
-	itemArrayEnd
-	itemTableStart
-	itemTableEnd
-	itemArrayTableStart
-	itemArrayTableEnd
-	itemKeyStart
-	itemCommentStart
-	itemInlineTableStart
-	itemInlineTableEnd
-)
-
-const (
-	eof              = 0
-	comma            = ','
-	tableStart       = '['
-	tableEnd         = ']'
-	arrayTableStart  = '['
-	arrayTableEnd    = ']'
-	tableSep         = '.'
-	keySep           = '='
-	arrayStart       = '['
-	arrayEnd         = ']'
-	commentStart     = '#'
-	stringStart      = '"'
-	stringEnd        = '"'
-	rawStringStart   = '\''
-	rawStringEnd     = '\''
-	inlineTableStart = '{'
-	inlineTableEnd   = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
-	input string
-	start int
-	pos   int
-	line  int
-	state stateFn
-	items chan item
-
-	// Allow for backing up up to three runes.
-	// This is necessary because TOML contains 3-rune tokens (""" and ''').
-	prevWidths [3]int
-	nprev      int // how many of prevWidths are in use
-	// If we emit an eof, we can still back up, but it is not OK to call
-	// next again.
-	atEOF bool
-
-	// A stack of state functions used to maintain context.
-	// The idea is to reuse parts of the state machine in various places.
-	// For example, values can appear at the top level or within arbitrarily
-	// nested arrays. The last state on the stack is used after a value has
-	// been lexed. Similarly for comments.
-	stack []stateFn
-}
-
-type item struct {
-	typ  itemType
-	val  string
-	line int
-}
-
-func (lx *lexer) nextItem() item {
-	for {
-		select {
-		case item := <-lx.items:
-			return item
-		default:
-			lx.state = lx.state(lx)
-		}
-	}
-}
-
-func lex(input string) *lexer {
-	lx := &lexer{
-		input: input,
-		state: lexTop,
-		line:  1,
-		items: make(chan item, 10),
-		stack: make([]stateFn, 0, 10),
-	}
-	return lx
-}
-
-func (lx *lexer) push(state stateFn) {
-	lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
-	if len(lx.stack) == 0 {
-		return lx.errorf("BUG in lexer: no states to pop")
-	}
-	last := lx.stack[len(lx.stack)-1]
-	lx.stack = lx.stack[0 : len(lx.stack)-1]
-	return last
-}
-
-func (lx *lexer) current() string {
-	return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
-	lx.items <- item{typ, lx.current(), lx.line}
-	lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
-	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
-	lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
-	if lx.atEOF {
-		panic("next called after EOF")
-	}
-	if lx.pos >= len(lx.input) {
-		lx.atEOF = true
-		return eof
-	}
-
-	if lx.input[lx.pos] == '\n' {
-		lx.line++
-	}
-	lx.prevWidths[2] = lx.prevWidths[1]
-	lx.prevWidths[1] = lx.prevWidths[0]
-	if lx.nprev < 3 {
-		lx.nprev++
-	}
-	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
-	lx.prevWidths[0] = w
-	lx.pos += w
-	return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
-	lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called up to three times between calls to next.
-func (lx *lexer) backup() {
-	if lx.atEOF {
-		lx.atEOF = false
-		return
-	}
-	if lx.nprev < 1 {
-		panic("backed up too far")
-	}
-	w := lx.prevWidths[0]
-	lx.prevWidths[0] = lx.prevWidths[1]
-	lx.prevWidths[1] = lx.prevWidths[2]
-	lx.nprev--
-	lx.pos -= w
-	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
-		lx.line--
-	}
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
-	if lx.next() == valid {
-		return true
-	}
-	lx.backup()
-	return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
-	r := lx.next()
-	lx.backup()
-	return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
-	for {
-		r := lx.next()
-		if pred(r) {
-			continue
-		}
-		lx.backup()
-		lx.ignore()
-		return
-	}
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
-	lx.items <- item{
-		itemError,
-		fmt.Sprintf(format, values...),
-		lx.line,
-	}
-	return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
-	r := lx.next()
-	if isWhitespace(r) || isNL(r) {
-		return lexSkip(lx, lexTop)
-	}
-	switch r {
-	case commentStart:
-		lx.push(lexTop)
-		return lexCommentStart
-	case tableStart:
-		return lexTableStart
-	case eof:
-		if lx.pos > lx.start {
-			return lx.errorf("unexpected EOF")
-		}
-		lx.emit(itemEOF)
-		return nil
-	}
-
-	// At this point, the only valid item can be a key, so we back up
-	// and let the key lexer do the rest.
-	lx.backup()
-	lx.push(lexTopEnd)
-	return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == commentStart:
-		// a comment will read to a newline for us.
-		lx.push(lexTop)
-		return lexCommentStart
-	case isWhitespace(r):
-		return lexTopEnd
-	case isNL(r):
-		lx.ignore()
-		return lexTop
-	case r == eof:
-		lx.emit(itemEOF)
-		return nil
-	}
-	return lx.errorf("expected a top-level item to end with a newline, "+
-		"comment, or EOF, but got %q instead", r)
-}
-
-// lexTableStart lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
-	if lx.peek() == arrayTableStart {
-		lx.next()
-		lx.emit(itemArrayTableStart)
-		lx.push(lexArrayTableEnd)
-	} else {
-		lx.emit(itemTableStart)
-		lx.push(lexTableEnd)
-	}
-	return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
-	lx.emit(itemTableEnd)
-	return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
-	if r := lx.next(); r != arrayTableEnd {
-		return lx.errorf("expected end of table array name delimiter %q, "+
-			"but got %q instead", arrayTableEnd, r)
-	}
-	lx.emit(itemArrayTableEnd)
-	return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.peek(); {
-	case r == tableEnd || r == eof:
-		return lx.errorf("unexpected end of table name " +
-			"(table names cannot be empty)")
-	case r == tableSep:
-		return lx.errorf("unexpected table separator " +
-			"(table names cannot be empty)")
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.push(lexTableNameEnd)
-		return lexValue // reuse string lexing
-	default:
-		return lexBareTableName
-	}
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
-	r := lx.next()
-	if isBareKeyChar(r) {
-		return lexBareTableName
-	}
-	lx.backup()
-	lx.emit(itemText)
-	return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.next(); {
-	case isWhitespace(r):
-		return lexTableNameEnd
-	case r == tableSep:
-		lx.ignore()
-		return lexTableNameStart
-	case r == tableEnd:
-		return lx.pop()
-	default:
-		return lx.errorf("expected '.' or ']' to end table name, "+
-			"but got %q instead", r)
-	}
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
-	r := lx.peek()
-	switch {
-	case r == keySep:
-		return lx.errorf("unexpected key separator %q", keySep)
-	case isWhitespace(r) || isNL(r):
-		lx.next()
-		return lexSkip(lx, lexKeyStart)
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		lx.push(lexKeyEnd)
-		return lexValue // reuse string lexing
-	default:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		return lexBareKey
-	}
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isBareKeyChar(r):
-		return lexBareKey
-	case isWhitespace(r):
-		lx.backup()
-		lx.emit(itemText)
-		return lexKeyEnd
-	case r == keySep:
-		lx.backup()
-		lx.emit(itemText)
-		return lexKeyEnd
-	default:
-		return lx.errorf("bare keys cannot contain %q", r)
-	}
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case r == keySep:
-		return lexSkip(lx, lexValue)
-	case isWhitespace(r):
-		return lexSkip(lx, lexKeyEnd)
-	default:
-		return lx.errorf("expected key separator %q, but got %q instead",
-			keySep, r)
-	}
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
-	// We allow whitespace to precede a value, but NOT newlines.
-	// In array syntax, the array states are responsible for ignoring newlines.
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexValue)
-	case isDigit(r):
-		lx.backup() // avoid an extra state and use the same as above
-		return lexNumberOrDateStart
-	}
-	switch r {
-	case arrayStart:
-		lx.ignore()
-		lx.emit(itemArray)
-		return lexArrayValue
-	case inlineTableStart:
-		lx.ignore()
-		lx.emit(itemInlineTableStart)
-		return lexInlineTableValue
-	case stringStart:
-		if lx.accept(stringStart) {
-			if lx.accept(stringStart) {
-				lx.ignore() // Ignore """
-				return lexMultilineString
-			}
-			lx.backup()
-		}
-		lx.ignore() // ignore the '"'
-		return lexString
-	case rawStringStart:
-		if lx.accept(rawStringStart) {
-			if lx.accept(rawStringStart) {
-				lx.ignore() // Ignore '''
-				return lexMultilineRawString
-			}
-			lx.backup()
-		}
-		lx.ignore() // ignore the "'"
-		return lexRawString
-	case '+', '-':
-		return lexNumberStart
-	case '.': // special error case, be kind to users
-		return lx.errorf("floats must start with a digit, not '.'")
-	}
-	if unicode.IsLetter(r) {
-		// Be permissive here; lexBool will give a nice error if the
-		// user wrote something like
-		//   x = foo
-		// (i.e. not 'true' or 'false' but is something else word-like.)
-		lx.backup()
-		return lexBool
-	}
-	return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r) || isNL(r):
-		return lexSkip(lx, lexArrayValue)
-	case r == commentStart:
-		lx.push(lexArrayValue)
-		return lexCommentStart
-	case r == comma:
-		return lx.errorf("unexpected comma")
-	case r == arrayEnd:
-		// NOTE(caleb): The spec isn't clear about whether you can have
-		// a trailing comma or not, so we'll allow it.
-		return lexArrayEnd
-	}
-
-	lx.backup()
-	lx.push(lexArrayValueEnd)
-	return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r) || isNL(r):
-		return lexSkip(lx, lexArrayValueEnd)
-	case r == commentStart:
-		lx.push(lexArrayValueEnd)
-		return lexCommentStart
-	case r == comma:
-		lx.ignore()
-		return lexArrayValue // move on to the next value
-	case r == arrayEnd:
-		return lexArrayEnd
-	}
-	return lx.errorf(
-		"expected a comma or array terminator %q, but got %q instead",
-		arrayEnd, r,
-	)
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemArrayEnd)
-	return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexInlineTableValue)
-	case isNL(r):
-		return lx.errorf("newlines not allowed within inline tables")
-	case r == commentStart:
-		lx.push(lexInlineTableValue)
-		return lexCommentStart
-	case r == comma:
-		return lx.errorf("unexpected comma")
-	case r == inlineTableEnd:
-		return lexInlineTableEnd
-	}
-	lx.backup()
-	lx.push(lexInlineTableValueEnd)
-	return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexInlineTableValueEnd)
-	case isNL(r):
-		return lx.errorf("newlines not allowed within inline tables")
-	case r == commentStart:
-		lx.push(lexInlineTableValueEnd)
-		return lexCommentStart
-	case r == comma:
-		lx.ignore()
-		return lexInlineTableValue
-	case r == inlineTableEnd:
-		return lexInlineTableEnd
-	}
-	return lx.errorf("expected a comma or an inline table terminator %q, "+
-		"but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemInlineTableEnd)
-	return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == eof:
-		return lx.errorf("unexpected EOF")
-	case isNL(r):
-		return lx.errorf("strings cannot contain newlines")
-	case r == '\\':
-		lx.push(lexString)
-		return lexStringEscape
-	case r == stringEnd:
-		lx.backup()
-		lx.emit(itemString)
-		lx.next()
-		lx.ignore()
-		return lx.pop()
-	}
-	return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
-	switch lx.next() {
-	case eof:
-		return lx.errorf("unexpected EOF")
-	case '\\':
-		return lexMultilineStringEscape
-	case stringEnd:
-		if lx.accept(stringEnd) {
-			if lx.accept(stringEnd) {
-				lx.backup()
-				lx.backup()
-				lx.backup()
-				lx.emit(itemMultilineString)
-				lx.next()
-				lx.next()
-				lx.next()
-				lx.ignore()
-				return lx.pop()
-			}
-			lx.backup()
-		}
-	}
-	return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == eof:
-		return lx.errorf("unexpected EOF")
-	case isNL(r):
-		return lx.errorf("strings cannot contain newlines")
-	case r == rawStringEnd:
-		lx.backup()
-		lx.emit(itemRawString)
-		lx.next()
-		lx.ignore()
-		return lx.pop()
-	}
-	return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
-	switch lx.next() {
-	case eof:
-		return lx.errorf("unexpected EOF")
-	case rawStringEnd:
-		if lx.accept(rawStringEnd) {
-			if lx.accept(rawStringEnd) {
-				lx.backup()
-				lx.backup()
-				lx.backup()
-				lx.emit(itemRawMultilineString)
-				lx.next()
-				lx.next()
-				lx.next()
-				lx.ignore()
-				return lx.pop()
-			}
-			lx.backup()
-		}
-	}
-	return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
-	// Handle the special case first:
-	if isNL(lx.next()) {
-		return lexMultilineString
-	}
-	lx.backup()
-	lx.push(lexMultilineString)
-	return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
-	r := lx.next()
-	switch r {
-	case 'b':
-		fallthrough
-	case 't':
-		fallthrough
-	case 'n':
-		fallthrough
-	case 'f':
-		fallthrough
-	case 'r':
-		fallthrough
-	case '"':
-		fallthrough
-	case '\\':
-		return lx.pop()
-	case 'u':
-		return lexShortUnicodeEscape
-	case 'U':
-		return lexLongUnicodeEscape
-	}
-	return lx.errorf("invalid escape character %q; only the following "+
-		"escape characters are allowed: "+
-		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
-	var r rune
-	for i := 0; i < 4; i++ {
-		r = lx.next()
-		if !isHexadecimal(r) {
-			return lx.errorf(`expected four hexadecimal digits after '\u', `+
-				"but got %q instead", lx.current())
-		}
-	}
-	return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
-	var r rune
-	for i := 0; i < 8; i++ {
-		r = lx.next()
-		if !isHexadecimal(r) {
-			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
-				"but got %q instead", lx.current())
-		}
-	}
-	return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumberOrDate
-	}
-	switch r {
-	case '_':
-		return lexNumber
-	case 'e', 'E':
-		return lexFloat
-	case '.':
-		return lx.errorf("floats must start with a digit, not '.'")
-	}
-	return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumberOrDate
-	}
-	switch r {
-	case '-':
-		return lexDatetime
-	case '_':
-		return lexNumber
-	case '.', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexDatetime
-	}
-	switch r {
-	case '-', 'T', ':', '.', 'Z', '+':
-		return lexDatetime
-	}
-
-	lx.backup()
-	lx.emit(itemDatetime)
-	return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
-	// We MUST see a digit. Even floats have to start with a digit.
-	r := lx.next()
-	if !isDigit(r) {
-		if r == '.' {
-			return lx.errorf("floats must start with a digit, not '.'")
-		}
-		return lx.errorf("expected a digit but got %q", r)
-	}
-	return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumber
-	}
-	switch r {
-	case '_':
-		return lexNumber
-	case '.', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexFloat
-	}
-	switch r {
-	case '_', '.', '-', '+', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemFloat)
-	return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false'.
-func lexBool(lx *lexer) stateFn {
-	var rs []rune
-	for {
-		r := lx.next()
-		if !unicode.IsLetter(r) {
-			lx.backup()
-			break
-		}
-		rs = append(rs, r)
-	}
-	s := string(rs)
-	switch s {
-	case "true", "false":
-		lx.emit(itemBool)
-		return lx.pop()
-	}
-	return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemCommentStart)
-	return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
-	r := lx.peek()
-	if isNL(r) || r == eof {
-		lx.emit(itemText)
-		return lx.pop()
-	}
-	lx.next()
-	return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
-	return func(lx *lexer) stateFn {
-		lx.ignore()
-		return nextState
-	}
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
-	return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
-	return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
-	return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
-	return (r >= '0' && r <= '9') ||
-		(r >= 'a' && r <= 'f') ||
-		(r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
-	return (r >= 'A' && r <= 'Z') ||
-		(r >= 'a' && r <= 'z') ||
-		(r >= '0' && r <= '9') ||
-		r == '_' ||
-		r == '-'
-}
-
-func (itype itemType) String() string {
-	switch itype {
-	case itemError:
-		return "Error"
-	case itemNIL:
-		return "NIL"
-	case itemEOF:
-		return "EOF"
-	case itemText:
-		return "Text"
-	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
-		return "String"
-	case itemBool:
-		return "Bool"
-	case itemInteger:
-		return "Integer"
-	case itemFloat:
-		return "Float"
-	case itemDatetime:
-		return "DateTime"
-	case itemTableStart:
-		return "TableStart"
-	case itemTableEnd:
-		return "TableEnd"
-	case itemKeyStart:
-		return "KeyStart"
-	case itemArray:
-		return "Array"
-	case itemArrayEnd:
-		return "ArrayEnd"
-	case itemCommentStart:
-		return "CommentStart"
-	}
-	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
-	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
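
The lexer deleted above is organised around the stateFn pattern: every lexing function consumes some input and returns the next state function, with nil ending the loop. Below is a stripped-down, self-contained sketch of that control flow; the miniLexer type is hypothetical and splits on spaces rather than lexing TOML.

package main

import "fmt"

// stateFn mirrors the removed lexer's control flow: each state consumes
// some input and returns the next state, or nil to stop.
type stateFn func(l *miniLexer) stateFn

// miniLexer is a hypothetical stand-in for the real lexer state.
type miniLexer struct {
	input string
	start int
	pos   int
	words []string
}

func lexWord(l *miniLexer) stateFn {
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	l.words = append(l.words, l.input[l.start:l.pos])
	return lexSpace
}

func lexSpace(l *miniLexer) stateFn {
	for l.pos < len(l.input) && l.input[l.pos] == ' ' {
		l.pos++
	}
	if l.pos >= len(l.input) {
		return nil // end of input terminates the state machine
	}
	l.start = l.pos
	return lexWord
}

func main() {
	l := &miniLexer{input: "key = value"}
	var state stateFn = lexWord
	for state != nil {
		state = state(l)
	}
	fmt.Println(l.words) // [key = value]
}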

+ 0 - 592
vendor/github.com/BurntSushi/toml/parse.go

@@ -1,592 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-	"unicode"
-	"unicode/utf8"
-)
-
-type parser struct {
-	mapping map[string]interface{}
-	types   map[string]tomlType
-	lx      *lexer
-
-	// A list of keys in the order that they appear in the TOML data.
-	ordered []Key
-
-	// the full key for the current hash in scope
-	context Key
-
-	// the base key name for everything except hashes
-	currentKey string
-
-	// rough approximation of line number
-	approxLine int
-
-	// A map of 'key.group.names' to whether they were created implicitly.
-	implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
-	return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			var ok bool
-			if err, ok = r.(parseError); ok {
-				return
-			}
-			panic(r)
-		}
-	}()
-
-	p = &parser{
-		mapping:   make(map[string]interface{}),
-		types:     make(map[string]tomlType),
-		lx:        lex(data),
-		ordered:   make([]Key, 0),
-		implicits: make(map[string]bool),
-	}
-	for {
-		item := p.next()
-		if item.typ == itemEOF {
-			break
-		}
-		p.topLevel(item)
-	}
-
-	return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
-	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
-		p.approxLine, p.current(), fmt.Sprintf(format, v...))
-	panic(parseError(msg))
-}
-
-func (p *parser) next() item {
-	it := p.lx.nextItem()
-	if it.typ == itemError {
-		p.panicf("%s", it.val)
-	}
-	return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
-	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
-	it := p.next()
-	p.assertEqual(typ, it.typ)
-	return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
-	if expected != got {
-		p.bug("Expected '%s' but got '%s'.", expected, got)
-	}
-}
-
-func (p *parser) topLevel(item item) {
-	switch item.typ {
-	case itemCommentStart:
-		p.approxLine = item.line
-		p.expect(itemText)
-	case itemTableStart:
-		kg := p.next()
-		p.approxLine = kg.line
-
-		var key Key
-		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
-			key = append(key, p.keyString(kg))
-		}
-		p.assertEqual(itemTableEnd, kg.typ)
-
-		p.establishContext(key, false)
-		p.setType("", tomlHash)
-		p.ordered = append(p.ordered, key)
-	case itemArrayTableStart:
-		kg := p.next()
-		p.approxLine = kg.line
-
-		var key Key
-		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
-			key = append(key, p.keyString(kg))
-		}
-		p.assertEqual(itemArrayTableEnd, kg.typ)
-
-		p.establishContext(key, true)
-		p.setType("", tomlArrayHash)
-		p.ordered = append(p.ordered, key)
-	case itemKeyStart:
-		kname := p.next()
-		p.approxLine = kname.line
-		p.currentKey = p.keyString(kname)
-
-		val, typ := p.value(p.next())
-		p.setValue(p.currentKey, val)
-		p.setType(p.currentKey, typ)
-		p.ordered = append(p.ordered, p.context.add(p.currentKey))
-		p.currentKey = ""
-	default:
-		p.bug("Unexpected type at top level: %s", item.typ)
-	}
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
-	switch it.typ {
-	case itemText:
-		return it.val
-	case itemString, itemMultilineString,
-		itemRawString, itemRawMultilineString:
-		s, _ := p.value(it)
-		return s.(string)
-	default:
-		p.bug("Unexpected key type: %s", it.typ)
-		panic("unreachable")
-	}
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
-	switch it.typ {
-	case itemString:
-		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
-	case itemMultilineString:
-		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
-		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
-	case itemRawString:
-		return it.val, p.typeOfPrimitive(it)
-	case itemRawMultilineString:
-		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
-	case itemBool:
-		switch it.val {
-		case "true":
-			return true, p.typeOfPrimitive(it)
-		case "false":
-			return false, p.typeOfPrimitive(it)
-		}
-		p.bug("Expected boolean value, but got '%s'.", it.val)
-	case itemInteger:
-		if !numUnderscoresOK(it.val) {
-			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
-				it.val)
-		}
-		val := strings.Replace(it.val, "_", "", -1)
-		num, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			// Distinguish integer values. Normally, it'd be a bug if the lexer
-			// provides an invalid integer, but it's possible that the number is
-			// out of range of valid values (which the lexer cannot determine).
-			// So mark the former as a bug but the latter as a legitimate user
-			// error.
-			if e, ok := err.(*strconv.NumError); ok &&
-				e.Err == strconv.ErrRange {
-
-				p.panicf("Integer '%s' is out of the range of 64-bit "+
-					"signed integers.", it.val)
-			} else {
-				p.bug("Expected integer value, but got '%s'.", it.val)
-			}
-		}
-		return num, p.typeOfPrimitive(it)
-	case itemFloat:
-		parts := strings.FieldsFunc(it.val, func(r rune) bool {
-			switch r {
-			case '.', 'e', 'E':
-				return true
-			}
-			return false
-		})
-		for _, part := range parts {
-			if !numUnderscoresOK(part) {
-				p.panicf("Invalid float %q: underscores must be "+
-					"surrounded by digits", it.val)
-			}
-		}
-		if !numPeriodsOK(it.val) {
-			// As a special case, numbers like '123.' or '1.e2',
-			// which are valid as far as Go/strconv are concerned,
-			// must be rejected because TOML says that a fractional
-			// part consists of '.' followed by 1+ digits.
-			p.panicf("Invalid float %q: '.' must be followed "+
-				"by one or more digits", it.val)
-		}
-		val := strings.Replace(it.val, "_", "", -1)
-		num, err := strconv.ParseFloat(val, 64)
-		if err != nil {
-			if e, ok := err.(*strconv.NumError); ok &&
-				e.Err == strconv.ErrRange {
-
-				p.panicf("Float '%s' is out of the range of 64-bit "+
-					"IEEE-754 floating-point numbers.", it.val)
-			} else {
-				p.panicf("Invalid float value: %q", it.val)
-			}
-		}
-		return num, p.typeOfPrimitive(it)
-	case itemDatetime:
-		var t time.Time
-		var ok bool
-		var err error
-		for _, format := range []string{
-			"2006-01-02T15:04:05Z07:00",
-			"2006-01-02T15:04:05",
-			"2006-01-02",
-		} {
-			t, err = time.ParseInLocation(format, it.val, time.Local)
-			if err == nil {
-				ok = true
-				break
-			}
-		}
-		if !ok {
-			p.panicf("Invalid TOML Datetime: %q.", it.val)
-		}
-		return t, p.typeOfPrimitive(it)
-	case itemArray:
-		array := make([]interface{}, 0)
-		types := make([]tomlType, 0)
-
-		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
-			if it.typ == itemCommentStart {
-				p.expect(itemText)
-				continue
-			}
-
-			val, typ := p.value(it)
-			array = append(array, val)
-			types = append(types, typ)
-		}
-		return array, p.typeOfArray(types)
-	case itemInlineTableStart:
-		var (
-			hash         = make(map[string]interface{})
-			outerContext = p.context
-			outerKey     = p.currentKey
-		)
-
-		p.context = append(p.context, p.currentKey)
-		p.currentKey = ""
-		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
-			if it.typ != itemKeyStart {
-				p.bug("Expected key start but instead found %q, around line %d",
-					it.val, p.approxLine)
-			}
-			if it.typ == itemCommentStart {
-				p.expect(itemText)
-				continue
-			}
-
-			// retrieve key
-			k := p.next()
-			p.approxLine = k.line
-			kname := p.keyString(k)
-
-			// retrieve value
-			p.currentKey = kname
-			val, typ := p.value(p.next())
-			// make sure we keep metadata up to date
-			p.setType(kname, typ)
-			p.ordered = append(p.ordered, p.context.add(p.currentKey))
-			hash[kname] = val
-		}
-		p.context = outerContext
-		p.currentKey = outerKey
-		return hash, tomlHash
-	}
-	p.bug("Unexpected value type: %s", it.typ)
-	panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
-	accept := false
-	for _, r := range s {
-		if r == '_' {
-			if !accept {
-				return false
-			}
-			accept = false
-			continue
-		}
-		accept = true
-	}
-	return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
-	period := false
-	for _, r := range s {
-		if period && !isDigit(r) {
-			return false
-		}
-		period = r == '.'
-	}
-	return !period
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
-	var ok bool
-
-	// Always start at the top level and drill down for our context.
-	hashContext := p.mapping
-	keyContext := make(Key, 0)
-
-	// We only need implicit hashes for key[0:-1]
-	for _, k := range key[0 : len(key)-1] {
-		_, ok = hashContext[k]
-		keyContext = append(keyContext, k)
-
-		// No key? Make an implicit hash and move on.
-		if !ok {
-			p.addImplicit(keyContext)
-			hashContext[k] = make(map[string]interface{})
-		}
-
-		// If the hash context is actually an array of tables, then set
-		// the hash context to the last element in that array.
-		//
-		// Otherwise, it better be a table, since this MUST be a key group (by
-		// virtue of it not being the last element in a key).
-		switch t := hashContext[k].(type) {
-		case []map[string]interface{}:
-			hashContext = t[len(t)-1]
-		case map[string]interface{}:
-			hashContext = t
-		default:
-			p.panicf("Key '%s' was already created as a hash.", keyContext)
-		}
-	}
-
-	p.context = keyContext
-	if array {
-		// If this is the first element for this array, then allocate a new
-		// list of tables for it.
-		k := key[len(key)-1]
-		if _, ok := hashContext[k]; !ok {
-			hashContext[k] = make([]map[string]interface{}, 0, 5)
-		}
-
-		// Add a new table. But make sure the key hasn't already been used
-		// for something else.
-		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
-			hashContext[k] = append(hash, make(map[string]interface{}))
-		} else {
-			p.panicf("Key '%s' was already created and cannot be used as "+
-				"an array.", keyContext)
-		}
-	} else {
-		p.setValue(key[len(key)-1], make(map[string]interface{}))
-	}
-	p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, accounting for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
-	var tmpHash interface{}
-	var ok bool
-
-	hash := p.mapping
-	keyContext := make(Key, 0)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-		if tmpHash, ok = hash[k]; !ok {
-			p.bug("Context for key '%s' has not been established.", keyContext)
-		}
-		switch t := tmpHash.(type) {
-		case []map[string]interface{}:
-			// The context is a table of hashes. Pick the most recent table
-			// defined as the current hash.
-			hash = t[len(t)-1]
-		case map[string]interface{}:
-			hash = t
-		default:
-			p.bug("Expected hash to have type 'map[string]interface{}', but "+
-				"it has '%T' instead.", tmpHash)
-		}
-	}
-	keyContext = append(keyContext, key)
-
-	if _, ok := hash[key]; ok {
-		// Typically, if the given key has already been set, then we have
-		// to raise an error since duplicate keys are disallowed. However,
-		// it's possible that a key was previously defined implicitly. In this
-		// case, it is allowed to be redefined concretely. (See the
-		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
-		//
-		// But we have to make sure to stop marking it as an implicit. (So that
-		// another redefinition provokes an error.)
-		//
-		// Note that since it has already been defined (as a hash), we don't
-		// want to overwrite it. So our business is done.
-		if p.isImplicit(keyContext) {
-			p.removeImplicit(keyContext)
-			return
-		}
-
-		// Otherwise, we have a concrete key trying to override a previous
-		// key, which is *always* wrong.
-		p.panicf("Key '%s' has already been defined.", keyContext)
-	}
-	hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
-	keyContext := make(Key, 0, len(p.context)+1)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-	}
-	if len(key) > 0 { // allow type setting for hashes
-		keyContext = append(keyContext, key)
-	}
-	p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
-	p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
-	p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
-	return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
-	if len(p.currentKey) == 0 {
-		return p.context.String()
-	}
-	if len(p.context) == 0 {
-		return p.currentKey
-	}
-	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
-	if len(s) == 0 || s[0] != '\n' {
-		return s
-	}
-	return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
-	esc := strings.Split(s, "\\\n")
-	if len(esc) > 1 {
-		for i := 1; i < len(esc); i++ {
-			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
-		}
-	}
-	return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
-	var replaced []rune
-	s := []byte(str)
-	r := 0
-	for r < len(s) {
-		if s[r] != '\\' {
-			c, size := utf8.DecodeRune(s[r:])
-			r += size
-			replaced = append(replaced, c)
-			continue
-		}
-		r += 1
-		if r >= len(s) {
-			p.bug("Escape sequence at end of string.")
-			return ""
-		}
-		switch s[r] {
-		default:
-			p.bug("Expected valid escape code after \\, but got %q.", s[r])
-			return ""
-		case 'b':
-			replaced = append(replaced, rune(0x0008))
-			r += 1
-		case 't':
-			replaced = append(replaced, rune(0x0009))
-			r += 1
-		case 'n':
-			replaced = append(replaced, rune(0x000A))
-			r += 1
-		case 'f':
-			replaced = append(replaced, rune(0x000C))
-			r += 1
-		case 'r':
-			replaced = append(replaced, rune(0x000D))
-			r += 1
-		case '"':
-			replaced = append(replaced, rune(0x0022))
-			r += 1
-		case '\\':
-			replaced = append(replaced, rune(0x005C))
-			r += 1
-		case 'u':
-			// At this point, we know we have a Unicode escape of the form
-			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
-			// for us.)
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
-			replaced = append(replaced, escaped)
-			r += 5
-		case 'U':
-			// At this point, we know we have a Unicode escape of the form
-			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
-			// for us.)
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
-			replaced = append(replaced, escaped)
-			r += 9
-		}
-	}
-	return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
-	s := string(bs)
-	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
-	if err != nil {
-		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
-			"lexer claims it's OK: %s", s, err)
-	}
-	if !utf8.ValidRune(rune(hex)) {
-		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
-	}
-	return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
-	return ty == itemString || ty == itemMultilineString ||
-		ty == itemRawString || ty == itemRawMultilineString
-}
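
This parser, together with the lexer above, sits behind the package's public decoding entry points, which downstream callers keep using unchanged through the module dependency. A minimal round trip through toml.Decode, with an illustrative server struct rather than anything from this repository:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// server is illustrative; any struct with matching `toml` tags works.
type server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	const data = `
host = "127.0.0.1"
port = 8080
`
	var s server
	// toml.Decode runs the lexer and parser shown in the hunks above.
	if _, err := toml.Decode(data, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:%d\n", s.Host, s.Port) // 127.0.0.1:8080
}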

+ 0 - 1
vendor/github.com/BurntSushi/toml/session.vim

@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1

+ 0 - 91
vendor/github.com/BurntSushi/toml/type_check.go

@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be moving
-// toward adding real composite types.
-type tomlType interface {
-	typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
-	if t1 == nil || t2 == nil {
-		return false
-	}
-	return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
-	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
-	return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
-	return btype.typeString()
-}
-
-var (
-	tomlInteger   tomlBaseType = "Integer"
-	tomlFloat     tomlBaseType = "Float"
-	tomlDatetime  tomlBaseType = "Datetime"
-	tomlString    tomlBaseType = "String"
-	tomlBool      tomlBaseType = "Bool"
-	tomlArray     tomlBaseType = "Array"
-	tomlHash      tomlBaseType = "Hash"
-	tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
-	switch lexItem.typ {
-	case itemInteger:
-		return tomlInteger
-	case itemFloat:
-		return tomlFloat
-	case itemDatetime:
-		return tomlDatetime
-	case itemString:
-		return tomlString
-	case itemMultilineString:
-		return tomlString
-	case itemRawString:
-		return tomlString
-	case itemRawMultilineString:
-		return tomlString
-	case itemBool:
-		return tomlBool
-	}
-	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
-	panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
-	// Empty arrays are cool.
-	if len(types) == 0 {
-		return tomlArray
-	}
-
-	theType := types[0]
-	for _, t := range types[1:] {
-		if !typeEqual(theType, t) {
-			p.panicf("Array contains values of type '%s' and '%s', but "+
-				"arrays must be homogeneous.", theType, t)
-		}
-	}
-	return tomlArray
-}

+ 0 - 242
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
-	"reflect"
-	"sort"
-	"sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
-	name  string       // the name of the field (`toml` tag included)
-	tag   bool         // whether field has a `toml` tag
-	index []int        // represents the depth of an anonymous field
-	typ   reflect.Type // the type of the field
-}
-
-// byName sorts fields by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
-	if x[i].name != x[j].name {
-		return x[i].name < x[j].name
-	}
-	if len(x[i].index) != len(x[j].index) {
-		return len(x[i].index) < len(x[j].index)
-	}
-	if x[i].tag != x[j].tag {
-		return x[i].tag
-	}
-	return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts fields by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
-	for k, xik := range x[i].index {
-		if k >= len(x[j].index) {
-			return false
-		}
-		if xik != x[j].index[k] {
-			return xik < x[j].index[k]
-		}
-	}
-	return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
-	// Anonymous fields to explore at the current level and the next.
-	current := []field{}
-	next := []field{{typ: t}}
-
-	// Count of queued names for current level and the next.
-	count := map[reflect.Type]int{}
-	nextCount := map[reflect.Type]int{}
-
-	// Types already visited at an earlier level.
-	visited := map[reflect.Type]bool{}
-
-	// Fields found.
-	var fields []field
-
-	for len(next) > 0 {
-		current, next = next, current[:0]
-		count, nextCount = nextCount, map[reflect.Type]int{}
-
-		for _, f := range current {
-			if visited[f.typ] {
-				continue
-			}
-			visited[f.typ] = true
-
-			// Scan f.typ for fields to include.
-			for i := 0; i < f.typ.NumField(); i++ {
-				sf := f.typ.Field(i)
-				if sf.PkgPath != "" && !sf.Anonymous { // unexported
-					continue
-				}
-				opts := getOptions(sf.Tag)
-				if opts.skip {
-					continue
-				}
-				index := make([]int, len(f.index)+1)
-				copy(index, f.index)
-				index[len(f.index)] = i
-
-				ft := sf.Type
-				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
-					// Follow pointer.
-					ft = ft.Elem()
-				}
-
-				// Record found field and index sequence.
-				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
-					tagged := opts.name != ""
-					name := opts.name
-					if name == "" {
-						name = sf.Name
-					}
-					fields = append(fields, field{name, tagged, index, ft})
-					if count[f.typ] > 1 {
-						// If there were multiple instances, add a second,
-						// so that the annihilation code will see a duplicate.
-						// It only cares about the distinction between 1 or 2,
-						// so don't bother generating any more copies.
-						fields = append(fields, fields[len(fields)-1])
-					}
-					continue
-				}
-
-				// Record new anonymous struct to explore in next round.
-				nextCount[ft]++
-				if nextCount[ft] == 1 {
-					f := field{name: ft.Name(), index: index, typ: ft}
-					next = append(next, f)
-				}
-			}
-		}
-	}
-
-	sort.Sort(byName(fields))
-
-	// Delete all fields that are hidden by the Go rules for embedded fields,
-	// except that fields with TOML tags are promoted.
-
-	// The fields are sorted in primary order of name, secondary order
-	// of field index length. Loop over names; for each name, delete
-	// hidden fields by choosing the one dominant field that survives.
-	out := fields[:0]
-	for advance, i := 0, 0; i < len(fields); i += advance {
-		// One iteration per name.
-		// Find the sequence of fields with the name of this first field.
-		fi := fields[i]
-		name := fi.name
-		for advance = 1; i+advance < len(fields); advance++ {
-			fj := fields[i+advance]
-			if fj.name != name {
-				break
-			}
-		}
-		if advance == 1 { // Only one field with this name
-			out = append(out, fi)
-			continue
-		}
-		dominant, ok := dominantField(fields[i : i+advance])
-		if ok {
-			out = append(out, dominant)
-		}
-	}
-
-	fields = out
-	sort.Sort(byIndex(fields))
-
-	return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
-	// The fields are sorted in increasing index-length order. The winner
-	// must therefore be one with the shortest index length. Drop all
-	// longer entries, which is easy: just truncate the slice.
-	length := len(fields[0].index)
-	tagged := -1 // Index of first tagged field.
-	for i, f := range fields {
-		if len(f.index) > length {
-			fields = fields[:i]
-			break
-		}
-		if f.tag {
-			if tagged >= 0 {
-				// Multiple tagged fields at the same level: conflict.
-				// Return no field.
-				return field{}, false
-			}
-			tagged = i
-		}
-	}
-	if tagged >= 0 {
-		return fields[tagged], true
-	}
-	// All remaining fields have the same length. If there's more than one,
-	// we have a conflict (two fields named "X" at the same level) and we
-	// return no field.
-	if len(fields) > 1 {
-		return field{}, false
-	}
-	return fields[0], true
-}
-
-var fieldCache struct {
-	sync.RWMutex
-	m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
-	fieldCache.RLock()
-	f := fieldCache.m[t]
-	fieldCache.RUnlock()
-	if f != nil {
-		return f
-	}
-
-	// Compute fields without lock.
-	// Might duplicate effort but won't hold other computations back.
-	f = typeFields(t)
-	if f == nil {
-		f = []field{}
-	}
-
-	fieldCache.Lock()
-	if fieldCache.m == nil {
-		fieldCache.m = map[reflect.Type][]field{}
-	}
-	fieldCache.m[t] = f
-	fieldCache.Unlock()
-	return f
-}
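
cachedTypeFields above uses a common Go caching idiom: read under an RLock on the fast path, recompute outside any lock (tolerating duplicated work), then store under the write lock. A self-contained sketch of the same idiom with a hypothetical string cache instead of reflect.Type keys:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// cache mirrors the fieldCache pattern: an embedded RWMutex guarding a
// lazily initialised map.
var cache struct {
	sync.RWMutex
	m map[string]string
}

func cachedUpper(s string) string {
	cache.RLock()
	v, ok := cache.m[s]
	cache.RUnlock()
	if ok {
		return v
	}

	// Compute without holding the lock; duplicated effort is acceptable
	// and never blocks other readers.
	v = strings.ToUpper(s)

	cache.Lock()
	if cache.m == nil {
		cache.m = map[string]string{}
	}
	cache.m[s] = v
	cache.Unlock()
	return v
}

func main() {
	fmt.Println(cachedUpper("toml"), cachedUpper("toml"))
}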

+ 0 - 41
vendor/github.com/emirpasic/gods/LICENSE

@@ -1,41 +0,0 @@
-Copyright (c) 2015, Emir Pasic
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
-
-AVL Tree:
-
-Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 0 - 35
vendor/github.com/emirpasic/gods/containers/containers.go

@@ -1,35 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package containers provides core interfaces and functions for data structures.
-//
-// Container is the base interface for all data structures to implement.
-//
-// Iterators provide stateful iterators.
-//
-// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
-//
-// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
-package containers
-
-import "github.com/emirpasic/gods/utils"
-
-// Container is base interface that all data structures implement.
-type Container interface {
-	Empty() bool
-	Size() int
-	Clear()
-	Values() []interface{}
-}
-
-// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
-// Does not affect the ordering of elements within the container.
-func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
-	values := container.Values()
-	if len(values) < 2 {
-		return values
-	}
-	utils.Sort(values, comparator)
-	return values
-}
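
A minimal usage sketch for GetSortedValues, assuming the gods arraylist and utils.IntComparator: the sorted slice is a copy, so the container's own ordering is not affected.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/containers"
	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New(3, 1, 2) // any Container implementation works here

	sorted := containers.GetSortedValues(list, utils.IntComparator)

	fmt.Println(sorted)        // [1 2 3]
	fmt.Println(list.Values()) // [3 1 2] — the container itself is untouched
}
```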

+ 0 - 61
vendor/github.com/emirpasic/gods/containers/enumerable.go

@@ -1,61 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
-type EnumerableWithIndex interface {
-	// Each calls the given function once for each element, passing that element's index and value.
-	Each(func(index int, value interface{}))
-
-	// Map invokes the given function once for each element and returns a
-	// container containing the values returned by the given function.
-	// TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
-	// Map(func(index int, value interface{}) interface{}) Container
-
-	// Select returns a new container containing all elements for which the given function returns a true value.
-	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-	// Select(func(index int, value interface{}) bool) Container
-
-	// Any passes each element of the container to the given function and
-	// returns true if the function ever returns true for any element.
-	Any(func(index int, value interface{}) bool) bool
-
-	// All passes each element of the container to the given function and
-	// returns true if the function returns true for all elements.
-	All(func(index int, value interface{}) bool) bool
-
-	// Find passes each element of the container to the given function and returns
-	// the first (index,value) for which the function is true or -1,nil otherwise
-	// if no element matches the criteria.
-	Find(func(index int, value interface{}) bool) (int, interface{})
-}
-
-// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
-type EnumerableWithKey interface {
-	// Each calls the given function once for each element, passing that element's key and value.
-	Each(func(key interface{}, value interface{}))
-
-	// Map invokes the given function once for each element and returns a container
-	// containing the values returned by the given function as key/value pairs.
-	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-	// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
-
-	// Select returns a new container containing all elements for which the given function returns a true value.
-	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-	// Select(func(key interface{}, value interface{}) bool) Container
-
-	// Any passes each element of the container to the given function and
-	// returns true if the function ever returns true for any element.
-	Any(func(key interface{}, value interface{}) bool) bool
-
-	// All passes each element of the container to the given function and
-	// returns true if the function returns true for all elements.
-	All(func(key interface{}, value interface{}) bool) bool
-
-	// Find passes each element of the container to the given function and returns
-	// the first (key,value) for which the function is true or nil,nil otherwise if no element
-	// matches the criteria.
-	Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
-}

+ 0 - 109
vendor/github.com/emirpasic/gods/containers/iterator.go

@@ -1,109 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
-type IteratorWithIndex interface {
-	// Next moves the iterator to the next element and returns true if there was a next element in the container.
-	// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-	// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-	// Modifies the state of the iterator.
-	Next() bool
-
-	// Value returns the current element's value.
-	// Does not modify the state of the iterator.
-	Value() interface{}
-
-	// Index returns the current element's index.
-	// Does not modify the state of the iterator.
-	Index() int
-
-	// Begin resets the iterator to its initial state (one-before-first)
-	// Call Next() to fetch the first element if any.
-	Begin()
-
-	// First moves the iterator to the first element and returns true if there was a first element in the container.
-	// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-	// Modifies the state of the iterator.
-	First() bool
-}
-
-// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-type IteratorWithKey interface {
-	// Next moves the iterator to the next element and returns true if there was a next element in the container.
-	// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
-	// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-	// Modifies the state of the iterator.
-	Next() bool
-
-	// Value returns the current element's value.
-	// Does not modify the state of the iterator.
-	Value() interface{}
-
-	// Key returns the current element's key.
-	// Does not modify the state of the iterator.
-	Key() interface{}
-
-	// Begin resets the iterator to its initial state (one-before-first)
-	// Call Next() to fetch the first element if any.
-	Begin()
-
-	// First moves the iterator to the first element and returns true if there was a first element in the container.
-	// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
-	// Modifies the state of the iterator.
-	First() bool
-}
-
-// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
-//
-// Essentially it is the same as IteratorWithIndex, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-//
-// End() function to move the iterator past the last element (one-past-the-end).
-type ReverseIteratorWithIndex interface {
-	// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-	// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-	// Modifies the state of the iterator.
-	Prev() bool
-
-	// End moves the iterator past the last element (one-past-the-end).
-	// Call Prev() to fetch the last element if any.
-	End()
-
-	// Last moves the iterator to the last element and returns true if there was a last element in the container.
-	// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-	// Modifies the state of the iterator.
-	Last() bool
-
-	IteratorWithIndex
-}
-
-// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-//
-// Essentially it is the same as IteratorWithKey, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-type ReverseIteratorWithKey interface {
-	// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-	// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
-	// Modifies the state of the iterator.
-	Prev() bool
-
-	// End moves the iterator past the last element (one-past-the-end).
-	// Call Prev() to fetch the last element if any.
-	End()
-
-	// Last moves the iterator to the last element and returns true if there was a last element in the container.
-	// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
-	// Modifies the state of the iterator.
-	Last() bool
-
-	IteratorWithKey
-}

+ 0 - 17
vendor/github.com/emirpasic/gods/containers/serialization.go

@@ -1,17 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// JSONSerializer provides JSON serialization
-type JSONSerializer interface {
-	// ToJSON outputs the JSON representation of containers's elements.
-	ToJSON() ([]byte, error)
-}
-
-// JSONDeserializer provides JSON deserialization
-type JSONDeserializer interface {
-	// FromJSON populates containers's elements from the input JSON representation.
-	FromJSON([]byte) error
-}

+ 0 - 228
vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go

@@ -1,228 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package arraylist implements the array list.
-//
-// Structure is not thread safe.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package arraylist
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/emirpasic/gods/lists"
-	"github.com/emirpasic/gods/utils"
-)
-
-func assertListImplementation() {
-	var _ lists.List = (*List)(nil)
-}
-
-// List holds the elements in a slice
-type List struct {
-	elements []interface{}
-	size     int
-}
-
-const (
-	growthFactor = float32(2.0)  // growth by 100%
-	shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
-)
-
-// New instantiates a new list and adds the passed values, if any, to the list
-func New(values ...interface{}) *List {
-	list := &List{}
-	if len(values) > 0 {
-		list.Add(values...)
-	}
-	return list
-}
-
-// Add appends a value at the end of the list
-func (list *List) Add(values ...interface{}) {
-	list.growBy(len(values))
-	for _, value := range values {
-		list.elements[list.size] = value
-		list.size++
-	}
-}
-
-// Get returns the element at index.
-// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
-func (list *List) Get(index int) (interface{}, bool) {
-
-	if !list.withinRange(index) {
-		return nil, false
-	}
-
-	return list.elements[index], true
-}
-
-// Remove removes the element at the given index from the list.
-func (list *List) Remove(index int) {
-
-	if !list.withinRange(index) {
-		return
-	}
-
-	list.elements[index] = nil                                    // cleanup reference
-	copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
-	list.size--
-
-	list.shrink()
-}
-
-// Contains checks if elements (one or more) are present in the list.
-// All elements have to be present in the list for the method to return true.
-// Time complexity is O(n^2).
-// Returns true if no arguments are passed at all, i.e. the list is always a superset of the empty set.
-func (list *List) Contains(values ...interface{}) bool {
-
-	for _, searchValue := range values {
-		found := false
-		for _, element := range list.elements {
-			if element == searchValue {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
-// Values returns all elements in the list.
-func (list *List) Values() []interface{} {
-	newElements := make([]interface{}, list.size, list.size)
-	copy(newElements, list.elements[:list.size])
-	return newElements
-}
-
-// IndexOf returns the index of the provided element, or -1 if it is not found.
-func (list *List) IndexOf(value interface{}) int {
-	if list.size == 0 {
-		return -1
-	}
-	for index, element := range list.elements {
-		if element == value {
-			return index
-		}
-	}
-	return -1
-}
-
-// Empty returns true if list does not contain any elements.
-func (list *List) Empty() bool {
-	return list.size == 0
-}
-
-// Size returns number of elements within the list.
-func (list *List) Size() int {
-	return list.size
-}
-
-// Clear removes all elements from the list.
-func (list *List) Clear() {
-	list.size = 0
-	list.elements = []interface{}{}
-}
-
-// Sort sorts values (in-place) using the given comparator.
-func (list *List) Sort(comparator utils.Comparator) {
-	if len(list.elements) < 2 {
-		return
-	}
-	utils.Sort(list.elements[:list.size], comparator)
-}
-
-// Swap swaps the two values at the specified positions.
-func (list *List) Swap(i, j int) {
-	if list.withinRange(i) && list.withinRange(j) {
-		list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
-	}
-}
-
-// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Insert(index int, values ...interface{}) {
-
-	if !list.withinRange(index) {
-		// Append
-		if index == list.size {
-			list.Add(values...)
-		}
-		return
-	}
-
-	l := len(values)
-	list.growBy(l)
-	list.size += l
-	copy(list.elements[index+l:], list.elements[index:list.size-l])
-	copy(list.elements[index:], values)
-}
-
-// Set the value at specified index
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Set(index int, value interface{}) {
-
-	if !list.withinRange(index) {
-		// Append
-		if index == list.size {
-			list.Add(value)
-		}
-		return
-	}
-
-	list.elements[index] = value
-}
-
-// String returns a string representation of container
-func (list *List) String() string {
-	str := "ArrayList\n"
-	values := []string{}
-	for _, value := range list.elements[:list.size] {
-		values = append(values, fmt.Sprintf("%v", value))
-	}
-	str += strings.Join(values, ", ")
-	return str
-}
-
-// Check that the index is within bounds of the list
-func (list *List) withinRange(index int) bool {
-	return index >= 0 && index < list.size
-}
-
-func (list *List) resize(cap int) {
-	newElements := make([]interface{}, cap, cap)
-	copy(newElements, list.elements)
-	list.elements = newElements
-}
-
-// Expand the array if necessary, i.e. capacity will be reached if we add n elements
-func (list *List) growBy(n int) {
-	// When capacity is reached, grow by a factor of growthFactor and add number of elements
-	currentCapacity := cap(list.elements)
-	if list.size+n >= currentCapacity {
-		newCapacity := int(growthFactor * float32(currentCapacity+n))
-		list.resize(newCapacity)
-	}
-}
-
-// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
-func (list *List) shrink() {
-	if shrinkFactor == 0.0 {
-		return
-	}
-	// Shrink when size is at shrinkFactor * capacity
-	currentCapacity := cap(list.elements)
-	if list.size <= int(float32(currentCapacity)*shrinkFactor) {
-		list.resize(list.size)
-	}
-}
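
A short sketch of typical ArrayList usage covering Add, Insert, Contains, IndexOf, Remove and Sort.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
	"github.com/emirpasic/gods/utils"
)

func main() {
	list := arraylist.New("b", "c")
	list.Add("d")                   // b, c, d
	list.Insert(0, "a")             // a, b, c, d — shifts existing elements right
	fmt.Println(list.Contains("c")) // true
	fmt.Println(list.IndexOf("d"))  // 3
	list.Remove(1)                  // a, c, d
	list.Sort(utils.StringComparator)

	fmt.Println(list.Values(), list.Size()) // [a c d] 3
}
```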

+ 0 - 79
vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go

@@ -1,79 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-func assertEnumerableImplementation() {
-	var _ containers.EnumerableWithIndex = (*List)(nil)
-}
-
-// Each calls the given function once for each element, passing that element's index and value.
-func (list *List) Each(f func(index int, value interface{})) {
-	iterator := list.Iterator()
-	for iterator.Next() {
-		f(iterator.Index(), iterator.Value())
-	}
-}
-
-// Map invokes the given function once for each element and returns a
-// container containing the values returned by the given function.
-func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
-	newList := &List{}
-	iterator := list.Iterator()
-	for iterator.Next() {
-		newList.Add(f(iterator.Index(), iterator.Value()))
-	}
-	return newList
-}
-
-// Select returns a new container containing all elements for which the given function returns a true value.
-func (list *List) Select(f func(index int, value interface{}) bool) *List {
-	newList := &List{}
-	iterator := list.Iterator()
-	for iterator.Next() {
-		if f(iterator.Index(), iterator.Value()) {
-			newList.Add(iterator.Value())
-		}
-	}
-	return newList
-}
-
-// Any passes each element of the collection to the given function and
-// returns true if the function ever returns true for any element.
-func (list *List) Any(f func(index int, value interface{}) bool) bool {
-	iterator := list.Iterator()
-	for iterator.Next() {
-		if f(iterator.Index(), iterator.Value()) {
-			return true
-		}
-	}
-	return false
-}
-
-// All passes each element of the collection to the given function and
-// returns true if the function returns true for all elements.
-func (list *List) All(f func(index int, value interface{}) bool) bool {
-	iterator := list.Iterator()
-	for iterator.Next() {
-		if !f(iterator.Index(), iterator.Value()) {
-			return false
-		}
-	}
-	return true
-}
-
-// Find passes each element of the container to the given function and returns
-// the first (index,value) for which the function is true or -1,nil otherwise
-// if no element matches the criteria.
-func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
-	iterator := list.Iterator()
-	for iterator.Next() {
-		if f(iterator.Index(), iterator.Value()) {
-			return iterator.Index(), iterator.Value()
-		}
-	}
-	return -1, nil
-}
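
A brief sketch of the enumerable helpers (Select, Map, Find) as exposed by ArrayList.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New(1, 2, 3, 4)

	// Select keeps elements for which the predicate returns true.
	evens := list.Select(func(_ int, v interface{}) bool { return v.(int)%2 == 0 })

	// Map builds a new list from the returned values.
	doubled := list.Map(func(_ int, v interface{}) interface{} { return v.(int) * 2 })

	// Find returns the first (index, value) matching the predicate.
	index, value := list.Find(func(_ int, v interface{}) bool { return v.(int) > 2 })

	fmt.Println(evens.Values())   // [2 4]
	fmt.Println(doubled.Values()) // [2 4 6 8]
	fmt.Println(index, value)     // 2 3
}
```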

+ 0 - 83
vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go

@@ -1,83 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-func assertIteratorImplementation() {
-	var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
-}
-
-// Iterator holding the iterator's state
-type Iterator struct {
-	list  *List
-	index int
-}
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-func (list *List) Iterator() Iterator {
-	return Iterator{list: list, index: -1}
-}
-
-// Next moves the iterator to the next element and returns true if there was a next element in the container.
-// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-// Modifies the state of the iterator.
-func (iterator *Iterator) Next() bool {
-	if iterator.index < iterator.list.size {
-		iterator.index++
-	}
-	return iterator.list.withinRange(iterator.index)
-}
-
-// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Prev() bool {
-	if iterator.index >= 0 {
-		iterator.index--
-	}
-	return iterator.list.withinRange(iterator.index)
-}
-
-// Value returns the current element's value.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Value() interface{} {
-	return iterator.list.elements[iterator.index]
-}
-
-// Index returns the current element's index.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Index() int {
-	return iterator.index
-}
-
-// Begin resets the iterator to its initial state (one-before-first)
-// Call Next() to fetch the first element if any.
-func (iterator *Iterator) Begin() {
-	iterator.index = -1
-}
-
-// End moves the iterator past the last element (one-past-the-end).
-// Call Prev() to fetch the last element if any.
-func (iterator *Iterator) End() {
-	iterator.index = iterator.list.size
-}
-
-// First moves the iterator to the first element and returns true if there was a first element in the container.
-// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) First() bool {
-	iterator.Begin()
-	return iterator.Next()
-}
-
-// Last moves the iterator to the last element and returns true if there was a last element in the container.
-// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Last() bool {
-	iterator.End()
-	return iterator.Prev()
-}
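
A small sketch of forward and reverse iteration with the ArrayList iterator: Begin/Next walk forward from one-before-first, End/Prev walk backward from one-past-the-end.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New("a", "b", "c")

	it := list.Iterator()
	for it.Next() { // starts one-before-first, so the first Next() lands on index 0
		fmt.Println(it.Index(), it.Value())
	}

	it.End()
	for it.Prev() { // starts one-past-the-end, so the first Prev() lands on the last element
		fmt.Println(it.Index(), it.Value())
	}
}
```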

+ 0 - 29
vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go

@@ -1,29 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import (
-	"encoding/json"
-	"github.com/emirpasic/gods/containers"
-)
-
-func assertSerializationImplementation() {
-	var _ containers.JSONSerializer = (*List)(nil)
-	var _ containers.JSONDeserializer = (*List)(nil)
-}
-
-// ToJSON outputs the JSON representation of list's elements.
-func (list *List) ToJSON() ([]byte, error) {
-	return json.Marshal(list.elements[:list.size])
-}
-
-// FromJSON populates list's elements from the input JSON representation.
-func (list *List) FromJSON(data []byte) error {
-	err := json.Unmarshal(data, &list.elements)
-	if err == nil {
-		list.size = len(list.elements)
-	}
-	return err
-}
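
A short round-trip sketch of the JSON serialization helpers (ToJSON/FromJSON).

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/lists/arraylist"
)

func main() {
	list := arraylist.New("a", "b", "c")

	data, err := list.ToJSON() // ["a","b","c"]
	if err != nil {
		panic(err)
	}

	restored := arraylist.New()
	if err := restored.FromJSON(data); err != nil {
		panic(err)
	}

	fmt.Println(string(data), restored.Values()) // ["a","b","c"] [a b c]
}
```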

+ 0 - 33
vendor/github.com/emirpasic/gods/lists/lists.go

@@ -1,33 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lists provides an abstract List interface.
-//
-// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream.  Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package lists
-
-import (
-	"github.com/emirpasic/gods/containers"
-	"github.com/emirpasic/gods/utils"
-)
-
-// List interface that all lists implement
-type List interface {
-	Get(index int) (interface{}, bool)
-	Remove(index int)
-	Add(values ...interface{})
-	Contains(values ...interface{}) bool
-	Sort(comparator utils.Comparator)
-	Swap(index1, index2 int)
-	Insert(index int, values ...interface{})
-	Set(index int, value interface{})
-
-	containers.Container
-	// Empty() bool
-	// Size() int
-	// Clear()
-	// Values() []interface{}
-}

+ 0 - 163
vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go

@@ -1,163 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package binaryheap implements a binary heap backed by array list.
-//
-// Comparator defines this heap as either min or max heap.
-//
-// Structure is not thread safe.
-//
-// References: http://en.wikipedia.org/wiki/Binary_heap
-package binaryheap
-
-import (
-	"fmt"
-	"github.com/emirpasic/gods/lists/arraylist"
-	"github.com/emirpasic/gods/trees"
-	"github.com/emirpasic/gods/utils"
-	"strings"
-)
-
-func assertTreeImplementation() {
-	var _ trees.Tree = (*Heap)(nil)
-}
-
-// Heap holds elements in an array-list
-type Heap struct {
-	list       *arraylist.List
-	Comparator utils.Comparator
-}
-
-// NewWith instantiates a new empty heap tree with the custom comparator.
-func NewWith(comparator utils.Comparator) *Heap {
-	return &Heap{list: arraylist.New(), Comparator: comparator}
-}
-
-// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
-func NewWithIntComparator() *Heap {
-	return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
-}
-
-// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
-func NewWithStringComparator() *Heap {
-	return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
-}
-
-// Push adds a value onto the heap and bubbles it up accordingly.
-func (heap *Heap) Push(values ...interface{}) {
-	if len(values) == 1 {
-		heap.list.Add(values[0])
-		heap.bubbleUp()
-	} else {
-		// Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
-		for _, value := range values {
-			heap.list.Add(value)
-		}
-		size := heap.list.Size()/2 + 1
-		for i := size; i >= 0; i-- {
-			heap.bubbleDownIndex(i)
-		}
-	}
-}
-
-// Pop removes top element on heap and returns it, or nil if heap is empty.
-// Second return parameter is true, unless the heap was empty and there was nothing to pop.
-func (heap *Heap) Pop() (value interface{}, ok bool) {
-	value, ok = heap.list.Get(0)
-	if !ok {
-		return
-	}
-	lastIndex := heap.list.Size() - 1
-	heap.list.Swap(0, lastIndex)
-	heap.list.Remove(lastIndex)
-	heap.bubbleDown()
-	return
-}
-
-// Peek returns top element on the heap without removing it, or nil if heap is empty.
-// Second return parameter is true, unless the heap was empty and there was nothing to peek.
-func (heap *Heap) Peek() (value interface{}, ok bool) {
-	return heap.list.Get(0)
-}
-
-// Empty returns true if heap does not contain any elements.
-func (heap *Heap) Empty() bool {
-	return heap.list.Empty()
-}
-
-// Size returns number of elements within the heap.
-func (heap *Heap) Size() int {
-	return heap.list.Size()
-}
-
-// Clear removes all elements from the heap.
-func (heap *Heap) Clear() {
-	heap.list.Clear()
-}
-
-// Values returns all elements in the heap.
-func (heap *Heap) Values() []interface{} {
-	return heap.list.Values()
-}
-
-// String returns a string representation of container
-func (heap *Heap) String() string {
-	str := "BinaryHeap\n"
-	values := []string{}
-	for _, value := range heap.list.Values() {
-		values = append(values, fmt.Sprintf("%v", value))
-	}
-	str += strings.Join(values, ", ")
-	return str
-}
-
-// Performs the "bubble down" operation. This is to place the element that is at the root
-// of the heap in its correct place so that the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleDown() {
-	heap.bubbleDownIndex(0)
-}
-
-// Performs the "bubble down" operation. This is to place the element that is at the index
-// of the heap in its correct place so that the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleDownIndex(index int) {
-	size := heap.list.Size()
-	for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
-		rightIndex := index<<1 + 2
-		smallerIndex := leftIndex
-		leftValue, _ := heap.list.Get(leftIndex)
-		rightValue, _ := heap.list.Get(rightIndex)
-		if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
-			smallerIndex = rightIndex
-		}
-		indexValue, _ := heap.list.Get(index)
-		smallerValue, _ := heap.list.Get(smallerIndex)
-		if heap.Comparator(indexValue, smallerValue) > 0 {
-			heap.list.Swap(index, smallerIndex)
-		} else {
-			break
-		}
-		index = smallerIndex
-	}
-}
-
-// Performs the "bubble up" operation. This is to place a newly inserted
-// element (i.e. last element in the list) in its correct place so that
-// the heap maintains the min/max-heap order property.
-func (heap *Heap) bubbleUp() {
-	index := heap.list.Size() - 1
-	for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
-		indexValue, _ := heap.list.Get(index)
-		parentValue, _ := heap.list.Get(parentIndex)
-		if heap.Comparator(parentValue, indexValue) <= 0 {
-			break
-		}
-		heap.list.Swap(index, parentIndex)
-		index = parentIndex
-	}
-}
-
-// Check that the index is within bounds of the list
-func (heap *Heap) withinRange(index int) bool {
-	return index >= 0 && index < heap.list.Size()
-}
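
A minimal sketch of the binary heap: with IntComparator it behaves as a min-heap, and a multi-value Push heapifies in a single pass.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
)

func main() {
	heap := binaryheap.NewWithIntComparator() // IntComparator => min-heap
	heap.Push(5, 1, 3)                        // bulk push triggers the heapify path

	top, _ := heap.Peek()
	fmt.Println(top) // 1

	for !heap.Empty() {
		v, _ := heap.Pop()
		fmt.Println(v) // 1, 3, 5 in ascending order
	}
}
```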

+ 0 - 84
vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go

@@ -1,84 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package binaryheap
-
-import "github.com/emirpasic/gods/containers"
-
-func assertIteratorImplementation() {
-	var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
-}
-
-// Iterator holds the iterator's state
-type Iterator struct {
-	heap  *Heap
-	index int
-}
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-func (heap *Heap) Iterator() Iterator {
-	return Iterator{heap: heap, index: -1}
-}
-
-// Next moves the iterator to the next element and returns true if there was a next element in the container.
-// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-// Modifies the state of the iterator.
-func (iterator *Iterator) Next() bool {
-	if iterator.index < iterator.heap.Size() {
-		iterator.index++
-	}
-	return iterator.heap.withinRange(iterator.index)
-}
-
-// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Prev() bool {
-	if iterator.index >= 0 {
-		iterator.index--
-	}
-	return iterator.heap.withinRange(iterator.index)
-}
-
-// Value returns the current element's value.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Value() interface{} {
-	value, _ := iterator.heap.list.Get(iterator.index)
-	return value
-}
-
-// Index returns the current element's index.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Index() int {
-	return iterator.index
-}
-
-// Begin resets the iterator to its initial state (one-before-first)
-// Call Next() to fetch the first element if any.
-func (iterator *Iterator) Begin() {
-	iterator.index = -1
-}
-
-// End moves the iterator past the last element (one-past-the-end).
-// Call Prev() to fetch the last element if any.
-func (iterator *Iterator) End() {
-	iterator.index = iterator.heap.Size()
-}
-
-// First moves the iterator to the first element and returns true if there was a first element in the container.
-// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) First() bool {
-	iterator.Begin()
-	return iterator.Next()
-}
-
-// Last moves the iterator to the last element and returns true if there was a last element in the container.
-// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Last() bool {
-	iterator.End()
-	return iterator.Prev()
-}

+ 0 - 22
vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go

@@ -1,22 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package binaryheap
-
-import "github.com/emirpasic/gods/containers"
-
-func assertSerializationImplementation() {
-	var _ containers.JSONSerializer = (*Heap)(nil)
-	var _ containers.JSONDeserializer = (*Heap)(nil)
-}
-
-// ToJSON outputs the JSON representation of the heap.
-func (heap *Heap) ToJSON() ([]byte, error) {
-	return heap.list.ToJSON()
-}
-
-// FromJSON populates the heap from the input JSON representation.
-func (heap *Heap) FromJSON(data []byte) error {
-	return heap.list.FromJSON(data)
-}

+ 0 - 21
vendor/github.com/emirpasic/gods/trees/trees.go

@@ -1,21 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package trees provides an abstract Tree interface.
-//
-// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
-//
-// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
-package trees
-
-import "github.com/emirpasic/gods/containers"
-
-// Tree interface that all trees implement
-type Tree interface {
-	containers.Container
-	// Empty() bool
-	// Size() int
-	// Clear()
-	// Values() []interface{}
-}

+ 0 - 251
vendor/github.com/emirpasic/gods/utils/comparator.go

@@ -1,251 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utils
-
-import "time"
-
-// Comparator will make type assertion (see IntComparator for example),
-// which will panic if a or b are not of the asserted type.
-//
-// Should return a number:
-//    negative , if a < b
-//    zero     , if a == b
-//    positive , if a > b
-type Comparator func(a, b interface{}) int
-
-// StringComparator provides a fast comparison on strings
-func StringComparator(a, b interface{}) int {
-	s1 := a.(string)
-	s2 := b.(string)
-	min := len(s2)
-	if len(s1) < len(s2) {
-		min = len(s1)
-	}
-	diff := 0
-	for i := 0; i < min && diff == 0; i++ {
-		diff = int(s1[i]) - int(s2[i])
-	}
-	if diff == 0 {
-		diff = len(s1) - len(s2)
-	}
-	if diff < 0 {
-		return -1
-	}
-	if diff > 0 {
-		return 1
-	}
-	return 0
-}
-
-// IntComparator provides a basic comparison on int
-func IntComparator(a, b interface{}) int {
-	aAsserted := a.(int)
-	bAsserted := b.(int)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Int8Comparator provides a basic comparison on int8
-func Int8Comparator(a, b interface{}) int {
-	aAsserted := a.(int8)
-	bAsserted := b.(int8)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Int16Comparator provides a basic comparison on int16
-func Int16Comparator(a, b interface{}) int {
-	aAsserted := a.(int16)
-	bAsserted := b.(int16)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Int32Comparator provides a basic comparison on int32
-func Int32Comparator(a, b interface{}) int {
-	aAsserted := a.(int32)
-	bAsserted := b.(int32)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Int64Comparator provides a basic comparison on int64
-func Int64Comparator(a, b interface{}) int {
-	aAsserted := a.(int64)
-	bAsserted := b.(int64)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// UIntComparator provides a basic comparison on uint
-func UIntComparator(a, b interface{}) int {
-	aAsserted := a.(uint)
-	bAsserted := b.(uint)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// UInt8Comparator provides a basic comparison on uint8
-func UInt8Comparator(a, b interface{}) int {
-	aAsserted := a.(uint8)
-	bAsserted := b.(uint8)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// UInt16Comparator provides a basic comparison on uint16
-func UInt16Comparator(a, b interface{}) int {
-	aAsserted := a.(uint16)
-	bAsserted := b.(uint16)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// UInt32Comparator provides a basic comparison on uint32
-func UInt32Comparator(a, b interface{}) int {
-	aAsserted := a.(uint32)
-	bAsserted := b.(uint32)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// UInt64Comparator provides a basic comparison on uint64
-func UInt64Comparator(a, b interface{}) int {
-	aAsserted := a.(uint64)
-	bAsserted := b.(uint64)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Float32Comparator provides a basic comparison on float32
-func Float32Comparator(a, b interface{}) int {
-	aAsserted := a.(float32)
-	bAsserted := b.(float32)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// Float64Comparator provides a basic comparison on float64
-func Float64Comparator(a, b interface{}) int {
-	aAsserted := a.(float64)
-	bAsserted := b.(float64)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// ByteComparator provides a basic comparison on byte
-func ByteComparator(a, b interface{}) int {
-	aAsserted := a.(byte)
-	bAsserted := b.(byte)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// RuneComparator provides a basic comparison on rune
-func RuneComparator(a, b interface{}) int {
-	aAsserted := a.(rune)
-	bAsserted := b.(rune)
-	switch {
-	case aAsserted > bAsserted:
-		return 1
-	case aAsserted < bAsserted:
-		return -1
-	default:
-		return 0
-	}
-}
-
-// TimeComparator provides a basic comparison on time.Time
-func TimeComparator(a, b interface{}) int {
-	aAsserted := a.(time.Time)
-	bAsserted := b.(time.Time)
-
-	switch {
-	case aAsserted.After(bAsserted):
-		return 1
-	case aAsserted.Before(bAsserted):
-		return -1
-	default:
-		return 0
-	}
-}
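
A small sketch of a custom Comparator following the contract above (negative, zero or positive), used with utils.Sort; the user type and byAge function are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

type user struct {
	name string
	age  int
}

// byAge follows the Comparator contract: negative if a < b, zero if equal,
// positive if a > b. It panics if a or b is not a user.
func byAge(a, b interface{}) int {
	return a.(user).age - b.(user).age
}

func main() {
	people := []interface{}{
		user{"carol", 41},
		user{"alice", 23},
		user{"bob", 34},
	}
	utils.Sort(people, byAge)
	fmt.Println(people) // [{alice 23} {bob 34} {carol 41}]
}
```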

+ 0 - 29
vendor/github.com/emirpasic/gods/utils/sort.go

@@ -1,29 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package utils
-
-import "sort"
-
-// Sort sorts values (in-place) with respect to the given comparator.
-//
-// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
-func Sort(values []interface{}, comparator Comparator) {
-	sort.Sort(sortable{values, comparator})
-}
-
-type sortable struct {
-	values     []interface{}
-	comparator Comparator
-}
-
-func (s sortable) Len() int {
-	return len(s.values)
-}
-func (s sortable) Swap(i, j int) {
-	s.values[i], s.values[j] = s.values[j], s.values[i]
-}
-func (s sortable) Less(i, j int) bool {
-	return s.comparator(s.values[i], s.values[j]) < 0
-}

+ 0 - 47
vendor/github.com/emirpasic/gods/utils/utils.go

@@ -1,47 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package utils provides common utility functions.
-//
-// Provided functionalities:
-// - sorting
-// - comparators
-package utils
-
-import (
-	"fmt"
-	"strconv"
-)
-
-// ToString converts a value to string.
-func ToString(value interface{}) string {
-	switch value.(type) {
-	case string:
-		return value.(string)
-	case int8:
-		return strconv.FormatInt(int64(value.(int8)), 10)
-	case int16:
-		return strconv.FormatInt(int64(value.(int16)), 10)
-	case int32:
-		return strconv.FormatInt(int64(value.(int32)), 10)
-	case int64:
-		return strconv.FormatInt(int64(value.(int64)), 10)
-	case uint8:
-		return strconv.FormatUint(uint64(value.(uint8)), 10)
-	case uint16:
-		return strconv.FormatUint(uint64(value.(uint16)), 10)
-	case uint32:
-		return strconv.FormatUint(uint64(value.(uint32)), 10)
-	case uint64:
-		return strconv.FormatUint(uint64(value.(uint64)), 10)
-	case float32:
-		return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64)
-	case float64:
-		return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
-	case bool:
-		return strconv.FormatBool(value.(bool))
-	default:
-		return fmt.Sprintf("%+v", value)
-	}
-}
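
A quick sketch of ToString across a few value kinds; anything outside the switched types falls back to fmt formatting.

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	fmt.Println(utils.ToString(42))       // "42"
	fmt.Println(utils.ToString(3.14))     // "3.14"
	fmt.Println(utils.ToString(true))     // "true"
	fmt.Println(utils.ToString([]int{1})) // "[1]" — default case uses fmt.Sprintf("%+v", ...)
}
```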

+ 0 - 22
vendor/github.com/franela/goblin/.gitignore

@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe

+ 0 - 7
vendor/github.com/franela/goblin/.travis.yml

@@ -1,7 +0,0 @@
-language: go
-go:
- - 1.6.3
-notifications:
-  email:
-      - ionathan@gmail.com
-      - marcosnils@gmail.com

+ 0 - 19
vendor/github.com/franela/goblin/LICENSE

@@ -1,19 +0,0 @@
-Copyright (c) 2013 Marcos Lilljedahl and Jonathan Leibiusky
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 3
vendor/github.com/franela/goblin/Makefile

@@ -1,3 +0,0 @@
-export GOPATH=$(shell pwd)
-test:
-	go test -v

+ 0 - 149
vendor/github.com/franela/goblin/README.md

@@ -1,149 +0,0 @@
-Goblin
-======
-
-[![Build Status](https://travis-ci.org/franela/goblin.svg)](https://travis-ci.org/franela/goblin)
-[![Go Reportcard](https://goreportcard.com/badge/github.com/franela/goblin)](https://goreportcard.com/report/github.com/franela/goblin)
-[![GoDoc](https://godoc.org/github.com/franela/goblin?status.svg)](https://godoc.org/github.com/franela/goblin)
-[![License](https://img.shields.io/github/license/franela/goblin.svg)](https://github.com/franela/goblin/blob/master/LICENSE.md)
-[![Release](https://img.shields.io/github/release/franela/goblin.svg)](https://github.com/franela/goblin/releases/latest)
-
-
-A [Mocha](http://mochajs.org/)-like BDD testing framework written in Go that requires no additional dependencies, no extensive documentation, and no complicated steps to get it running.
-
-![](https://github.com/marcosnils/goblin/blob/master/goblin_logo.jpg?raw=true)
-
-Why Goblin?
------------
-
-Inspired by the flexibility and simplicity of Node BDD and frustrated by the
-rigidity of the Go way of testing, we wanted to bring a new tool to
-write self-describing and comprehensive tests.
-
-
-
-What do I get with it?
-----------------------
-
-- Run tests as usual with `go test`
-- Colorful reports and beautiful syntax
-- Preserve the exact same syntax and behaviour as Node's Mocha
-- Nest as many `Describe` and `It` blocks as you want
-- Use `Before`, `BeforeEach`, `After` and `AfterEach` to set up and tear down your tests
-- No need to remember confusing parameters in `Describe` and `It` blocks
-- Use a declarative and expressive language to write your tests
-- Plug different assertion libraries
- - [Gomega](https://github.com/onsi/gomega) (supported so far)
-- Skip your tests the same way as you would do in Mocha
-- Automatic terminal support for colored outputs
-- Two line setup is all you need to get up and running
-
-
-
-How do I use it?
-----------------
-
-Since ```go test``` is not easily extensible, you will have to hook Goblin into it. You do that by
-adding a single test function to your test file. All your Goblin tests will be implemented inside this function.
-
-```go
-package foobar
-
-import (
-    "testing"
-    . "github.com/franela/goblin"
-)
-
-func Test(t *testing.T) {
-    g := Goblin(t)
-    g.Describe("Numbers", func() {
-        // Passing Test
-        g.It("Should add two numbers ", func() {
-            g.Assert(1+1).Equal(2)
-        })
-        // Failing Test
-        g.It("Should match equal numbers", func() {
-            g.Assert(2).Equal(4)
-        })
-        // Pending Test
-        g.It("Should substract two numbers")
-        // Excluded Test
-        g.XIt("Should add two numbers ", func() {
-            g.Assert(3+1).Equal(4)
-        })
-    })
-}
-```
-
-Output will be something like:
-
-![](https://github.com/marcosnils/goblin/blob/master/goblin_output.png?raw=true)
-
-Nice and easy, right?
-
-Can I do asynchronous tests?
-----------------------------
-
-Yes! Goblin will help you test asynchronous things, like goroutines, etc. You just need to add a ```done``` parameter to the handler function of your ```It```, and call ```done``` when your asynchronous test has passed.
-
-```go
-  ...
-  g.Describe("Numbers", func() {
-      g.It("Should add two numbers asynchronously", func(done Done) {
-          go func() {
-              g.Assert(1+1).Equal(2)
-              done()
-          }()
-      })
-  })
-  ...
-```
-
-Goblin will wait for the ```done``` call, a ```Fail``` call or any false assertion.
-
-How do I use it with Gomega?
-----------------------------
-
-Gomega is a nice assertion framework. But it doesn't provide a nice way to hook it to testing frameworks. It should just panic instead of requiring a fail function. There is an issue about that [here](https://github.com/onsi/gomega/issues/5).
-While this is being discussed and hopefully fixed, the way to use Gomega with Goblin is:
-
-```go
-package foobar
-
-import (
-    "testing"
-    . "github.com/franela/goblin"
-    . "github.com/onsi/gomega"
-)
-
-func Test(t *testing.T) {
-    g := Goblin(t)
-
-    //special hook for gomega
-    RegisterFailHandler(func(m string, _ ...int) { g.Fail(m) })
-
-    g.Describe("lala", func() {
-        g.It("lslslslsls", func() {
-            Expect(1).To(Equal(10))
-        })
-    })
-}
-```
-
-
-FAQ
-----
-
-### How do I run specific tests?
-
-If `-goblin.run=$REGEX` is supplied to the `go test` command, then only tests that match the supplied regex will run.
-
-
-Contributing
------
-
-We do have a couple of [issues](https://github.com/franela/goblin/issues) pending.  Feel free to contribute and send us PRs (with tests please :smile:).
-
-Special Thanks
-------------
-
-Special thanks to [Leandro Reox](https://github.com/leandroreox) (Leitan) for the goblin logo.

+ 0 - 70
vendor/github.com/franela/goblin/assertions.go

@@ -1,70 +0,0 @@
-package goblin
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-// Assertion represents a fact stated about a source object. It contains the source object and function to call
-type Assertion struct {
-	src  interface{}
-	fail func(interface{})
-}
-
-func objectsAreEqual(a, b interface{}) bool {
-	if reflect.TypeOf(a) != reflect.TypeOf(b) {
-		return false
-	}
-
-	if reflect.DeepEqual(a, b) {
-		return true
-	}
-
-	if fmt.Sprintf("%#v", a) == fmt.Sprintf("%#v", b) {
-		return true
-	}
-
-	return false
-}
-
-func formatMessages(messages ...string) string {
-	if len(messages) > 0 {
-		return ", " + strings.Join(messages, " ")
-	}
-	return ""
-}
-
-// Eql is a shorthand alias of Equal for convenience
-func (a *Assertion) Eql(dst interface{}) {
-	a.Equal(dst)
-}
-
-// Equal takes a destination object and asserts that a source object and
-// destination object are equal to one another. It will fail the assertion and
-// print a corresponding message if the objects are not equivalent.
-func (a *Assertion) Equal(dst interface{}) {
-	if !objectsAreEqual(a.src, dst) {
-		a.fail(fmt.Sprintf("%#v %s %#v", a.src, "does not equal", dst))
-	}
-}
-
-// IsTrue asserts that a source is equal to true. Optional messages can be
-// provided for inclusion in the displayed message if the assertion fails. It
-// will fail the assertion if the source does not resolve to true.
-func (a *Assertion) IsTrue(messages ...string) {
-	if !objectsAreEqual(a.src, true) {
-		message := fmt.Sprintf("%v %s%s", a.src, "expected false to be truthy", formatMessages(messages...))
-		a.fail(message)
-	}
-}
-
-// IsFalse asserts that a source is equal to false. Optional messages can be
-// provided for inclusion in the displayed message if the assertion fails. It
-// will fail the assertion if the source does not resolve to false.
-func (a *Assertion) IsFalse(messages ...string) {
-	if !objectsAreEqual(a.src, false) {
-		message := fmt.Sprintf("%v %s%s", a.src, "expected true to be falsey", formatMessages(messages...))
-		a.fail(message)
-	}
-}
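
A small sketch of the assertion helpers (Equal, Eql, IsTrue, IsFalse) inside a Goblin test; the sample package and test names are hypothetical.

```go
package sample

import (
	"testing"

	. "github.com/franela/goblin"
)

func TestAssertions(t *testing.T) {
	g := Goblin(t)
	g.Describe("Assertion helpers", func() {
		g.It("supports Equal, Eql, IsTrue and IsFalse", func() {
			g.Assert(2 + 2).Equal(4)                        // fails with a descriptive message if unequal
			g.Assert("go").Eql("go")                        // Eql is an alias of Equal
			g.Assert(len("go") == 2).IsTrue("length is two") // optional message appended on failure
			g.Assert(false).IsFalse()
		})
	})
}
```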

+ 0 - 36
vendor/github.com/franela/goblin/go.snippets

@@ -1,36 +0,0 @@
-snippet gd
-	g.Describe("${1:name}", func() {
-		${2}
-	})
-	${0}
-snippet git 
-	g.It("${1:name}", func() {
-		${2}
-	})
-	${0}
-snippet gait 
-	g.It("${1:name}", func(done Done) {
-		done()
-		${2}
-	})
-	${0}
-snippet gb 
-	g.Before(func() {
-		${1}
-	})
-	${0}
-snippet gbe 
-	g.BeforeEach(func() {
-		${1}
-	})
-	${0}
-snippet ga 
-	g.After(func() {
-		${1}
-	})
-	${0}
-snippet gae 
-	g.AfterEach(func() {
-		${1}
-	})
-	${0}

+ 0 - 337
vendor/github.com/franela/goblin/goblin.go

@@ -1,337 +0,0 @@
-package goblin
-
-import (
-	"flag"
-	"fmt"
-	"regexp"
-	"runtime"
-	"sync"
-	"testing"
-	"time"
-)
-
-type Done func(error ...interface{})
-
-type Runnable interface {
-	run(*G) bool
-}
-
-type Itable interface {
-	run(*G) bool
-	failed(string, []string)
-}
-
-func (g *G) Describe(name string, h func()) {
-	d := &Describe{name: name, h: h, parent: g.parent}
-
-	if d.parent != nil {
-		d.parent.children = append(d.parent.children, Runnable(d))
-	}
-
-	g.parent = d
-
-	h()
-
-	g.parent = d.parent
-
-	if g.parent == nil && d.hasTests {
-		g.reporter.begin()
-		if d.run(g) {
-			g.t.Fail()
-		}
-		g.reporter.end()
-	}
-}
-func (g *G) Timeout(time time.Duration) {
-	g.timeout = time
-	g.timer.Reset(time)
-}
-
-type Describe struct {
-	name       string
-	h          func()
-	children   []Runnable
-	befores    []func()
-	afters     []func()
-	afterEach  []func()
-	beforeEach []func()
-	hasTests   bool
-	parent     *Describe
-}
-
-func (d *Describe) runBeforeEach() {
-	if d.parent != nil {
-		d.parent.runBeforeEach()
-	}
-
-	for _, b := range d.beforeEach {
-		b()
-	}
-}
-
-func (d *Describe) runAfterEach() {
-
-	if d.parent != nil {
-		d.parent.runAfterEach()
-	}
-
-	for _, a := range d.afterEach {
-		a()
-	}
-}
-
-func (d *Describe) run(g *G) bool {
-	failed := false
-	if d.hasTests {
-		g.reporter.beginDescribe(d.name)
-
-		for _, b := range d.befores {
-			b()
-		}
-
-		for _, r := range d.children {
-			if r.run(g) {
-				failed = true
-			}
-		}
-
-		for _, a := range d.afters {
-			a()
-		}
-
-		g.reporter.endDescribe()
-	}
-
-	return failed
-}
-
-type Failure struct {
-	stack    []string
-	testName string
-	message  string
-}
-
-type It struct {
-	h        interface{}
-	name     string
-	parent   *Describe
-	failure  *Failure
-	reporter Reporter
-	isAsync  bool
-}
-
-func (it *It) run(g *G) bool {
-	g.currentIt = it
-
-	if it.h == nil {
-		g.reporter.itIsPending(it.name)
-		return false
-	}
-	//TODO: should handle errors for beforeEach
-	it.parent.runBeforeEach()
-
-	runIt(g, it.h)
-
-	it.parent.runAfterEach()
-
-	failed := false
-	if it.failure != nil {
-		failed = true
-	}
-
-	if failed {
-		g.reporter.itFailed(it.name)
-		g.reporter.failure(it.failure)
-	} else {
-		g.reporter.itPassed(it.name)
-	}
-	return failed
-}
-
-func (it *It) failed(msg string, stack []string) {
-	it.failure = &Failure{stack: stack, message: msg, testName: it.parent.name + " " + it.name}
-}
-
-type Xit struct {
-	h        interface{}
-	name     string
-	parent   *Describe
-	failure  *Failure
-	reporter Reporter
-	isAsync  bool
-}
-
-func (xit *Xit) run(g *G) bool {
-	g.currentIt = xit
-
-	g.reporter.itIsExcluded(xit.name)
-	return false
-}
-
-func (xit *Xit) failed(msg string, stack []string) {
-	xit.failure = nil
-}
-
-func parseFlags() {
-	//Flag parsing
-	flag.Parse()
-	if *regexParam != "" {
-		runRegex = regexp.MustCompile(*regexParam)
-	} else {
-		runRegex = nil
-	}
-}
-
-var timeout = flag.Duration("goblin.timeout", 5*time.Second, "Sets default timeouts for all tests")
-var isTty = flag.Bool("goblin.tty", true, "Sets the default output format (color / monochrome)")
-var regexParam = flag.String("goblin.run", "", "Runs only tests which match the supplied regex")
-var runRegex *regexp.Regexp
-
-func Goblin(t *testing.T, arguments ...string) *G {
-	if !flag.Parsed() {
-		parseFlags()
-	}
-	g := &G{t: t, timeout: *timeout}
-	var fancy TextFancier
-	if *isTty {
-		fancy = &TerminalFancier{}
-	} else {
-		fancy = &Monochrome{}
-	}
-
-	g.reporter = Reporter(&DetailedReporter{fancy: fancy})
-	return g
-}
-
-func runIt(g *G, h interface{}) {
-	defer timeTrack(time.Now(), g)
-	g.mutex.Lock()
-	g.timedOut = false
-	g.mutex.Unlock()
-	g.timer = time.NewTimer(g.timeout)
-	g.shouldContinue = make(chan bool)
-	if call, ok := h.(func()); ok {
-		// the test is synchronous
-		go func(c chan bool) { call(); c <- true }(g.shouldContinue)
-	} else if call, ok := h.(func(Done)); ok {
-		doneCalled := 0
-		go func(c chan bool) {
-			call(func(msg ...interface{}) {
-				if len(msg) > 0 {
-					g.Fail(msg)
-				} else {
-					doneCalled++
-					if doneCalled > 1 {
-						g.Fail("Done called multiple times")
-					}
-					c <- true
-				}
-			})
-		}(g.shouldContinue)
-	} else {
-		panic("Not implemented.")
-	}
-	select {
-	case <-g.shouldContinue:
-	case <-g.timer.C:
-		//Set to nil as it shouldn't continue
-		g.shouldContinue = nil
-		g.timedOut = true
-		g.Fail("Test exceeded " + fmt.Sprintf("%s", g.timeout))
-	}
-	// Reset timeout value
-	g.timeout = *timeout
-}
-
-type G struct {
-	t              *testing.T
-	parent         *Describe
-	currentIt      Itable
-	timeout        time.Duration
-	reporter       Reporter
-	timedOut       bool
-	shouldContinue chan bool
-	mutex          sync.Mutex
-	timer          *time.Timer
-}
-
-func (g *G) SetReporter(r Reporter) {
-	g.reporter = r
-}
-
-func (g *G) It(name string, h ...interface{}) {
-	if matchesRegex(name) {
-		it := &It{name: name, parent: g.parent, reporter: g.reporter}
-		notifyParents(g.parent)
-		if len(h) > 0 {
-			it.h = h[0]
-		}
-		g.parent.children = append(g.parent.children, Runnable(it))
-	}
-}
-
-func (g *G) Xit(name string, h ...interface{}) {
-	if matchesRegex(name) {
-		xit := &Xit{name: name, parent: g.parent, reporter: g.reporter}
-		notifyParents(g.parent)
-		if len(h) > 0 {
-			xit.h = h[0]
-		}
-		g.parent.children = append(g.parent.children, Runnable(xit))
-	}
-}
-
-func matchesRegex(value string) bool {
-	if runRegex != nil {
-		return runRegex.MatchString(value)
-	}
-	return true
-}
-
-func notifyParents(d *Describe) {
-	d.hasTests = true
-	if d.parent != nil {
-		notifyParents(d.parent)
-	}
-}
-
-func (g *G) Before(h func()) {
-	g.parent.befores = append(g.parent.befores, h)
-}
-
-func (g *G) BeforeEach(h func()) {
-	g.parent.beforeEach = append(g.parent.beforeEach, h)
-}
-
-func (g *G) After(h func()) {
-	g.parent.afters = append(g.parent.afters, h)
-}
-
-func (g *G) AfterEach(h func()) {
-	g.parent.afterEach = append(g.parent.afterEach, h)
-}
-
-func (g *G) Assert(src interface{}) *Assertion {
-	return &Assertion{src: src, fail: g.Fail}
-}
-
-func timeTrack(start time.Time, g *G) {
-	g.reporter.itTook(time.Since(start))
-}
-
-func (g *G) Fail(error interface{}) {
-	//Skips 7 stacks due to the functions between the stack and the test
-	stack := ResolveStack(7)
-	message := fmt.Sprintf("%v", error)
-	g.currentIt.failed(message, stack)
-	if g.shouldContinue != nil {
-		g.shouldContinue <- true
-	}
-	g.mutex.Lock()
-	defer g.mutex.Unlock()
-	if !g.timedOut {
-		//Stop test function execution
-		runtime.Goexit()
-	}
-
-}
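
For reference, the goblin.go removed above is the BDD-style test harness that the gitleaks tests pulled in through the vendor tree. A minimal sketch of how a test file drives the Describe/It/Assert API shown in this hunk — the package path is the real upstream one, but the spec names and the Equal assertion (defined in goblin's assertions file, not in this hunk) are illustrative:

package example_test

import (
	"testing"

	"github.com/franela/goblin"
)

func TestExample(t *testing.T) {
	g := goblin.Goblin(t)

	g.Describe("entropy checks", func() {
		g.BeforeEach(func() {
			// runs before every It in this Describe (and in nested Describes)
		})

		g.It("flags obviously random strings", func() {
			g.Assert(1 + 1).Equal(2) // Equal comes from goblin's assertion type, not shown in this hunk
		})

		// A spec registered with Xit is reported as excluded and never run.
		g.Xit("is skipped for now")
	})
}

Passing a func(goblin.Done) instead of a plain func() makes a spec asynchronous; runIt above then waits on the Done callback or the -goblin.timeout deadline, whichever comes first.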

binary
vendor/github.com/franela/goblin/goblin_logo.jpg


binary
vendor/github.com/franela/goblin/goblin_output.png


+ 0 - 30
vendor/github.com/franela/goblin/mono_reporter.go

@@ -1,30 +0,0 @@
-package goblin
-
-import ()
-
-type Monochrome struct {
-}
-
-func (self *Monochrome) Red(text string) string {
-	return "!" + text
-}
-
-func (self *Monochrome) Gray(text string) string {
-	return text
-}
-
-func (self *Monochrome) Cyan(text string) string {
-	return text
-}
-
-func (self *Monochrome) WithCheck(text string) string {
-	return ">>>" + text
-}
-
-func (self *Monochrome) Green(text string) string {
-	return text
-}
-
-func (self *Monochrome) Yellow(text string) string {
-	return text
-}

+ 0 - 153
vendor/github.com/franela/goblin/reporting.go

@@ -1,153 +0,0 @@
-package goblin
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-)
-
-type Reporter interface {
-	beginDescribe(string)
-	endDescribe()
-	begin()
-	end()
-	failure(*Failure)
-	itTook(time.Duration)
-	itFailed(string)
-	itPassed(string)
-	itIsPending(string)
-	itIsExcluded(string)
-}
-
-type TextFancier interface {
-	Red(text string) string
-	Gray(text string) string
-	Cyan(text string) string
-	Green(text string) string
-	Yellow(text string) string
-	WithCheck(text string) string
-}
-
-type DetailedReporter struct {
-	level, failed, passed, pending, excluded int
-	failures                                 []*Failure
-	executionTime, totalExecutionTime        time.Duration
-	fancy                                    TextFancier
-}
-
-func (r *DetailedReporter) SetTextFancier(f TextFancier) {
-	r.fancy = f
-}
-
-type TerminalFancier struct {
-}
-
-func (self *TerminalFancier) Red(text string) string {
-	return "\033[31m" + text + "\033[0m"
-}
-
-func (self *TerminalFancier) Gray(text string) string {
-	return "\033[90m" + text + "\033[0m"
-}
-
-func (self *TerminalFancier) Cyan(text string) string {
-	return "\033[36m" + text + "\033[0m"
-}
-
-func (self *TerminalFancier) Green(text string) string {
-	return "\033[32m" + text + "\033[0m"
-}
-
-func (self *TerminalFancier) Yellow(text string) string {
-	return "\033[33m" + text + "\033[0m"
-}
-
-func (self *TerminalFancier) WithCheck(text string) string {
-	return "\033[32m\u2713\033[0m " + text
-}
-
-func (r *DetailedReporter) getSpace() string {
-	return strings.Repeat(" ", (r.level+1)*2)
-}
-
-func (r *DetailedReporter) failure(failure *Failure) {
-	r.failures = append(r.failures, failure)
-}
-
-func (r *DetailedReporter) print(text string) {
-	fmt.Printf("%v%v\n", r.getSpace(), text)
-}
-
-func (r *DetailedReporter) printWithCheck(text string) {
-	fmt.Printf("%v%v\n", r.getSpace(), r.fancy.WithCheck(text))
-}
-
-func (r *DetailedReporter) beginDescribe(name string) {
-	fmt.Println("")
-	r.print(name)
-	r.level++
-}
-
-func (r *DetailedReporter) endDescribe() {
-	r.level--
-}
-
-func (r *DetailedReporter) itTook(duration time.Duration) {
-	r.executionTime = duration
-	r.totalExecutionTime += duration
-}
-
-func (r *DetailedReporter) itFailed(name string) {
-	r.failed++
-	r.print(r.fancy.Red(strconv.Itoa(r.failed) + ") " + name))
-}
-
-func (r *DetailedReporter) itPassed(name string) {
-	r.passed++
-	r.printWithCheck(r.fancy.Gray(name))
-}
-
-func (r *DetailedReporter) itIsPending(name string) {
-	r.pending++
-	r.print(r.fancy.Cyan("- " + name))
-}
-
-func (r *DetailedReporter) itIsExcluded(name string) {
-	r.excluded++
-	r.print(r.fancy.Yellow("- " + name))
-}
-
-func (r *DetailedReporter) begin() {
-}
-
-func (r *DetailedReporter) end() {
-	comp := fmt.Sprintf("%d tests complete", r.passed)
-	t := fmt.Sprintf("(%d ms)", r.totalExecutionTime/time.Millisecond)
-
-	//fmt.Printf("\n\n \033[32m%d tests complete\033[0m \033[90m(%d ms)\033[0m\n", r.passed, r.totalExecutionTime/time.Millisecond)
-	fmt.Printf("\n\n %v %v\n", r.fancy.Green(comp), r.fancy.Gray(t))
-
-	if r.pending > 0 {
-		pend := fmt.Sprintf("%d test(s) pending", r.pending)
-		fmt.Printf(" %v\n\n", r.fancy.Cyan(pend))
-	}
-
-	if r.excluded > 0 {
-		excl := fmt.Sprintf("%d test(s) excluded", r.excluded)
-		fmt.Printf(" %v\n\n", r.fancy.Yellow(excl))
-	}
-
-	if len(r.failures) > 0 {
-		fmt.Printf("%s \n\n", r.fancy.Red(fmt.Sprintf(" %d tests failed:", len(r.failures))))
-
-	}
-
-	for i, failure := range r.failures {
-		fmt.Printf("  %d) %s:\n\n", i+1, failure.testName)
-		fmt.Printf("    %s\n", r.fancy.Red(failure.message))
-		for _, stackItem := range failure.stack {
-			fmt.Printf("    %s\n", r.fancy.Gray(stackItem))
-		}
-	}
-}
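
The two TextFancier implementations only differ in how they decorate strings: TerminalFancier emits ANSI escape codes, Monochrome (from mono_reporter.go above) falls back to plain markers, and the -goblin.tty flag in goblin.go chooses between them. A minimal sketch exercising them directly (normally Goblin() wires the chosen fancier into a DetailedReporter for you):

package main

import (
	"fmt"

	"github.com/franela/goblin"
)

func main() {
	color := &goblin.TerminalFancier{}
	plain := &goblin.Monochrome{}

	fmt.Println(color.Red("1 test failed"))       // wrapped in \033[31m ... \033[0m
	fmt.Println(plain.Red("1 test failed"))       // prefixed with "!"
	fmt.Println(color.WithCheck("entropy check")) // green check mark followed by the text
}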

+ 0 - 21
vendor/github.com/franela/goblin/resolver.go

@@ -1,21 +0,0 @@
-package goblin
-
-import (
-	"runtime/debug"
-	"strings"
-)
-
-func ResolveStack(skip int) []string {
-	return cleanStack(debug.Stack(), skip)
-}
-
-func cleanStack(stack []byte, skip int) []string {
-	arrayStack := strings.Split(string(stack), "\n")
-	var finalStack []string
-	for i := skip; i < len(arrayStack); i++ {
-		if strings.Contains(arrayStack[i], ".go") {
-			finalStack = append(finalStack, arrayStack[i])
-		}
-	}
-	return finalStack
-}
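
ResolveStack is what goblin's Fail path uses to trim its own frames out of a failure report: it takes debug.Stack(), drops the first skip lines, and keeps only the lines that mention a .go file. A minimal sketch calling it directly:

package main

import (
	"fmt"

	"github.com/franela/goblin"
)

func main() {
	// With skip=0 nothing is dropped from the top, so this prints every
	// stack line that references a .go source file, one per line.
	for _, frame := range goblin.ResolveStack(0) {
		fmt.Println(frame)
	}
}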

+ 0 - 3
vendor/github.com/golang/protobuf/AUTHORS

@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.

+ 0 - 3
vendor/github.com/golang/protobuf/CONTRIBUTORS

@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.

+ 0 - 28
vendor/github.com/golang/protobuf/LICENSE

@@ -1,28 +0,0 @@
-Copyright 2010 The Go Authors.  All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-

+ 0 - 253
vendor/github.com/golang/protobuf/proto/clone.go

@@ -1,253 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
-	"fmt"
-	"log"
-	"reflect"
-	"strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
-	in := reflect.ValueOf(src)
-	if in.IsNil() {
-		return src
-	}
-	out := reflect.New(in.Type().Elem())
-	dst := out.Interface().(Message)
-	Merge(dst, src)
-	return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
-	// Merge merges src into this message.
-	// Required and optional fields that are set in src will be set to that value in dst.
-	// Elements of repeated fields will be appended.
-	//
-	// Merge may panic if called with a different argument type than the receiver.
-	Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
-	XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
-	if m, ok := dst.(Merger); ok {
-		m.Merge(src)
-		return
-	}
-
-	in := reflect.ValueOf(src)
-	out := reflect.ValueOf(dst)
-	if out.IsNil() {
-		panic("proto: nil destination")
-	}
-	if in.Type() != out.Type() {
-		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
-	}
-	if in.IsNil() {
-		return // Merge from nil src is a noop
-	}
-	if m, ok := dst.(generatedMerger); ok {
-		m.XXX_Merge(src)
-		return
-	}
-	mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
-	sprop := GetProperties(in.Type())
-	for i := 0; i < in.NumField(); i++ {
-		f := in.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
-	}
-
-	if emIn, err := extendable(in.Addr().Interface()); err == nil {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	uf := in.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return
-	}
-	uin := uf.Bytes()
-	if len(uin) > 0 {
-		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
-	}
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
-	if in.Type() == protoMessageType {
-		if !in.IsNil() {
-			if out.IsNil() {
-				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
-			} else {
-				Merge(out.Interface().(Message), in.Interface().(Message))
-			}
-		}
-		return
-	}
-	switch in.Kind() {
-	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-		reflect.String, reflect.Uint32, reflect.Uint64:
-		if !viaPtr && isProto3Zero(in) {
-			return
-		}
-		out.Set(in)
-	case reflect.Interface:
-		// Probably a oneof field; copy non-nil values.
-		if in.IsNil() {
-			return
-		}
-		// Allocate destination if it is not set, or set to a different type.
-		// Otherwise we will merge as normal.
-		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
-			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
-		}
-		mergeAny(out.Elem(), in.Elem(), false, nil)
-	case reflect.Map:
-		if in.Len() == 0 {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.MakeMap(in.Type()))
-		}
-		// For maps with value types of *T or []byte we need to deep copy each value.
-		elemKind := in.Type().Elem().Kind()
-		for _, key := range in.MapKeys() {
-			var val reflect.Value
-			switch elemKind {
-			case reflect.Ptr:
-				val = reflect.New(in.Type().Elem().Elem())
-				mergeAny(val, in.MapIndex(key), false, nil)
-			case reflect.Slice:
-				val = in.MapIndex(key)
-				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
-			default:
-				val = in.MapIndex(key)
-			}
-			out.SetMapIndex(key, val)
-		}
-	case reflect.Ptr:
-		if in.IsNil() {
-			return
-		}
-		if out.IsNil() {
-			out.Set(reflect.New(in.Elem().Type()))
-		}
-		mergeAny(out.Elem(), in.Elem(), true, nil)
-	case reflect.Slice:
-		if in.IsNil() {
-			return
-		}
-		if in.Type().Elem().Kind() == reflect.Uint8 {
-			// []byte is a scalar bytes field, not a repeated field.
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value, and should not
-			// be merged.
-			if prop != nil && prop.proto3 && in.Len() == 0 {
-				return
-			}
-
-			// Make a deep copy.
-			// Append to []byte{} instead of []byte(nil) so that we never end up
-			// with a nil result.
-			out.SetBytes(append([]byte{}, in.Bytes()...))
-			return
-		}
-		n := in.Len()
-		if out.IsNil() {
-			out.Set(reflect.MakeSlice(in.Type(), 0, n))
-		}
-		switch in.Type().Elem().Kind() {
-		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
-			reflect.String, reflect.Uint32, reflect.Uint64:
-			out.Set(reflect.AppendSlice(out, in))
-		default:
-			for i := 0; i < n; i++ {
-				x := reflect.Indirect(reflect.New(in.Type().Elem()))
-				mergeAny(x, in.Index(i), false, nil)
-				out.Set(reflect.Append(out, x))
-			}
-		}
-	case reflect.Struct:
-		mergeStruct(out, in)
-	default:
-		// unknown type, so not a protocol buffer
-		log.Printf("proto: don't know how to copy %v", in)
-	}
-}
-
-func mergeExtension(out, in map[int32]Extension) {
-	for extNum, eIn := range in {
-		eOut := Extension{desc: eIn.desc}
-		if eIn.value != nil {
-			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
-			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
-			eOut.value = v.Interface()
-		}
-		if eIn.enc != nil {
-			eOut.enc = make([]byte, len(eIn.enc))
-			copy(eOut.enc, eIn.enc)
-		}
-
-		out[extNum] = eOut
-	}
-}
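
Clone and Merge are part of the public proto API rather than internals, so their removal here only reflects the move away from a committed vendor tree. A minimal sketch using a hypothetical generated message type (the pb import path and the Finding fields are placeholders for any protoc-gen-go output):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/findings" // hypothetical generated package
)

func main() {
	src := &pb.Finding{Rule: "aws-access-key", Line: 42} // hypothetical fields
	dst := proto.Clone(src).(*pb.Finding)                // deep copy, detached from src

	overlay := &pb.Finding{Commit: "deadbeef"}
	proto.Merge(dst, overlay) // fields set in overlay overwrite dst; repeated fields append

	fmt.Println(dst)
}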

+ 0 - 428
vendor/github.com/golang/protobuf/proto/decode.go

@@ -1,428 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
-	"errors"
-	"fmt"
-	"io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
-	for shift := uint(0); shift < 64; shift += 7 {
-		if n >= len(buf) {
-			return 0, 0
-		}
-		b := uint64(buf[n])
-		n++
-		x |= (b & 0x7F) << shift
-		if (b & 0x80) == 0 {
-			return x, n
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
-	i := p.index
-	l := len(p.buf)
-
-	for shift := uint(0); shift < 64; shift += 7 {
-		if i >= l {
-			err = io.ErrUnexpectedEOF
-			return
-		}
-		b := p.buf[i]
-		i++
-		x |= (uint64(b) & 0x7F) << shift
-		if b < 0x80 {
-			p.index = i
-			return
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	err = errOverflow
-	return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	i := p.index
-	buf := p.buf
-
-	if i >= len(buf) {
-		return 0, io.ErrUnexpectedEOF
-	} else if buf[i] < 0x80 {
-		p.index++
-		return uint64(buf[i]), nil
-	} else if len(buf)-i < 10 {
-		return p.decodeVarintSlow()
-	}
-
-	var b uint64
-	// we already checked the first byte
-	x = uint64(buf[i]) - 0x80
-	i++
-
-	b = uint64(buf[i])
-	i++
-	x += b << 7
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 7
-
-	b = uint64(buf[i])
-	i++
-	x += b << 14
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 14
-
-	b = uint64(buf[i])
-	i++
-	x += b << 21
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 21
-
-	b = uint64(buf[i])
-	i++
-	x += b << 28
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 28
-
-	b = uint64(buf[i])
-	i++
-	x += b << 35
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 35
-
-	b = uint64(buf[i])
-	i++
-	x += b << 42
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 42
-
-	b = uint64(buf[i])
-	i++
-	x += b << 49
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 49
-
-	b = uint64(buf[i])
-	i++
-	x += b << 56
-	if b&0x80 == 0 {
-		goto done
-	}
-	x -= 0x80 << 56
-
-	b = uint64(buf[i])
-	i++
-	x += b << 63
-	if b&0x80 == 0 {
-		goto done
-	}
-	// x -= 0x80 << 63 // Always zero.
-
-	return 0, errOverflow
-
-done:
-	p.index = i
-	return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 8
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-8])
-	x |= uint64(p.buf[i-7]) << 8
-	x |= uint64(p.buf[i-6]) << 16
-	x |= uint64(p.buf[i-5]) << 24
-	x |= uint64(p.buf[i-4]) << 32
-	x |= uint64(p.buf[i-3]) << 40
-	x |= uint64(p.buf[i-2]) << 48
-	x |= uint64(p.buf[i-1]) << 56
-	return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
-	// x, err already 0
-	i := p.index + 4
-	if i < 0 || i > len(p.buf) {
-		err = io.ErrUnexpectedEOF
-		return
-	}
-	p.index = i
-
-	x = uint64(p.buf[i-4])
-	x |= uint64(p.buf[i-3]) << 8
-	x |= uint64(p.buf[i-2]) << 16
-	x |= uint64(p.buf[i-1]) << 24
-	return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
-	return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from  the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
-	x, err = p.DecodeVarint()
-	if err != nil {
-		return
-	}
-	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
-	return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
-	n, err := p.DecodeVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	nb := int(n)
-	if nb < 0 {
-		return nil, fmt.Errorf("proto: bad byte length %d", nb)
-	}
-	end := p.index + nb
-	if end < p.index || end > len(p.buf) {
-		return nil, io.ErrUnexpectedEOF
-	}
-
-	if !alloc {
-		// todo: check if can get more uses of alloc=false
-		buf = p.buf[p.index:end]
-		p.index += nb
-		return
-	}
-
-	buf = make([]byte, nb)
-	copy(buf, p.buf[p.index:])
-	p.index += nb
-	return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
-	buf, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return
-	}
-	return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves.  The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
-	Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
-	XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
-	pb.Reset()
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb.  If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
-	if u, ok := pb.(newUnmarshaler); ok {
-		return u.XXX_Unmarshal(buf)
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent
-		// about whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		return u.Unmarshal(buf)
-	}
-	return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
-	enc, err := p.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
-	b := p.buf[p.index:]
-	x, y := findEndGroup(b)
-	if x < 0 {
-		return io.ErrUnexpectedEOF
-	}
-	err := Unmarshal(b[:x], pb)
-	p.index += y
-	return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb.  If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
-	// If the object can unmarshal itself, let it.
-	if u, ok := pb.(newUnmarshaler); ok {
-		err := u.XXX_Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-	if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent
-		// about whether Unmarshaler should or should not implicitly clear itself.
-		// Some implementations do, most do not.
-		// Thus, calling this here may or may not do what people want.
-		//
-		// See https://github.com/golang/protobuf/issues/424
-		err := u.Unmarshal(p.buf[p.index:])
-		p.index = len(p.buf)
-		return err
-	}
-
-	// Slow workaround for messages that aren't Unmarshalers.
-	// This includes some hand-coded .pb.go files and
-	// bootstrap protos.
-	// TODO: fix all of those and then add Unmarshal to
-	// the Message interface. Then:
-	// The cast above and code below can be deleted.
-	// The old unmarshaler can be deleted.
-	// Clients can call Unmarshal directly (can already do that, actually).
-	var info InternalMessageInfo
-	err := info.Unmarshal(pb, p.buf[p.index:])
-	p.index = len(p.buf)
-	return err
-}
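
The varint helpers above are exported, and the two-byte sequence 0xAC 0x02 is the canonical example of a multi-byte varint (it decodes to 300). A minimal sketch of both the standalone helper and the Buffer-based reader:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Standalone: returns the decoded value and the number of bytes consumed.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2

	// Through a Buffer, which keeps its own read index.
	b := proto.NewBuffer([]byte{0xAC, 0x02})
	y, err := b.DecodeVarint()
	fmt.Println(y, err) // 300 <nil>
}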

+ 0 - 350
vendor/github.com/golang/protobuf/proto/discard.go

@@ -1,350 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
-)
-
-type generatedDiscarder interface {
-	XXX_DiscardUnknown()
-}
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
-func DiscardUnknown(m Message) {
-	if m, ok := m.(generatedDiscarder); ok {
-		m.XXX_DiscardUnknown()
-		return
-	}
-	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
-	// but the master branch has no implementation for InternalMessageInfo,
-	// so it would be more work to replicate that approach.
-	discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
-	di := atomicLoadDiscardInfo(&a.discard)
-	if di == nil {
-		di = getDiscardInfo(reflect.TypeOf(m).Elem())
-		atomicStoreDiscardInfo(&a.discard, di)
-	}
-	di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
-	typ reflect.Type
-
-	initialized int32 // 0: only typ is valid, 1: everything is valid
-	lock        sync.Mutex
-
-	fields       []discardFieldInfo
-	unrecognized field
-}
-
-type discardFieldInfo struct {
-	field   field // Offset of field, guaranteed to be valid
-	discard func(src pointer)
-}
-
-var (
-	discardInfoMap  = map[reflect.Type]*discardInfo{}
-	discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
-	discardInfoLock.Lock()
-	defer discardInfoLock.Unlock()
-	di := discardInfoMap[t]
-	if di == nil {
-		di = &discardInfo{typ: t}
-		discardInfoMap[t] = di
-	}
-	return di
-}
-
-func (di *discardInfo) discard(src pointer) {
-	if src.isNil() {
-		return // Nothing to do.
-	}
-
-	if atomic.LoadInt32(&di.initialized) == 0 {
-		di.computeDiscardInfo()
-	}
-
-	for _, fi := range di.fields {
-		sfp := src.offset(fi.field)
-		fi.discard(sfp)
-	}
-
-	// For proto2 messages, only discard unknown fields in message extensions
-	// that have been accessed via GetExtension.
-	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
-		// Ignore lock since DiscardUnknown is not concurrency safe.
-		emm, _ := em.extensionsRead()
-		for _, mx := range emm {
-			if m, ok := mx.value.(Message); ok {
-				DiscardUnknown(m)
-			}
-		}
-	}
-
-	if di.unrecognized.IsValid() {
-		*src.offset(di.unrecognized).toBytes() = nil
-	}
-}
-
-func (di *discardInfo) computeDiscardInfo() {
-	di.lock.Lock()
-	defer di.lock.Unlock()
-	if di.initialized != 0 {
-		return
-	}
-	t := di.typ
-	n := t.NumField()
-
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-
-		dfi := discardFieldInfo{field: toField(&f)}
-		tf := f.Type
-
-		// Unwrap tf to get its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
-		}
-
-		switch tf.Kind() {
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
-			case isSlice: // E.g., []*pb.T
-				di := getDiscardInfo(tf)
-				dfi.discard = func(src pointer) {
-					sps := src.getPointerSlice()
-					for _, sp := range sps {
-						if !sp.isNil() {
-							di.discard(sp)
-						}
-					}
-				}
-			default: // E.g., *pb.T
-				di := getDiscardInfo(tf)
-				dfi.discard = func(src pointer) {
-					sp := src.getPointer()
-					if !sp.isNil() {
-						di.discard(sp)
-					}
-				}
-			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
-			default: // E.g., map[K]V
-				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
-					dfi.discard = func(src pointer) {
-						sm := src.asPointerTo(tf).Elem()
-						if sm.Len() == 0 {
-							return
-						}
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							DiscardUnknown(val.Interface().(Message))
-						}
-					}
-				} else {
-					dfi.discard = func(pointer) {} // Noop
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%v.%s cannot be a pointer to an interface or a slice of interface values", t, f.Name))
-			default: // E.g., interface{}
-				// TODO: Make this faster?
-				dfi.discard = func(src pointer) {
-					su := src.asPointerTo(tf).Elem()
-					if !su.IsNil() {
-						sv := su.Elem().Elem().Field(0)
-						if sv.Kind() == reflect.Ptr && sv.IsNil() {
-							return
-						}
-						switch sv.Type().Kind() {
-						case reflect.Ptr: // Proto struct (e.g., *T)
-							DiscardUnknown(sv.Interface().(Message))
-						}
-					}
-				}
-			}
-		default:
-			continue
-		}
-		di.fields = append(di.fields, dfi)
-	}
-
-	di.unrecognized = invalidField
-	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
-		if f.Type != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		di.unrecognized = toField(&f)
-	}
-
-	atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
-	v := reflect.ValueOf(m)
-	if v.Kind() != reflect.Ptr || v.IsNil() {
-		return
-	}
-	v = v.Elem()
-	if v.Kind() != reflect.Struct {
-		return
-	}
-	t := v.Type()
-
-	for i := 0; i < v.NumField(); i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		vf := v.Field(i)
-		tf := f.Type
-
-		// Unwrap tf to get its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
-		}
-
-		switch tf.Kind() {
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
-			case isSlice: // E.g., []*pb.T
-				for j := 0; j < vf.Len(); j++ {
-					discardLegacy(vf.Index(j).Interface().(Message))
-				}
-			default: // E.g., *pb.T
-				discardLegacy(vf.Interface().(Message))
-			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
-			default: // E.g., map[K]V
-				tv := vf.Type().Elem()
-				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
-					for _, key := range vf.MapKeys() {
-						val := vf.MapIndex(key)
-						discardLegacy(val.Interface().(Message))
-					}
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic(fmt.Sprintf("%T.%s cannot be a pointer to an interface or a slice of interface values", m, f.Name))
-			default: // E.g., test_proto.isCommunique_Union interface
-				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
-					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
-					if !vf.IsNil() {
-						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
-						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
-						if vf.Kind() == reflect.Ptr {
-							discardLegacy(vf.Interface().(Message))
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
-		if vf.Type() != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		vf.Set(reflect.ValueOf([]byte(nil)))
-	}
-
-	// For proto2 messages, only discard unknown fields in message extensions
-	// that have been accessed via GetExtension.
-	if em, err := extendable(m); err == nil {
-		// Ignore lock since discardLegacy is not concurrency safe.
-		emm, _ := em.extensionsRead()
-		for _, mx := range emm {
-			if m, ok := mx.value.(Message); ok {
-				discardLegacy(m)
-			}
-		}
-	}
-}
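
DiscardUnknown is the exported entry point for the machinery above. A minimal sketch with a hypothetical generated message, where a payload produced by a newer schema may carry field numbers this binary does not know about:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/findings" // hypothetical generated package
)

func main() {
	var msg pb.Finding // hypothetical message type
	raw := []byte{}    // placeholder payload from a newer producer
	if err := proto.Unmarshal(raw, &msg); err != nil {
		log.Fatal(err)
	}
	// Unmarshal keeps unrecognized fields in XXX_unrecognized so a re-marshal
	// can round-trip them; DiscardUnknown drops them recursively instead.
	proto.DiscardUnknown(&msg)
}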

+ 0 - 203
vendor/github.com/golang/protobuf/proto/encode.go

@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"errors"
-	"reflect"
-)
-
-var (
-	// errRepeatedHasNil is the error returned if Marshal is called with
-	// a struct with a repeated field containing a nil element.
-	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
-	// errOneofHasNil is the error returned if Marshal is called with
-	// a struct with a oneof field containing a nil element.
-	errOneofHasNil = errors.New("proto: oneof field has nil value")
-
-	// ErrNil is the error returned if Marshal is called with nil.
-	ErrNil = errors.New("proto: Marshal called with nil")
-
-	// ErrTooLarge is the error returned if Marshal is called with a
-	// message that encodes to >2GB.
-	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
-	var buf [maxVarintBytes]byte
-	var n int
-	for n = 0; x > 127; n++ {
-		buf[n] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	buf[n] = uint8(x)
-	n++
-	return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
-	for x >= 1<<7 {
-		p.buf = append(p.buf, uint8(x&0x7f|0x80))
-		x >>= 7
-	}
-	p.buf = append(p.buf, uint8(x))
-	return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
-	switch {
-	case x < 1<<7:
-		return 1
-	case x < 1<<14:
-		return 2
-	case x < 1<<21:
-		return 3
-	case x < 1<<28:
-		return 4
-	case x < 1<<35:
-		return 5
-	case x < 1<<42:
-		return 6
-	case x < 1<<49:
-		return 7
-	case x < 1<<56:
-		return 8
-	case x < 1<<63:
-		return 9
-	}
-	return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24),
-		uint8(x>>32),
-		uint8(x>>40),
-		uint8(x>>48),
-		uint8(x>>56))
-	return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
-	p.buf = append(p.buf,
-		uint8(x),
-		uint8(x>>8),
-		uint8(x>>16),
-		uint8(x>>24))
-	return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
-	// use signed number to get arithmetic right shift.
-	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
-	p.EncodeVarint(uint64(len(b)))
-	p.buf = append(p.buf, b...)
-	return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
-	p.EncodeVarint(uint64(len(s)))
-	p.buf = append(p.buf, s...)
-	return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
-	Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
-	siz := Size(pb)
-	p.EncodeVarint(uint64(siz))
-	return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return v.IsNil()
-	}
-	return false
-}
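
These encoders mirror the decoders removed a few files above. A minimal sketch of the standalone helpers and the Buffer-based writers (Buffer.Bytes() belongs to the same package, defined outside this hunk):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
	fmt.Println(proto.SizeVarint(300))           // 2

	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)          // appends ac 02
	_ = b.EncodeZigzag64(^uint64(0)) // zigzag form of int64(-1), a single 0x01 byte
	fmt.Printf("% x\n", b.Bytes())   // ac 02 01
}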

+ 0 - 300
vendor/github.com/golang/protobuf/proto/equal.go

@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
-	"bytes"
-	"log"
-	"reflect"
-	"strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
-  - Two messages are equal iff they are the same type,
-    corresponding fields are equal, unknown field sets
-    are equal, and extensions sets are equal.
-  - Two set scalar fields are equal iff their values are equal.
-    If the fields are of a floating-point type, remember that
-    NaN != x for all x, including NaN. If the message is defined
-    in a proto3 .proto file, fields are not "set"; specifically,
-    zero length proto3 "bytes" fields are equal (nil == {}).
-  - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal. Note a "bytes" field,
-    although represented by []byte, is not a repeated field and the
-    rule for the scalar fields described above applies.
-  - Two unset fields are equal.
-  - Two unknown field sets are equal if their current
-    encoded state is equal.
-  - Two extension sets are equal iff they have corresponding
-    elements that are pairwise equal.
-  - Two map fields are equal iff their lengths are the same,
-    and they contain the same set of elements. Zero-length map
-    fields are equal.
-  - Every other combination of things is not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
-	if a == nil || b == nil {
-		return a == b
-	}
-	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
-	if v1.Type() != v2.Type() {
-		return false
-	}
-	if v1.Kind() == reflect.Ptr {
-		if v1.IsNil() {
-			return v2.IsNil()
-		}
-		if v2.IsNil() {
-			return false
-		}
-		v1, v2 = v1.Elem(), v2.Elem()
-	}
-	if v1.Kind() != reflect.Struct {
-		return false
-	}
-	return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
-	sprop := GetProperties(v1.Type())
-	for i := 0; i < v1.NumField(); i++ {
-		f := v1.Type().Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		f1, f2 := v1.Field(i), v2.Field(i)
-		if f.Type.Kind() == reflect.Ptr {
-			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
-				// both unset
-				continue
-			} else if n1 != n2 {
-				// set/unset mismatch
-				return false
-			}
-			f1, f2 = f1.Elem(), f2.Elem()
-		}
-		if !equalAny(f1, f2, sprop.Prop[i]) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_InternalExtensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
-			return false
-		}
-	}
-
-	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
-		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
-			return false
-		}
-	}
-
-	uf := v1.FieldByName("XXX_unrecognized")
-	if !uf.IsValid() {
-		return true
-	}
-
-	u1 := uf.Bytes()
-	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
-	return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
-	if v1.Type() == protoMessageType {
-		m1, _ := v1.Interface().(Message)
-		m2, _ := v2.Interface().(Message)
-		return Equal(m1, m2)
-	}
-	switch v1.Kind() {
-	case reflect.Bool:
-		return v1.Bool() == v2.Bool()
-	case reflect.Float32, reflect.Float64:
-		return v1.Float() == v2.Float()
-	case reflect.Int32, reflect.Int64:
-		return v1.Int() == v2.Int()
-	case reflect.Interface:
-		// Probably a oneof field; compare the inner values.
-		n1, n2 := v1.IsNil(), v2.IsNil()
-		if n1 || n2 {
-			return n1 == n2
-		}
-		e1, e2 := v1.Elem(), v2.Elem()
-		if e1.Type() != e2.Type() {
-			return false
-		}
-		return equalAny(e1, e2, nil)
-	case reflect.Map:
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for _, key := range v1.MapKeys() {
-			val2 := v2.MapIndex(key)
-			if !val2.IsValid() {
-				// This key was not found in the second map.
-				return false
-			}
-			if !equalAny(v1.MapIndex(key), val2, nil) {
-				return false
-			}
-		}
-		return true
-	case reflect.Ptr:
-		// Maps may have nil values in them, so check for nil.
-		if v1.IsNil() && v2.IsNil() {
-			return true
-		}
-		if v1.IsNil() != v2.IsNil() {
-			return false
-		}
-		return equalAny(v1.Elem(), v2.Elem(), prop)
-	case reflect.Slice:
-		if v1.Type().Elem().Kind() == reflect.Uint8 {
-			// short circuit: []byte
-
-			// Edge case: if this is in a proto3 message, a zero length
-			// bytes field is considered the zero value.
-			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
-				return true
-			}
-			if v1.IsNil() != v2.IsNil() {
-				return false
-			}
-			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
-		}
-
-		if v1.Len() != v2.Len() {
-			return false
-		}
-		for i := 0; i < v1.Len(); i++ {
-			if !equalAny(v1.Index(i), v2.Index(i), prop) {
-				return false
-			}
-		}
-		return true
-	case reflect.String:
-		return v1.Interface().(string) == v2.Interface().(string)
-	case reflect.Struct:
-		return equalStruct(v1, v2)
-	case reflect.Uint32, reflect.Uint64:
-		return v1.Uint() == v2.Uint()
-	}
-
-	// unknown type, so not a protocol buffer
-	log.Printf("proto: don't know how to compare %v", v1)
-	return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
-	em1, _ := x1.extensionsRead()
-	em2, _ := x2.extensionsRead()
-	return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
-	if len(em1) != len(em2) {
-		return false
-	}
-
-	for extNum, e1 := range em1 {
-		e2, ok := em2[extNum]
-		if !ok {
-			return false
-		}
-
-		m1, m2 := e1.value, e2.value
-
-		if m1 == nil && m2 == nil {
-			// Both have only encoded form.
-			if bytes.Equal(e1.enc, e2.enc) {
-				continue
-			}
-			// The bytes are different, but the extensions might still be
-			// equal. We need to decode them to compare.
-		}
-
-		if m1 != nil && m2 != nil {
-			// Both are unencoded.
-			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-				return false
-			}
-			continue
-		}
-
-		// At least one is encoded. To do a semantically correct comparison
-		// we need to unmarshal them first.
-		var desc *ExtensionDesc
-		if m := extensionMaps[base]; m != nil {
-			desc = m[extNum]
-		}
-		if desc == nil {
-			// If both have only encoded form and the bytes are the same,
-			// it is handled above. We get here when the bytes are different.
-			// We don't know how to decode it, so just compare them as byte
-			// slices.
-			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
-			return false
-		}
-		var err error
-		if m1 == nil {
-			m1, err = decodeExtension(e1.enc, desc)
-		}
-		if m2 == nil && err == nil {
-			m2, err = decodeExtension(e2.enc, desc)
-		}
-		if err != nil {
-			// The encoded form is invalid.
-			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
-			return false
-		}
-		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
-			return false
-		}
-	}
-
-	return true
-}
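
Equal is likewise public API. A minimal sketch, again against a hypothetical generated message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/gen/findings" // hypothetical generated package
)

func main() {
	a := &pb.Finding{Rule: "aws-access-key"} // hypothetical field
	b := &pb.Finding{Rule: "aws-access-key"}
	c := &pb.Finding{Rule: "generic-secret"}

	fmt.Println(proto.Equal(a, b))   // true: same type, corresponding fields equal
	fmt.Println(proto.Equal(a, c))   // false
	fmt.Println(proto.Equal(a, nil)) // false: a nil argument is only equal to another nil
}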

+ 0 - 543
vendor/github.com/golang/protobuf/proto/extensions.go

@@ -1,543 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-	"sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
-	Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	extensionsWrite() map[int32]Extension
-	extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
-	Message
-	ExtensionRangeArray() []ExtensionRange
-	ExtensionMap() map[int32]Extension
-}
-
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
-	extendableProtoV1
-}
-
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
-	return e.ExtensionMap()
-}
-
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
-	return e.ExtensionMap(), notLocker{}
-}
-
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
-
-func (n notLocker) Lock()   {}
-func (n notLocker) Unlock() {}
-
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
-	switch p := p.(type) {
-	case extendableProto:
-		if isNilPtr(p) {
-			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
-		}
-		return p, nil
-	case extendableProtoV1:
-		if isNilPtr(p) {
-			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
-		}
-		return extensionAdapter{p}, nil
-	}
-	// Don't allocate a specific error containing %T:
-	// this is the hot path for Clone and MarshalText.
-	return nil, errNotExtendable
-}
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-func isNilPtr(x interface{}) bool {
-	v := reflect.ValueOf(x)
-	return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
-	// The struct must be indirect so that if a user inadvertently copies a
-	// generated message and its embedded XXX_InternalExtensions, they
-	// avoid the mayhem of a copied mutex.
-	//
-	// The mutex serializes all logically read-only operations to p.extensionMap.
-	// It is up to the client to ensure that write operations to p.extensionMap are
-	// mutually exclusive with other accesses.
-	p *struct {
-		mu           sync.Mutex
-		extensionMap map[int32]Extension
-	}
-}
-
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
-	if e.p == nil {
-		e.p = new(struct {
-			mu           sync.Mutex
-			extensionMap map[int32]Extension
-		})
-		e.p.extensionMap = make(map[int32]Extension)
-	}
-	return e.p.extensionMap
-}
-
-// extensionsRead returns the extensions map for read-only use.  It may be nil.
-// The caller must hold the returned mutex's lock when accessing elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
-	if e.p == nil {
-		return nil, nil
-	}
-	return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
-	ExtendedType  Message     // nil pointer to the type that is being extended
-	ExtensionType interface{} // nil pointer to the extension type
-	Field         int32       // field number
-	Name          string      // fully-qualified name of extension, for text formatting
-	Tag           string      // protobuf tag style
-	Filename      string      // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
-	t := reflect.TypeOf(ed.ExtensionType)
-	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
-	// When an extension is stored in a message using SetExtension
-	// only desc and value are set. When the message is marshaled
-	// enc will be set to the encoded form of the message.
-	//
-	// When a message is unmarshaled and contains extensions, each
-	// extension will have only enc set. When such an extension is
-	// accessed using GetExtension (or GetExtensions) desc and value
-	// will be set.
-	desc  *ExtensionDesc
-	value interface{}
-	enc   []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
-	epb, err := extendable(base)
-	if err != nil {
-		return
-	}
-	extmap := epb.extensionsWrite()
-	extmap[id] = Extension{enc: b}
-}
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
-	for _, er := range pb.ExtensionRangeArray() {
-		if er.Start <= field && field <= er.End {
-			return true
-		}
-	}
-	return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
-	var pbi interface{} = pb
-	// Check the extended type.
-	if ea, ok := pbi.(extensionAdapter); ok {
-		pbi = ea.extendableProtoV1
-	}
-	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
-		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
-	}
-	// Check the range.
-	if !isExtensionField(pb, extension.Field) {
-		return errors.New("proto: bad extension number; not in declared ranges")
-	}
-	return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
-	base  reflect.Type
-	field int32
-}
-
-var extProp = struct {
-	sync.RWMutex
-	m map[extPropKey]*Properties
-}{
-	m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
-	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
-	extProp.RLock()
-	if prop, ok := extProp.m[key]; ok {
-		extProp.RUnlock()
-		return prop
-	}
-	extProp.RUnlock()
-
-	extProp.Lock()
-	defer extProp.Unlock()
-	// Check again.
-	if prop, ok := extProp.m[key]; ok {
-		return prop
-	}
-
-	prop := new(Properties)
-	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
-	extProp.m[key] = prop
-	return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
-	// TODO: Check types, field numbers, etc.?
-	epb, err := extendable(pb)
-	if err != nil {
-		return false
-	}
-	extmap, mu := epb.extensionsRead()
-	if extmap == nil {
-		return false
-	}
-	mu.Lock()
-	_, ok := extmap[extension.Field]
-	mu.Unlock()
-	return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return
-	}
-	// TODO: Check types, field numbers, etc.?
-	extmap := epb.extensionsWrite()
-	delete(extmap, extension.Field)
-}
-
-// GetExtension retrieves a proto2 extended field from pb.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
-	}
-
-	if extension.ExtendedType != nil {
-		// can only check type if this is a complete descriptor
-		if err := checkExtensionTypes(epb, extension); err != nil {
-			return nil, err
-		}
-	}
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return defaultExtensionValue(extension)
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	e, ok := emap[extension.Field]
-	if !ok {
-		// defaultExtensionValue returns the default value or
-		// ErrMissingExtension if there is no default.
-		return defaultExtensionValue(extension)
-	}
-
-	if e.value != nil {
-		// Already decoded. Check the descriptor, though.
-		if e.desc != extension {
-			// This shouldn't happen. If it does, it means that
-			// GetExtension was called twice with two different
-			// descriptors with the same field number.
-			return nil, errors.New("proto: descriptor conflict")
-		}
-		return e.value, nil
-	}
-
-	if extension.ExtensionType == nil {
-		// incomplete descriptor
-		return e.enc, nil
-	}
-
-	v, err := decodeExtension(e.enc, extension)
-	if err != nil {
-		return nil, err
-	}
-
-	// Remember the decoded version and drop the encoded version.
-	// That way it is safe to mutate what we return.
-	e.value = v
-	e.desc = extension
-	e.enc = nil
-	emap[extension.Field] = e
-	return e.value, nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined, ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
-	if extension.ExtensionType == nil {
-		// incomplete descriptor, so no default
-		return nil, ErrMissingExtension
-	}
-
-	t := reflect.TypeOf(extension.ExtensionType)
-	props := extensionProperties(extension)
-
-	sf, _, err := fieldDefault(t, props)
-	if err != nil {
-		return nil, err
-	}
-
-	if sf == nil || sf.value == nil {
-		// There is no default value.
-		return nil, ErrMissingExtension
-	}
-
-	if t.Kind() != reflect.Ptr {
-		// We do not need to return a Ptr, we can directly return sf.value.
-		return sf.value, nil
-	}
-
-	// We need to return an interface{} that is a pointer to sf.value.
-	value := reflect.New(t).Elem()
-	value.Set(reflect.New(value.Type().Elem()))
-	if sf.kind == reflect.Int32 {
-		// We may have an int32 or an enum, but the underlying data is int32.
-		// Since we can't set an int32 into a non int32 reflect.value directly
-		// set it as a int32.
-		value.Elem().SetInt(int64(sf.value.(int32)))
-	} else {
-		value.Elem().Set(reflect.ValueOf(sf.value))
-	}
-	return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
-	t := reflect.TypeOf(extension.ExtensionType)
-	unmarshal := typeUnmarshaler(t, extension.Tag)
-
-	// t is a pointer to a struct, pointer to basic type or a slice.
-	// Allocate space to store the pointer/slice.
-	value := reflect.New(t).Elem()
-
-	var err error
-	for {
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		wire := int(x) & 7
-
-		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
-		if err != nil {
-			return nil, err
-		}
-
-		if len(b) == 0 {
-			break
-		}
-	}
-	return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
-	}
-	extensions = make([]interface{}, len(es))
-	for i, e := range es {
-		extensions[i], err = GetExtension(epb, e)
-		if err == ErrMissingExtension {
-			err = nil
-		}
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return nil, err
-	}
-	registeredExtensions := RegisteredExtensions(pb)
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return nil, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	extensions := make([]*ExtensionDesc, 0, len(emap))
-	for extid, e := range emap {
-		desc := e.desc
-		if desc == nil {
-			desc = registeredExtensions[extid]
-			if desc == nil {
-				desc = &ExtensionDesc{Field: extid}
-			}
-		}
-
-		extensions = append(extensions, desc)
-	}
-	return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
-	epb, err := extendable(pb)
-	if err != nil {
-		return err
-	}
-	if err := checkExtensionTypes(epb, extension); err != nil {
-		return err
-	}
-	typ := reflect.TypeOf(extension.ExtensionType)
-	if typ != reflect.TypeOf(value) {
-		return errors.New("proto: bad extension value type")
-	}
-	// nil extension values need to be caught early, because the
-	// encoder can't distinguish an ErrNil due to a nil extension
-	// from an ErrNil due to a missing field. Extensions are
-	// always optional, so the encoder would just swallow the error
-	// and drop all the extensions from the encoded message.
-	if reflect.ValueOf(value).IsNil() {
-		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
-	}
-
-	extmap := epb.extensionsWrite()
-	extmap[extension.Field] = Extension{desc: extension, value: value}
-	return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
-	epb, err := extendable(pb)
-	if err != nil {
-		return
-	}
-	m := epb.extensionsWrite()
-	for k := range m {
-		delete(m, k)
-	}
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
-	st := reflect.TypeOf(desc.ExtendedType).Elem()
-	m := extensionMaps[st]
-	if m == nil {
-		m = make(map[int32]*ExtensionDesc)
-		extensionMaps[st] = m
-	}
-	if _, ok := m[desc.Field]; ok {
-		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
-	}
-	m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
-	return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
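
The public surface of the extensions.go file deleted here is the set of accessors HasExtension, GetExtension, SetExtension, and ClearExtension. A hedged sketch of typical usage, assuming a hypothetical generated package pb that declares a proto2 message MyMessage and a string extension descriptor pb.E_Note; these names are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/extpb" // hypothetical generated package, not part of this PR
)

func main() {
	msg := &pb.MyMessage{}

	// SetExtension checks the descriptor against the message's declared
	// extension ranges and value type before storing the value.
	if err := proto.SetExtension(msg, pb.E_Note, proto.String("extended")); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(msg, pb.E_Note) {
		// GetExtension decodes lazily: after an Unmarshal the value may
		// still be in wire form and is parsed on first access.
		v, err := proto.GetExtension(msg, pb.E_Note)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*(v.(*string)))
	}

	proto.ClearExtension(msg, pb.E_Note)
}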

+ 0 - 979
vendor/github.com/golang/protobuf/proto/lib.go

@@ -1,979 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers.  It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
-  - Names are turned from camel_case to CamelCase for export.
-  - There are no methods on v to set fields; just treat
-	them as structure fields.
-  - There are getters that return a field's value if set,
-	and return the field's default value if unset.
-	The getters work even if the receiver is a nil message.
-  - The zero value for a struct is its correct initialization state.
-	All desired fields must be set before marshaling.
-  - A Reset() method will restore a protobuf struct to its zero state.
-  - Non-repeated fields are pointers to the values; nil means unset.
-	That is, optional or required field int32 f becomes F *int32.
-  - Repeated fields are slices.
-  - Helper functions are available to aid the setting of fields.
-	msg.Foo = proto.String("hello") // set field
-  - Constants are defined to hold the default values of all fields that
-	have them.  They have the form Default_StructName_FieldName.
-	Because the getter methods handle defaulted values,
-	direct use of these constants should be rare.
-  - Enums are given type names and maps from names to values.
-	Enum values are prefixed by the enclosing message's name, or by the
-	enum's type name if it is a top-level enum. Enum types have a String
-	method, and an Enum method to assist in message construction.
-  - Nested messages, groups and enums have type names prefixed with the name of
-	the surrounding message type.
-  - Extensions are given descriptor names that start with E_,
-	followed by an underscore-delimited list of the nested messages
-	that contain it (if any) followed by the CamelCased name of the
-	extension field itself.  HasExtension, ClearExtension, GetExtension
-	and SetExtension are functions for manipulating extensions.
-  - Oneof field sets are given a single field in their message,
-	with distinguished wrapper types for each possible field value.
-  - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
-  - Non-repeated fields of non-message type are values instead of pointers.
-  - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
-	package example;
-
-	enum FOO { X = 17; }
-
-	message Test {
-	  required string label = 1;
-	  optional int32 type = 2 [default=77];
-	  repeated int64 reps = 3;
-	  optional group OptionalGroup = 4 {
-	    required string RequiredField = 5;
-	  }
-	  oneof union {
-	    int32 number = 6;
-	    string name = 7;
-	  }
-	}
-
-The resulting file, test.pb.go, is:
-
-	package example
-
-	import proto "github.com/golang/protobuf/proto"
-	import math "math"
-
-	type FOO int32
-	const (
-		FOO_X FOO = 17
-	)
-	var FOO_name = map[int32]string{
-		17: "X",
-	}
-	var FOO_value = map[string]int32{
-		"X": 17,
-	}
-
-	func (x FOO) Enum() *FOO {
-		p := new(FOO)
-		*p = x
-		return p
-	}
-	func (x FOO) String() string {
-		return proto.EnumName(FOO_name, int32(x))
-	}
-	func (x *FOO) UnmarshalJSON(data []byte) error {
-		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
-		if err != nil {
-			return err
-		}
-		*x = FOO(value)
-		return nil
-	}
-
-	type Test struct {
-		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
-		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
-		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
-		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
-		// Types that are valid to be assigned to Union:
-		//	*Test_Number
-		//	*Test_Name
-		Union            isTest_Union `protobuf_oneof:"union"`
-		XXX_unrecognized []byte       `json:"-"`
-	}
-	func (m *Test) Reset()         { *m = Test{} }
-	func (m *Test) String() string { return proto.CompactTextString(m) }
-	func (*Test) ProtoMessage() {}
-
-	type isTest_Union interface {
-		isTest_Union()
-	}
-
-	type Test_Number struct {
-		Number int32 `protobuf:"varint,6,opt,name=number"`
-	}
-	type Test_Name struct {
-		Name string `protobuf:"bytes,7,opt,name=name"`
-	}
-
-	func (*Test_Number) isTest_Union() {}
-	func (*Test_Name) isTest_Union()   {}
-
-	func (m *Test) GetUnion() isTest_Union {
-		if m != nil {
-			return m.Union
-		}
-		return nil
-	}
-	const Default_Test_Type int32 = 77
-
-	func (m *Test) GetLabel() string {
-		if m != nil && m.Label != nil {
-			return *m.Label
-		}
-		return ""
-	}
-
-	func (m *Test) GetType() int32 {
-		if m != nil && m.Type != nil {
-			return *m.Type
-		}
-		return Default_Test_Type
-	}
-
-	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
-		if m != nil {
-			return m.Optionalgroup
-		}
-		return nil
-	}
-
-	type Test_OptionalGroup struct {
-		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
-	}
-	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
-	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
-	func (m *Test_OptionalGroup) GetRequiredField() string {
-		if m != nil && m.RequiredField != nil {
-			return *m.RequiredField
-		}
-		return ""
-	}
-
-	func (m *Test) GetNumber() int32 {
-		if x, ok := m.GetUnion().(*Test_Number); ok {
-			return x.Number
-		}
-		return 0
-	}
-
-	func (m *Test) GetName() string {
-		if x, ok := m.GetUnion().(*Test_Name); ok {
-			return x.Name
-		}
-		return ""
-	}
-
-	func init() {
-		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
-	}
-
-To create and play with a Test object:
-
-	package main
-
-	import (
-		"log"
-
-		"github.com/golang/protobuf/proto"
-		pb "./example.pb"
-	)
-
-	func main() {
-		test := &pb.Test{
-			Label: proto.String("hello"),
-			Type:  proto.Int32(17),
-			Reps:  []int64{1, 2, 3},
-			Optionalgroup: &pb.Test_OptionalGroup{
-				RequiredField: proto.String("good bye"),
-			},
-			Union: &pb.Test_Name{"fred"},
-		}
-		data, err := proto.Marshal(test)
-		if err != nil {
-			log.Fatal("marshaling error: ", err)
-		}
-		newTest := &pb.Test{}
-		err = proto.Unmarshal(data, newTest)
-		if err != nil {
-			log.Fatal("unmarshaling error: ", err)
-		}
-		// Now test and newTest contain the same data.
-		if test.GetLabel() != newTest.GetLabel() {
-			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
-		}
-		// Use a type switch to determine which oneof was set.
-		switch u := test.Union.(type) {
-		case *pb.Test_Number: // u.Number contains the number.
-		case *pb.Test_Name: // u.Name contains the string.
-		}
-		// etc.
-	}
-*/
-package proto
-
-import (
-	"encoding/json"
-	"fmt"
-	"log"
-	"reflect"
-	"sort"
-	"strconv"
-	"sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
-	if e.field == "" {
-		return fmt.Sprintf("proto: required field not set")
-	}
-	return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
-	return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
-	if e.field == "" {
-		return "proto: invalid UTF-8 detected"
-	}
-	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
-	return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or an InvalidUTF8 error.
-func isNonFatal(err error) bool {
-	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
-		return true
-	}
-	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
-		return true
-	}
-	return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether it was successful;
-// it returns false for any fatal non-nil error.
-func (nf *nonFatal) Merge(err error) (ok bool) {
-	if err == nil {
-		return true // not an error
-	}
-	if !isNonFatal(err) {
-		return false // fatal error
-	}
-	if nf.E == nil {
-		nf.E = err // store first instance of non-fatal error
-	}
-	return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
-	Reset()
-	String() string
-	ProtoMessage()
-}
-
-// Stats records allocation details about the protocol buffer encoders
-// and decoders.  Useful for tuning the library itself.
-type Stats struct {
-	Emalloc uint64 // mallocs in encode
-	Dmalloc uint64 // mallocs in decode
-	Encode  uint64 // number of encodes
-	Decode  uint64 // number of decodes
-	Chit    uint64 // number of cache hits
-	Cmiss   uint64 // number of cache misses
-	Size    uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers.  It may be reused between invocations to
-// reduce memory usage.  It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
-	buf   []byte // encode/decode byte stream
-	index int    // read point
-
-	deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
-	return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
-	p.buf = p.buf[0:0] // for reading/writing
-	p.index = 0        // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
-	p.buf = s
-	p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-//   - Repeated serialization of a message will return the same bytes.
-//   - Different processes of the same binary (which may be executing on
-//     different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
-	p.deterministic = deterministic
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
-	return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
-	return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
-	p := new(int32)
-	*p = int32(v)
-	return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
-	return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
-	return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
-	return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
-	return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
-	return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
-	return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name.  Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
-	s, ok := m[v]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
-	if data[0] == '"' {
-		// New style: enums are strings.
-		var repr string
-		if err := json.Unmarshal(data, &repr); err != nil {
-			return -1, err
-		}
-		val, ok := m[repr]
-		if !ok {
-			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
-		}
-		return val, nil
-	}
-	// Old style: enums are ints.
-	var val int32
-	if err := json.Unmarshal(data, &val); err != nil {
-		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
-	}
-	return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
-	var u uint64
-
-	obuf := p.buf
-	index := p.index
-	p.buf = b
-	p.index = 0
-	depth := 0
-
-	fmt.Printf("\n--- %s ---\n", s)
-
-out:
-	for {
-		for i := 0; i < depth; i++ {
-			fmt.Print("  ")
-		}
-
-		index := p.index
-		if index == len(p.buf) {
-			break
-		}
-
-		op, err := p.DecodeVarint()
-		if err != nil {
-			fmt.Printf("%3d: fetching op err %v\n", index, err)
-			break out
-		}
-		tag := op >> 3
-		wire := op & 7
-
-		switch wire {
-		default:
-			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
-				index, tag, wire)
-			break out
-
-		case WireBytes:
-			var r []byte
-
-			r, err = p.DecodeRawBytes(false)
-			if err != nil {
-				break out
-			}
-			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
-			if len(r) <= 6 {
-				for i := 0; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			} else {
-				for i := 0; i < 3; i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-				fmt.Printf(" ..")
-				for i := len(r) - 3; i < len(r); i++ {
-					fmt.Printf(" %.2x", r[i])
-				}
-			}
-			fmt.Printf("\n")
-
-		case WireFixed32:
-			u, err = p.DecodeFixed32()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
-		case WireFixed64:
-			u, err = p.DecodeFixed64()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
-		case WireVarint:
-			u, err = p.DecodeVarint()
-			if err != nil {
-				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
-				break out
-			}
-			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
-		case WireStartGroup:
-			fmt.Printf("%3d: t=%3d start\n", index, tag)
-			depth++
-
-		case WireEndGroup:
-			depth--
-			fmt.Printf("%3d: t=%3d end\n", index, tag)
-		}
-	}
-
-	if depth != 0 {
-		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
-	}
-	fmt.Printf("\n")
-
-	p.buf = obuf
-	p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
-	setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
-	v = v.Elem()
-
-	defaultMu.RLock()
-	dm, ok := defaults[v.Type()]
-	defaultMu.RUnlock()
-	if !ok {
-		dm = buildDefaultMessage(v.Type())
-		defaultMu.Lock()
-		defaults[v.Type()] = dm
-		defaultMu.Unlock()
-	}
-
-	for _, sf := range dm.scalars {
-		f := v.Field(sf.index)
-		if !f.IsNil() {
-			// field already set
-			continue
-		}
-		dv := sf.value
-		if dv == nil && !zeros {
-			// no explicit default, and don't want to set zeros
-			continue
-		}
-		fptr := f.Addr().Interface() // **T
-		// TODO: Consider batching the allocations we do here.
-		switch sf.kind {
-		case reflect.Bool:
-			b := new(bool)
-			if dv != nil {
-				*b = dv.(bool)
-			}
-			*(fptr.(**bool)) = b
-		case reflect.Float32:
-			f := new(float32)
-			if dv != nil {
-				*f = dv.(float32)
-			}
-			*(fptr.(**float32)) = f
-		case reflect.Float64:
-			f := new(float64)
-			if dv != nil {
-				*f = dv.(float64)
-			}
-			*(fptr.(**float64)) = f
-		case reflect.Int32:
-			// might be an enum
-			if ft := f.Type(); ft != int32PtrType {
-				// enum
-				f.Set(reflect.New(ft.Elem()))
-				if dv != nil {
-					f.Elem().SetInt(int64(dv.(int32)))
-				}
-			} else {
-				// int32 field
-				i := new(int32)
-				if dv != nil {
-					*i = dv.(int32)
-				}
-				*(fptr.(**int32)) = i
-			}
-		case reflect.Int64:
-			i := new(int64)
-			if dv != nil {
-				*i = dv.(int64)
-			}
-			*(fptr.(**int64)) = i
-		case reflect.String:
-			s := new(string)
-			if dv != nil {
-				*s = dv.(string)
-			}
-			*(fptr.(**string)) = s
-		case reflect.Uint8:
-			// exceptional case: []byte
-			var b []byte
-			if dv != nil {
-				db := dv.([]byte)
-				b = make([]byte, len(db))
-				copy(b, db)
-			} else {
-				b = []byte{}
-			}
-			*(fptr.(*[]byte)) = b
-		case reflect.Uint32:
-			u := new(uint32)
-			if dv != nil {
-				*u = dv.(uint32)
-			}
-			*(fptr.(**uint32)) = u
-		case reflect.Uint64:
-			u := new(uint64)
-			if dv != nil {
-				*u = dv.(uint64)
-			}
-			*(fptr.(**uint64)) = u
-		default:
-			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
-		}
-	}
-
-	for _, ni := range dm.nested {
-		f := v.Field(ni)
-		// f is *T or []*T or map[T]*T
-		switch f.Kind() {
-		case reflect.Ptr:
-			if f.IsNil() {
-				continue
-			}
-			setDefaults(f, recur, zeros)
-
-		case reflect.Slice:
-			for i := 0; i < f.Len(); i++ {
-				e := f.Index(i)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-
-		case reflect.Map:
-			for _, k := range f.MapKeys() {
-				e := f.MapIndex(k)
-				if e.IsNil() {
-					continue
-				}
-				setDefaults(e, recur, zeros)
-			}
-		}
-	}
-}
-
-var (
-	// defaults maps a protocol buffer struct type to a slice of the fields,
-	// with its scalar fields set to their proto-declared non-zero default values.
-	defaultMu sync.RWMutex
-	defaults  = make(map[reflect.Type]defaultMessage)
-
-	int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
-	scalars []scalarField
-	nested  []int // struct field index of nested messages
-}
-
-type scalarField struct {
-	index int          // struct field index
-	kind  reflect.Kind // element type (the T in *T or []T)
-	value interface{}  // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
-	sprop := GetProperties(t)
-	for _, prop := range sprop.Prop {
-		fi, ok := sprop.decoderTags.get(prop.Tag)
-		if !ok {
-			// XXX_unrecognized
-			continue
-		}
-		ft := t.Field(fi).Type
-
-		sf, nested, err := fieldDefault(ft, prop)
-		switch {
-		case err != nil:
-			log.Print(err)
-		case nested:
-			dm.nested = append(dm.nested, fi)
-		case sf != nil:
-			sf.index = fi
-			dm.scalars = append(dm.scalars, *sf)
-		}
-	}
-
-	return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field cannot have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
-	var canHaveDefault bool
-	switch ft.Kind() {
-	case reflect.Ptr:
-		if ft.Elem().Kind() == reflect.Struct {
-			nestedMessage = true
-		} else {
-			canHaveDefault = true // proto2 scalar field
-		}
-
-	case reflect.Slice:
-		switch ft.Elem().Kind() {
-		case reflect.Ptr:
-			nestedMessage = true // repeated message
-		case reflect.Uint8:
-			canHaveDefault = true // bytes field
-		}
-
-	case reflect.Map:
-		if ft.Elem().Kind() == reflect.Ptr {
-			nestedMessage = true // map with message values
-		}
-	}
-
-	if !canHaveDefault {
-		if nestedMessage {
-			return nil, true, nil
-		}
-		return nil, false, nil
-	}
-
-	// We now know that ft is a pointer or slice.
-	sf = &scalarField{kind: ft.Elem().Kind()}
-
-	// scalar fields without defaults
-	if !prop.HasDefault {
-		return sf, false, nil
-	}
-
-	// a scalar field: either *T or []byte
-	switch ft.Elem().Kind() {
-	case reflect.Bool:
-		x, err := strconv.ParseBool(prop.Default)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Float32:
-		x, err := strconv.ParseFloat(prop.Default, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
-		}
-		sf.value = float32(x)
-	case reflect.Float64:
-		x, err := strconv.ParseFloat(prop.Default, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.Int32:
-		x, err := strconv.ParseInt(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
-		}
-		sf.value = int32(x)
-	case reflect.Int64:
-		x, err := strconv.ParseInt(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	case reflect.String:
-		sf.value = prop.Default
-	case reflect.Uint8:
-		// []byte (not *uint8)
-		sf.value = []byte(prop.Default)
-	case reflect.Uint32:
-		x, err := strconv.ParseUint(prop.Default, 10, 32)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
-		}
-		sf.value = uint32(x)
-	case reflect.Uint64:
-		x, err := strconv.ParseUint(prop.Default, 10, 64)
-		if err != nil {
-			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
-		}
-		sf.value = x
-	default:
-		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
-	}
-
-	return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
-	s := mapKeySorter{vs: vs}
-
-	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
-	if len(vs) == 0 {
-		return s
-	}
-	switch vs[0].Kind() {
-	case reflect.Int32, reflect.Int64:
-		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
-	case reflect.Uint32, reflect.Uint64:
-		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
-	case reflect.Bool:
-		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
-	case reflect.String:
-		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
-	default:
-		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
-	}
-
-	return s
-}
-
-type mapKeySorter struct {
-	vs   []reflect.Value
-	less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int      { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
-	return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint32, reflect.Uint64:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.String:
-		return v.String() == ""
-	}
-	return false
-}
-
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that the generated code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
-
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that the generated code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
-	marshal   *marshalInfo
-	unmarshal *unmarshalInfo
-	merge     *mergeInfo
-	discard   *discardInfo
-}
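
Besides the Marshal/Unmarshal entry points, the deleted lib.go carried the reusable Buffer type (with SetDeterministic) and the pointer helpers such as proto.String and proto.Int32. A minimal sketch of deterministic marshaling with a reused Buffer, again assuming the hypothetical generated package pb with a Test message used above.

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/testpb" // hypothetical generated package, not part of this PR
)

func main() {
	msg := &pb.Test{
		Label: proto.String("hello"), // proto2 scalars are pointers, hence the helpers
		Type:  proto.Int32(17),
	}

	// A Buffer can be reused across Marshal calls to cut allocations;
	// SetDeterministic(true) sorts map entries by key so that equal
	// messages produced by the same binary serialize identically.
	var buf proto.Buffer
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		log.Fatal(err)
	}

	var out pb.Test
	if err := proto.Unmarshal(buf.Bytes(), &out); err != nil {
		log.Fatal(err)
	}
}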

+ 0 - 314
vendor/github.com/golang/protobuf/proto/message_set.go

@@ -1,314 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"sync"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-//   message MessageSet {
-//     repeated group Item = 1 {
-//       required int32 type_id = 2;
-//       required string message = 3;
-//     };
-//   }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
-	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
-	Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
-	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
-	XXX_unrecognized []byte
-	// TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
-	MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return nil
-	}
-	id := mti.MessageTypeId()
-	for _, item := range ms.Item {
-		if *item.TypeId == id {
-			return item
-		}
-	}
-	return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
-	return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
-	if item := ms.find(pb); item != nil {
-		return Unmarshal(item.Message, pb)
-	}
-	if _, ok := pb.(messageTypeIder); !ok {
-		return errNoMessageTypeID
-	}
-	return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
-	msg, err := Marshal(pb)
-	if err != nil {
-		return err
-	}
-	if item := ms.find(pb); item != nil {
-		// reuse existing item
-		item.Message = msg
-		return nil
-	}
-
-	mti, ok := pb.(messageTypeIder)
-	if !ok {
-		return errNoMessageTypeID
-	}
-
-	mtid := mti.MessageTypeId()
-	ms.Item = append(ms.Item, &_MessageSet_Item{
-		TypeId:  &mtid,
-		Message: msg,
-	})
-	return nil
-}
-
-func (ms *messageSet) Reset()         { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage()     {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
-	i := 0
-	for ; buf[i]&0x80 != 0; i++ {
-	}
-	return buf[i+1:]
-}
-
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	return marshalMessageSet(exts, false)
-}
-
-// marshalMessageSet implements the above function, with an option to toggle deterministic output during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var u marshalInfo
-		siz := u.sizeMessageSet(exts)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, exts, deterministic)
-
-	case map[int32]Extension:
-		// This is an old-style extension map.
-		// Wrap it in a new-style XXX_InternalExtensions.
-		ie := XXX_InternalExtensions{
-			p: &struct {
-				mu           sync.Mutex
-				extensionMap map[int32]Extension
-			}{
-				extensionMap: exts,
-			},
-		}
-
-		var u marshalInfo
-		siz := u.sizeMessageSet(&ie)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, &ie, deterministic)
-
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m = exts.extensionsWrite()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return errors.New("proto: not an extension map")
-	}
-
-	ms := new(messageSet)
-	if err := Unmarshal(buf, ms); err != nil {
-		return err
-	}
-	for _, item := range ms.Item {
-		id := *item.TypeId
-		msg := item.Message
-
-		// Restore wire type and field number varint, plus length varint.
-		// Be careful to preserve duplicate items.
-		b := EncodeVarint(uint64(id)<<3 | WireBytes)
-		if ext, ok := m[id]; ok {
-			// Existing data; rip off the tag and length varint
-			// so we join the new data correctly.
-			// We can assume that ext.enc is set because we are unmarshaling.
-			o := ext.enc[len(b):]   // skip wire type and field number
-			_, n := DecodeVarint(o) // calculate length of length varint
-			o = o[n:]               // skip length varint
-			msg = append(o, msg...) // join old data and new data
-		}
-		b = append(b, EncodeVarint(uint64(len(msg)))...)
-		b = append(b, msg...)
-
-		m[id] = Extension{enc: b}
-	}
-	return nil
-}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var mu sync.Locker
-		m, mu = exts.extensionsRead()
-		if m != nil {
-			// Keep the extensions map locked until we're done marshaling to prevent
-			// races between marshaling and unmarshaling the lazily-{en,de}coded
-			// values.
-			mu.Lock()
-			defer mu.Unlock()
-		}
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-	var b bytes.Buffer
-	b.WriteByte('{')
-
-	// Process the map in key order for deterministic output.
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-	for i, id := range ids {
-		ext := m[id]
-		msd, ok := messageSetMap[id]
-		if !ok {
-			// Unknown type; we can't render it, so skip it.
-			continue
-		}
-
-		if i > 0 && b.Len() > 1 {
-			b.WriteByte(',')
-		}
-
-		fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-		x := ext.value
-		if x == nil {
-			x = reflect.New(msd.t.Elem()).Interface()
-			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-				return nil, err
-			}
-		}
-		d, err := json.Marshal(x)
-		if err != nil {
-			return nil, err
-		}
-		b.Write(d)
-	}
-	b.WriteByte('}')
-	return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-	// Common-case fast path.
-	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-		return nil
-	}
-
-	// This is fairly tricky, and it's not clear that it is needed.
-	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-	t    reflect.Type // pointer to struct
-	name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-	messageSetMap[fieldNum] = messageSetDesc{
-		t:    reflect.TypeOf(m),
-		name: name,
-	}
-}

+ 0 - 357
vendor/github.com/golang/protobuf/proto/pointer_reflect.go

@@ -1,357 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
-	"reflect"
-	"sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
-	return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
-	v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
-	return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
-	v := reflect.ValueOf(*i)
-	u := reflect.New(v.Type())
-	u.Elem().Set(v)
-	return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer.  v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
-	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
-	return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
-	n, m := s.Len(), s.Cap()
-	if n < m {
-		s.SetLen(n + 1)
-	} else {
-		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
-	}
-	return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
-	return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
-	return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
-	return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
-	return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
-	func (p pointer) toInt32Ptr() **int32 {
-		return p.v.Interface().(**int32)
-}
-	func (p pointer) toInt32Slice() *[]int32 {
-		return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		return p.v.Elem().Interface().(*int32)
-	}
-	// an enum
-	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
-	// Allocate value in a *int32. Possibly convert that to a *enum.
-	// Then assign it to a **int32 or **enum.
-	// Note: we can convert *int32 to *enum, but we can't convert
-	// **int32 to **enum!
-	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		return p.v.Elem().Interface().([]int32)
-	}
-	// an enum
-	// Allocate a []int32, then assign []enum's values into it.
-	// Note: we can't convert []enum to []int32.
-	slice := p.v.Elem()
-	s := make([]int32, slice.Len())
-	for i := 0; i < slice.Len(); i++ {
-		s[i] = int32(slice.Index(i).Int())
-	}
-	return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
-	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
-		// raw int32 type
-		p.v.Elem().Set(reflect.ValueOf(v))
-		return
-	}
-	// an enum
-	// Allocate a []enum, then assign []int32's values into it.
-	// Note: we can't convert []enum to []int32.
-	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
-	for i, x := range v {
-		slice.Index(i).SetInt(int64(x))
-	}
-	p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
-	grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
-	return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
-	return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
-	return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
-	return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
-	return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
-	return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
-	return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
-	return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
-	return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
-	return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
-	return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
-	return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
-	return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
-	return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
-	return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
-	return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
-	return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
-	return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
-	return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
-	return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
-	return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
-	return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
-	return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
-	p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
-	grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
-	if v == nil {
-		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
-		return
-	}
-	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
-	for _, p := range v {
-		s = reflect.Append(s, p.v)
-	}
-	p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
-	if p.v.Elem().IsNil() {
-		return pointer{v: p.v.Elem()}
-	}
-	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
-	// TODO: check that p.v.Type().Elem() == t?
-	return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
-	atomicLock.Lock()
-	defer atomicLock.Unlock()
-	*p = v
-}
-
-var atomicLock sync.Mutex

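The core of the removed pointer_reflect.go is how a generic field pointer is built without package unsafe: a field is identified by its reflect index path (toField), and pointer.offset resolves it with FieldByIndex(...).Addr(). A minimal, self-contained sketch of that mechanism (demoMsg and the variable names are illustrative, not the vendored API):

```go
package main

import (
	"fmt"
	"reflect"
)

// demoMsg is a hypothetical message type used only for illustration.
type demoMsg struct {
	Name  string
	Count int64
}

func main() {
	m := &demoMsg{Name: "x"}
	v := reflect.ValueOf(m) // pointer-to-struct, like the generic "pointer" wrapper

	// Field identifier: the index path of Count, analogous to toField.
	sf, _ := reflect.TypeOf(demoMsg{}).FieldByName("Count")
	idx := sf.Index

	// Offset: pointer-to-struct -> addressable pointer-to-field,
	// analogous to pointer.offset.
	countPtr := v.Elem().FieldByIndex(idx).Addr().Interface().(*int64)
	*countPtr = 42

	fmt.Println(m.Count) // 42
}
```

This is the slower path selected by the purego/appengine/js build tags; the unsafe variant in the next removed file does the same job with raw byte offsets.
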
+ 0 - 308
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go

@@ -1,308 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
-	"reflect"
-	"sync/atomic"
-	"unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
-	return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
-	return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
-	p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
-	// Super-tricky - read pointer out of data word of interface value.
-	// Saves ~25ns over the equivalent:
-	// return valToPointer(reflect.ValueOf(*i))
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
-	// Super-tricky - read or get the address of data word of interface value.
-	if isptr {
-		// The interface is of pointer type, thus it is a direct interface.
-		// The data word is the pointer data itself. We take its address.
-		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
-	}
-	// The interface is not of pointer type. The data word is the pointer
-	// to the data.
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
-	return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
-	// For safety, we should panic if !f.IsValid; however, calling panic causes
-	// this to no longer be inlineable, which is a serious performance cost.
-	/*
-		if !f.IsValid() {
-			panic("invalid field")
-		}
-	*/
-	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
-	return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
-	return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
-	return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
-	return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
-	return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
-	func (p pointer) toInt32Ptr() **int32 {
-		return (**int32)(p.p)
-	}
-	func (p pointer) toInt32Slice() *[]int32 {
-		return (*[]int32)(p.p)
-	}
-*/
-func (p pointer) getInt32Ptr() *int32 {
-	return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
-	*(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
-	return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
-	*(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
-	s := (*[]int32)(p.p)
-	*s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
-	return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
-	return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
-	return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
-	return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
-	return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
-	return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
-	return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
-	return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
-	return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
-	return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
-	return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
-	return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
-	return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
-	return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
-	return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
-	return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
-	return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
-	return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
-	return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
-	return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
-	return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
-	return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
-	// Super-tricky - p should point to a []*T where T is a
-	// message type. We load it as []pointer.
-	return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
-	// Super-tricky - p should point to a []*T where T is a
-	// message type. We store it as []pointer.
-	*(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
-	return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
-	*(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
-	s := (*[]unsafe.Pointer)(p.p)
-	*s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
-	// Super-tricky - read pointer out of data word of interface value.
-	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
-	return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
-	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
-	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
-	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
-	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}

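For comparison with the reflect variant, the removed pointer_unsafe.go identifies a field by its byte offset (field(f.Offset)) and resolves it with plain pointer arithmetic. A minimal sketch of that trade-off, again with an illustrative demoMsg type rather than anything from the library:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// demoMsg is a hypothetical message type used only for illustration.
type demoMsg struct {
	Name  string
	Count int64
}

func main() {
	m := &demoMsg{Name: "x"}

	// Field identifier: the byte offset of Count, analogous to field(f.Offset).
	sf, _ := reflect.TypeOf(demoMsg{}).FieldByName("Count")
	off := sf.Offset

	// Offset: base pointer + byte offset in one expression,
	// analogous to pointer.offset in the unsafe implementation.
	countPtr := (*int64)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
	*countPtr = 42

	fmt.Println(m.Count) // 42
}
```

Keeping the uintptr conversion and the addition in a single expression follows the usage pattern documented for package unsafe for this kind of field access.
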
+ 0 - 544
vendor/github.com/golang/protobuf/proto/properties.go

@@ -1,544 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
-	WireVarint     = 0
-	WireFixed64    = 1
-	WireBytes      = 2
-	WireStartGroup = 3
-	WireEndGroup   = 4
-	WireFixed32    = 5
-)
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
-	fastTags []int
-	slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
-	if t > 0 && t < tagMapFastLimit {
-		if t >= len(p.fastTags) {
-			return 0, false
-		}
-		fi := p.fastTags[t]
-		return fi, fi >= 0
-	}
-	fi, ok := p.slowTags[t]
-	return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
-	if t > 0 && t < tagMapFastLimit {
-		for len(p.fastTags) < t+1 {
-			p.fastTags = append(p.fastTags, -1)
-		}
-		p.fastTags[t] = fi
-		return
-	}
-	if p.slowTags == nil {
-		p.slowTags = make(map[int]int)
-	}
-	p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
-	Prop             []*Properties  // properties for each field
-	reqCount         int            // required count
-	decoderTags      tagMap         // map from proto tag to struct field number
-	decoderOrigNames map[string]int // map from original name to struct field number
-	order            []int          // list of struct field numbers in tag order
-
-	// OneofTypes contains information about the oneof fields in this message.
-	// It is keyed by the original name of a field.
-	OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
-	Type  reflect.Type // pointer to generated struct type for this oneof field
-	Field int          // struct field number of the containing oneof in the message
-	Prop  *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
-	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
-	Name     string // name of the field, for error messages
-	OrigName string // original name before protocol compiler (always set)
-	JSONName string // name to use for JSON; determined by protoc
-	Wire     string
-	WireType int
-	Tag      int
-	Required bool
-	Optional bool
-	Repeated bool
-	Packed   bool   // relevant for repeated primitives only
-	Enum     string // set for enum types only
-	proto3   bool   // whether this is known to be a proto3 field
-	oneof    bool   // whether this is a oneof field
-
-	Default    string // default value
-	HasDefault bool   // whether an explicit default was provided
-
-	stype reflect.Type      // set for struct types only
-	sprop *StructProperties // set for struct types only
-
-	mtype      reflect.Type // set for map types only
-	MapKeyProp *Properties  // set for map types only
-	MapValProp *Properties  // set for map types only
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
-	s := p.Wire
-	s += ","
-	s += strconv.Itoa(p.Tag)
-	if p.Required {
-		s += ",req"
-	}
-	if p.Optional {
-		s += ",opt"
-	}
-	if p.Repeated {
-		s += ",rep"
-	}
-	if p.Packed {
-		s += ",packed"
-	}
-	s += ",name=" + p.OrigName
-	if p.JSONName != p.OrigName {
-		s += ",json=" + p.JSONName
-	}
-	if p.proto3 {
-		s += ",proto3"
-	}
-	if p.oneof {
-		s += ",oneof"
-	}
-	if len(p.Enum) > 0 {
-		s += ",enum=" + p.Enum
-	}
-	if p.HasDefault {
-		s += ",def=" + p.Default
-	}
-	return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
-	// "bytes,49,opt,name=foo,def=hello!"
-	fields := strings.Split(s, ",") // breaks def=, but handled below.
-	if len(fields) < 2 {
-		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
-		return
-	}
-
-	p.Wire = fields[0]
-	switch p.Wire {
-	case "varint":
-		p.WireType = WireVarint
-	case "fixed32":
-		p.WireType = WireFixed32
-	case "fixed64":
-		p.WireType = WireFixed64
-	case "zigzag32":
-		p.WireType = WireVarint
-	case "zigzag64":
-		p.WireType = WireVarint
-	case "bytes", "group":
-		p.WireType = WireBytes
-		// no numeric converter for non-numeric types
-	default:
-		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
-		return
-	}
-
-	var err error
-	p.Tag, err = strconv.Atoi(fields[1])
-	if err != nil {
-		return
-	}
-
-outer:
-	for i := 2; i < len(fields); i++ {
-		f := fields[i]
-		switch {
-		case f == "req":
-			p.Required = true
-		case f == "opt":
-			p.Optional = true
-		case f == "rep":
-			p.Repeated = true
-		case f == "packed":
-			p.Packed = true
-		case strings.HasPrefix(f, "name="):
-			p.OrigName = f[5:]
-		case strings.HasPrefix(f, "json="):
-			p.JSONName = f[5:]
-		case strings.HasPrefix(f, "enum="):
-			p.Enum = f[5:]
-		case f == "proto3":
-			p.proto3 = true
-		case f == "oneof":
-			p.oneof = true
-		case strings.HasPrefix(f, "def="):
-			p.HasDefault = true
-			p.Default = f[4:] // rest of string
-			if i+1 < len(fields) {
-				// Commas aren't escaped, and def is always last.
-				p.Default += "," + strings.Join(fields[i+1:], ",")
-				break outer
-			}
-		}
-	}
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
-	switch t1 := typ; t1.Kind() {
-	case reflect.Ptr:
-		if t1.Elem().Kind() == reflect.Struct {
-			p.stype = t1.Elem()
-		}
-
-	case reflect.Slice:
-		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
-			p.stype = t2.Elem()
-		}
-
-	case reflect.Map:
-		p.mtype = t1
-		p.MapKeyProp = &Properties{}
-		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.MapValProp = &Properties{}
-		vtype := p.mtype.Elem()
-		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
-			// The value type is not a message (*T) or bytes ([]byte),
-			// so we need encoders for the pointer to this type.
-			vtype = reflect.PtrTo(vtype)
-		}
-		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
-	}
-
-	if p.stype != nil {
-		if lockGetProp {
-			p.sprop = GetProperties(p.stype)
-		} else {
-			p.sprop = getPropertiesLocked(p.stype)
-		}
-	}
-}
-
-var (
-	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
-	p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
-	// "bytes,49,opt,def=hello!"
-	p.Name = name
-	p.OrigName = name
-	if tag == "" {
-		return
-	}
-	p.Parse(tag)
-	p.setFieldProps(typ, f, lockGetProp)
-}
-
-var (
-	propertiesMu  sync.RWMutex
-	propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
-	if t.Kind() != reflect.Struct {
-		panic("proto: type must have kind struct")
-	}
-
-	// Most calls to GetProperties in a long-running program will be
-	// retrieving details for types we have seen before.
-	propertiesMu.RLock()
-	sprop, ok := propertiesMap[t]
-	propertiesMu.RUnlock()
-	if ok {
-		if collectStats {
-			stats.Chit++
-		}
-		return sprop
-	}
-
-	propertiesMu.Lock()
-	sprop = getPropertiesLocked(t)
-	propertiesMu.Unlock()
-	return sprop
-}
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
-	if prop, ok := propertiesMap[t]; ok {
-		if collectStats {
-			stats.Chit++
-		}
-		return prop
-	}
-	if collectStats {
-		stats.Cmiss++
-	}
-
-	prop := new(StructProperties)
-	// in case of recursive protos, fill this in now.
-	propertiesMap[t] = prop
-
-	// build properties
-	prop.Prop = make([]*Properties, t.NumField())
-	prop.order = make([]int, t.NumField())
-
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		p := new(Properties)
-		name := f.Name
-		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
-		oneof := f.Tag.Get("protobuf_oneof") // special case
-		if oneof != "" {
-			// Oneof fields don't use the traditional protobuf tag.
-			p.OrigName = oneof
-		}
-		prop.Prop[i] = p
-		prop.order[i] = i
-		if debug {
-			print(i, " ", f.Name, " ", t.String(), " ")
-			if p.Tag > 0 {
-				print(p.String())
-			}
-			print("\n")
-		}
-	}
-
-	// Re-order prop.order.
-	sort.Sort(prop)
-
-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-		var oots []interface{}
-		_, _, _, oots = om.XXX_OneofFuncs()
-
-		// Interpret oneof metadata.
-		prop.OneofTypes = make(map[string]*OneofProperties)
-		for _, oot := range oots {
-			oop := &OneofProperties{
-				Type: reflect.ValueOf(oot).Type(), // *T
-				Prop: new(Properties),
-			}
-			sft := oop.Type.Elem().Field(0)
-			oop.Prop.Name = sft.Name
-			oop.Prop.Parse(sft.Tag.Get("protobuf"))
-			// There will be exactly one interface field that
-			// this new value is assignable to.
-			for i := 0; i < t.NumField(); i++ {
-				f := t.Field(i)
-				if f.Type.Kind() != reflect.Interface {
-					continue
-				}
-				if !oop.Type.AssignableTo(f.Type) {
-					continue
-				}
-				oop.Field = i
-				break
-			}
-			prop.OneofTypes[oop.Prop.OrigName] = oop
-		}
-	}
-
-	// build required counts
-	// build tags
-	reqCount := 0
-	prop.decoderOrigNames = make(map[string]int)
-	for i, p := range prop.Prop {
-		if strings.HasPrefix(p.Name, "XXX_") {
-			// Internal fields should not appear in tags/origNames maps.
-			// They are handled specially when encoding and decoding.
-			continue
-		}
-		if p.Required {
-			reqCount++
-		}
-		prop.decoderTags.put(p.Tag, i)
-		prop.decoderOrigNames[p.OrigName] = i
-	}
-	prop.reqCount = reqCount
-
-	return prop
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
-	if _, ok := enumValueMaps[typeName]; ok {
-		panic("proto: duplicate enum registered: " + typeName)
-	}
-	enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
-	return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
-	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
-	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
-	revProtoTypes  = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
-	if _, ok := protoTypedNils[name]; ok {
-		// TODO: Some day, make this a panic.
-		log.Printf("proto: duplicate proto type registered: %s", name)
-		return
-	}
-	t := reflect.TypeOf(x)
-	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
-		// Generated code always calls RegisterType with nil x.
-		// This check is just for extra safety.
-		protoTypedNils[name] = x
-	} else {
-		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
-	}
-	revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
-	if reflect.TypeOf(x).Kind() != reflect.Map {
-		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
-	}
-	if _, ok := protoMapTypes[name]; ok {
-		log.Printf("proto: duplicate proto type registered: %s", name)
-		return
-	}
-	t := reflect.TypeOf(x)
-	protoMapTypes[name] = t
-	revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
-	type xname interface {
-		XXX_MessageName() string
-	}
-	if m, ok := x.(xname); ok {
-		return m.XXX_MessageName()
-	}
-	return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
-	if t, ok := protoTypedNils[name]; ok {
-		return reflect.TypeOf(t)
-	}
-	return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
-	protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
-	protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }

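Much of the removed properties.go is bookkeeping around one operation: Properties.Parse splitting the generated `protobuf` struct tag into a wire encoding, a tag number, and trailing options. A standalone sketch of that split, using the example tag quoted in the source comments (none of the names below are the vendored API):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A typical generated struct tag, as in the Properties.Parse comment.
	tag := "bytes,49,opt,name=foo,def=hello!"

	fields := strings.Split(tag, ",")
	wire := fields[0]                 // "bytes" -> WireBytes
	num, _ := strconv.Atoi(fields[1]) // 49      -> field/tag number
	opts := fields[2:]                // "opt", "name=foo", "def=hello!"

	// Commas inside def= are not escaped, so (like the removed code) the
	// default value is everything after "def=" to the end of the tag.
	var def string
	if i := strings.Index(tag, ",def="); i >= 0 {
		def = tag[i+len(",def="):]
	}

	fmt.Println(wire, num, opts, def)
}
```
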
+ 0 - 2767
vendor/github.com/golang/protobuf/proto/table_marshal.go

@@ -1,2767 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
-	typ          reflect.Type
-	fields       []*marshalFieldInfo
-	unrecognized field                      // offset of XXX_unrecognized
-	extensions   field                      // offset of XXX_InternalExtensions
-	v1extensions field                      // offset of XXX_extensions
-	sizecache    field                      // offset of XXX_sizecache
-	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
-	messageset   bool                       // uses message set wire format
-	hasmarshaler bool                       // has custom marshaler
-	sync.RWMutex                            // protect extElems map, also for initialization
-	extElems     map[int32]*marshalElemInfo // info of extension elements
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
-	field      field
-	wiretag    uint64 // tag in wire format
-	tagsize    int    // size of tag in wire format
-	sizer      sizer
-	marshaler  marshaler
-	isPointer  bool
-	required   bool                              // field is required
-	name       string                            // name of the field, for error reporting
-	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
-	wiretag   uint64 // tag in wire format
-	tagsize   int    // size of tag in wire format
-	sizer     sizer
-	marshaler marshaler
-	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
-}
-
-var (
-	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
-	marshalInfoLock sync.Mutex
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily be initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
-	marshalInfoLock.Lock()
-	u, ok := marshalInfoMap[t]
-	if !ok {
-		u = &marshalInfo{typ: t}
-		marshalInfoMap[t] = u
-	}
-	marshalInfoLock.Unlock()
-	return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
-	u := getMessageMarshalInfo(msg, a)
-	ptr := toPointer(&msg)
-	if ptr.isNil() {
-		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
-		// so it satisfies the interface, and msg == nil wouldn't
-		// catch it. We don't want to crash in this case.
-		return 0
-	}
-	return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
-	u := getMessageMarshalInfo(msg, a)
-	ptr := toPointer(&msg)
-	if ptr.isNil() {
-		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
-		// so it satisfies the interface, and msg == nil wouldn't
-		// catch it. We don't want to crash in this case.
-		return b, ErrNil
-	}
-	return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
-	// u := a.marshal, but atomically.
-	// We use an atomic here to ensure memory consistency.
-	u := atomicLoadMarshalInfo(&a.marshal)
-	if u == nil {
-		// Get marshal information from type of message.
-		t := reflect.ValueOf(msg).Type()
-		if t.Kind() != reflect.Ptr {
-			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
-		}
-		u = getMarshalInfo(t.Elem())
-		// Store it in the cache for later users.
-		// a.marshal = u, but atomically.
-		atomicStoreMarshalInfo(&a.marshal, u)
-	}
-	return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeMarshalInfo()
-	}
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if u.hasmarshaler {
-		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
-		b, _ := m.Marshal()
-		return len(b)
-	}
-
-	n := 0
-	for _, f := range u.fields {
-		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
-			// nil pointer always marshals to nothing
-			continue
-		}
-		n += f.sizer(ptr.offset(f.field), f.tagsize)
-	}
-	if u.extensions.IsValid() {
-		e := ptr.offset(u.extensions).toExtensions()
-		if u.messageset {
-			n += u.sizeMessageSet(e)
-		} else {
-			n += u.sizeExtensions(e)
-		}
-	}
-	if u.v1extensions.IsValid() {
-		m := *ptr.offset(u.v1extensions).toOldExtensions()
-		n += u.sizeV1Extensions(m)
-	}
-	if u.unrecognized.IsValid() {
-		s := *ptr.offset(u.unrecognized).toBytes()
-		n += len(s)
-	}
-	// cache the result for use in marshal
-	if u.sizecache.IsValid() {
-		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
-	}
-	return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
-	if u.sizecache.IsValid() {
-		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
-	}
-	return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, returns the slice and error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeMarshalInfo()
-	}
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if u.hasmarshaler {
-		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
-		b1, err := m.Marshal()
-		b = append(b, b1...)
-		return b, err
-	}
-
-	var err, errLater error
-	// The old marshaler encodes extensions at the beginning.
-	if u.extensions.IsValid() {
-		e := ptr.offset(u.extensions).toExtensions()
-		if u.messageset {
-			b, err = u.appendMessageSet(b, e, deterministic)
-		} else {
-			b, err = u.appendExtensions(b, e, deterministic)
-		}
-		if err != nil {
-			return b, err
-		}
-	}
-	if u.v1extensions.IsValid() {
-		m := *ptr.offset(u.v1extensions).toOldExtensions()
-		b, err = u.appendV1Extensions(b, m, deterministic)
-		if err != nil {
-			return b, err
-		}
-	}
-	for _, f := range u.fields {
-		if f.required {
-			if ptr.offset(f.field).getPointer().isNil() {
-				// Required field is not set.
-				// We record the error but keep going, to give a complete marshaling.
-				if errLater == nil {
-					errLater = &RequiredNotSetError{f.name}
-				}
-				continue
-			}
-		}
-		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
-			// nil pointer always marshals to nothing
-			continue
-		}
-		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
-		if err != nil {
-			if err1, ok := err.(*RequiredNotSetError); ok {
-				// Required field in submessage is not set.
-				// We record the error but keep going, to give a complete marshaling.
-				if errLater == nil {
-					errLater = &RequiredNotSetError{f.name + "." + err1.field}
-				}
-				continue
-			}
-			if err == errRepeatedHasNil {
-				err = errors.New("proto: repeated field " + f.name + " has nil element")
-			}
-			if err == errInvalidUTF8 {
-				if errLater == nil {
-					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
-					errLater = &invalidUTF8Error{fullName}
-				}
-				continue
-			}
-			return b, err
-		}
-	}
-	if u.unrecognized.IsValid() {
-		s := *ptr.offset(u.unrecognized).toBytes()
-		b = append(b, s...)
-	}
-	return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
-	u.Lock()
-	defer u.Unlock()
-	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
-		return
-	}
-
-	t := u.typ
-	u.unrecognized = invalidField
-	u.extensions = invalidField
-	u.v1extensions = invalidField
-	u.sizecache = invalidField
-
-	// If the message can marshal itself, let it do it, for compatibility.
-	// NOTE: This is not efficient.
-	if reflect.PtrTo(t).Implements(marshalerType) {
-		u.hasmarshaler = true
-		atomic.StoreInt32(&u.initialized, 1)
-		return
-	}
-
-	// get oneof implementers
-	var oneofImplementers []interface{}
-	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
-	}
-
-	n := t.NumField()
-
-	// deal with XXX fields first
-	for i := 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-		if !strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		switch f.Name {
-		case "XXX_sizecache":
-			u.sizecache = toField(&f)
-		case "XXX_unrecognized":
-			u.unrecognized = toField(&f)
-		case "XXX_InternalExtensions":
-			u.extensions = toField(&f)
-			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
-		case "XXX_extensions":
-			u.v1extensions = toField(&f)
-		case "XXX_NoUnkeyedLiteral":
-			// nothing to do
-		default:
-			panic("unknown XXX field: " + f.Name)
-		}
-		n--
-	}
-
-	// normal fields
-	fields := make([]marshalFieldInfo, n) // batch allocation
-	u.fields = make([]*marshalFieldInfo, 0, n)
-	for i, j := 0, 0; i < t.NumField(); i++ {
-		f := t.Field(i)
-
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-		field := &fields[j]
-		j++
-		field.name = f.Name
-		u.fields = append(u.fields, field)
-		if f.Tag.Get("protobuf_oneof") != "" {
-			field.computeOneofFieldInfo(&f, oneofImplementers)
-			continue
-		}
-		if f.Tag.Get("protobuf") == "" {
-			// field has no tag (not in generated message), ignore it
-			u.fields = u.fields[:len(u.fields)-1]
-			j--
-			continue
-		}
-		field.computeMarshalFieldInfo(&f)
-	}
-
-	// fields are marshaled in tag order on the wire.
-	sort.Sort(byTag(u.fields))
-
-	atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int           { return len(a) }
-func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
-	// get from cache first
-	u.RLock()
-	e, ok := u.extElems[desc.Field]
-	u.RUnlock()
-	if ok {
-		return e
-	}
-
-	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
-	tags := strings.Split(desc.Tag, ",")
-	tag, err := strconv.Atoi(tags[1])
-	if err != nil {
-		panic("tag is not an integer")
-	}
-	wt := wiretype(tags[0])
-	sizer, marshaler := typeMarshaler(t, tags, false, false)
-	e = &marshalElemInfo{
-		wiretag:   uint64(tag)<<3 | wt,
-		tagsize:   SizeVarint(uint64(tag) << 3),
-		sizer:     sizer,
-		marshaler: marshaler,
-		isptr:     t.Kind() == reflect.Ptr,
-	}
-
-	// update cache
-	u.Lock()
-	if u.extElems == nil {
-		u.extElems = make(map[int32]*marshalElemInfo)
-	}
-	u.extElems[desc.Field] = e
-	u.Unlock()
-	return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
-	// parse protobuf tag of the field.
-	// tag has format of "bytes,49,opt,name=foo,def=hello!"
-	tags := strings.Split(f.Tag.Get("protobuf"), ",")
-	if tags[0] == "" {
-		return
-	}
-	tag, err := strconv.Atoi(tags[1])
-	if err != nil {
-		panic("tag is not an integer")
-	}
-	wt := wiretype(tags[0])
-	if tags[2] == "req" {
-		fi.required = true
-	}
-	fi.setTag(f, tag, wt)
-	fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
-	fi.field = toField(f)
-	fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
-	fi.isPointer = true
-	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
-	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
-	ityp := f.Type // interface type
-	for _, o := range oneofImplementers {
-		t := reflect.TypeOf(o)
-		if !t.Implements(ityp) {
-			continue
-		}
-		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
-		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
-		tag, err := strconv.Atoi(tags[1])
-		if err != nil {
-			panic("tag is not an integer")
-		}
-		wt := wiretype(tags[0])
-		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
-		fi.oneofElems[t.Elem()] = &marshalElemInfo{
-			wiretag:   uint64(tag)<<3 | wt,
-			tagsize:   SizeVarint(uint64(tag) << 3),
-			sizer:     sizer,
-			marshaler: marshaler,
-		}
-	}
-}
-
-type oneofMessage interface {
-	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
-	switch encoding {
-	case "fixed32":
-		return WireFixed32
-	case "fixed64":
-		return WireFixed64
-	case "varint", "zigzag32", "zigzag64":
-		return WireVarint
-	case "bytes":
-		return WireBytes
-	case "group":
-		return WireStartGroup
-	}
-	panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
-	fi.field = toField(f)
-	fi.wiretag = uint64(tag)<<3 | wt
-	fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
-	switch f.Type.Kind() {
-	case reflect.Map:
-		// map field
-		fi.isPointer = true
-		fi.sizer, fi.marshaler = makeMapMarshaler(f)
-		return
-	case reflect.Ptr, reflect.Slice:
-		fi.isPointer = true
-	}
-	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
-	encoding := tags[0]
-
-	pointer := false
-	slice := false
-	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
-		slice = true
-		t = t.Elem()
-	}
-	if t.Kind() == reflect.Ptr {
-		pointer = true
-		t = t.Elem()
-	}
-
-	packed := false
-	proto3 := false
-	validateUTF8 := true
-	for i := 2; i < len(tags); i++ {
-		if tags[i] == "packed" {
-			packed = true
-		}
-		if tags[i] == "proto3" {
-			proto3 = true
-		}
-	}
-	validateUTF8 = validateUTF8 && proto3
-
-	switch t.Kind() {
-	case reflect.Bool:
-		if pointer {
-			return sizeBoolPtr, appendBoolPtr
-		}
-		if slice {
-			if packed {
-				return sizeBoolPackedSlice, appendBoolPackedSlice
-			}
-			return sizeBoolSlice, appendBoolSlice
-		}
-		if nozero {
-			return sizeBoolValueNoZero, appendBoolValueNoZero
-		}
-		return sizeBoolValue, appendBoolValue
-	case reflect.Uint32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return sizeFixed32Ptr, appendFixed32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixed32PackedSlice, appendFixed32PackedSlice
-				}
-				return sizeFixed32Slice, appendFixed32Slice
-			}
-			if nozero {
-				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
-			}
-			return sizeFixed32Value, appendFixed32Value
-		case "varint":
-			if pointer {
-				return sizeVarint32Ptr, appendVarint32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarint32PackedSlice, appendVarint32PackedSlice
-				}
-				return sizeVarint32Slice, appendVarint32Slice
-			}
-			if nozero {
-				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
-			}
-			return sizeVarint32Value, appendVarint32Value
-		}
-	case reflect.Int32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return sizeFixedS32Ptr, appendFixedS32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
-				}
-				return sizeFixedS32Slice, appendFixedS32Slice
-			}
-			if nozero {
-				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
-			}
-			return sizeFixedS32Value, appendFixedS32Value
-		case "varint":
-			if pointer {
-				return sizeVarintS32Ptr, appendVarintS32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
-				}
-				return sizeVarintS32Slice, appendVarintS32Slice
-			}
-			if nozero {
-				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
-			}
-			return sizeVarintS32Value, appendVarintS32Value
-		case "zigzag32":
-			if pointer {
-				return sizeZigzag32Ptr, appendZigzag32Ptr
-			}
-			if slice {
-				if packed {
-					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
-				}
-				return sizeZigzag32Slice, appendZigzag32Slice
-			}
-			if nozero {
-				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
-			}
-			return sizeZigzag32Value, appendZigzag32Value
-		}
-	case reflect.Uint64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return sizeFixed64Ptr, appendFixed64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixed64PackedSlice, appendFixed64PackedSlice
-				}
-				return sizeFixed64Slice, appendFixed64Slice
-			}
-			if nozero {
-				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
-			}
-			return sizeFixed64Value, appendFixed64Value
-		case "varint":
-			if pointer {
-				return sizeVarint64Ptr, appendVarint64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarint64PackedSlice, appendVarint64PackedSlice
-				}
-				return sizeVarint64Slice, appendVarint64Slice
-			}
-			if nozero {
-				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
-			}
-			return sizeVarint64Value, appendVarint64Value
-		}
-	case reflect.Int64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return sizeFixedS64Ptr, appendFixedS64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
-				}
-				return sizeFixedS64Slice, appendFixedS64Slice
-			}
-			if nozero {
-				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
-			}
-			return sizeFixedS64Value, appendFixedS64Value
-		case "varint":
-			if pointer {
-				return sizeVarintS64Ptr, appendVarintS64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
-				}
-				return sizeVarintS64Slice, appendVarintS64Slice
-			}
-			if nozero {
-				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
-			}
-			return sizeVarintS64Value, appendVarintS64Value
-		case "zigzag64":
-			if pointer {
-				return sizeZigzag64Ptr, appendZigzag64Ptr
-			}
-			if slice {
-				if packed {
-					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
-				}
-				return sizeZigzag64Slice, appendZigzag64Slice
-			}
-			if nozero {
-				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
-			}
-			return sizeZigzag64Value, appendZigzag64Value
-		}
-	case reflect.Float32:
-		if pointer {
-			return sizeFloat32Ptr, appendFloat32Ptr
-		}
-		if slice {
-			if packed {
-				return sizeFloat32PackedSlice, appendFloat32PackedSlice
-			}
-			return sizeFloat32Slice, appendFloat32Slice
-		}
-		if nozero {
-			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
-		}
-		return sizeFloat32Value, appendFloat32Value
-	case reflect.Float64:
-		if pointer {
-			return sizeFloat64Ptr, appendFloat64Ptr
-		}
-		if slice {
-			if packed {
-				return sizeFloat64PackedSlice, appendFloat64PackedSlice
-			}
-			return sizeFloat64Slice, appendFloat64Slice
-		}
-		if nozero {
-			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
-		}
-		return sizeFloat64Value, appendFloat64Value
-	case reflect.String:
-		if validateUTF8 {
-			if pointer {
-				return sizeStringPtr, appendUTF8StringPtr
-			}
-			if slice {
-				return sizeStringSlice, appendUTF8StringSlice
-			}
-			if nozero {
-				return sizeStringValueNoZero, appendUTF8StringValueNoZero
-			}
-			return sizeStringValue, appendUTF8StringValue
-		}
-		if pointer {
-			return sizeStringPtr, appendStringPtr
-		}
-		if slice {
-			return sizeStringSlice, appendStringSlice
-		}
-		if nozero {
-			return sizeStringValueNoZero, appendStringValueNoZero
-		}
-		return sizeStringValue, appendStringValue
-	case reflect.Slice:
-		if slice {
-			return sizeBytesSlice, appendBytesSlice
-		}
-		if oneof {
-			// Oneof bytes field may also have "proto3" tag.
-			// We want to marshal it as a oneof field. Do this
-			// check before the proto3 check.
-			return sizeBytesOneof, appendBytesOneof
-		}
-		if proto3 {
-			return sizeBytes3, appendBytes3
-		}
-		return sizeBytes, appendBytes
-	case reflect.Struct:
-		switch encoding {
-		case "group":
-			if slice {
-				return makeGroupSliceMarshaler(getMarshalInfo(t))
-			}
-			return makeGroupMarshaler(getMarshalInfo(t))
-		case "bytes":
-			if slice {
-				return makeMessageSliceMarshaler(getMarshalInfo(t))
-			}
-			return makeMessageMarshaler(getMarshalInfo(t))
-		}
-	}
-	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
-	return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
-	v := math.Float32bits(*ptr.toFloat32())
-	if v == 0 {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toFloat32Ptr()
-	if p == nil {
-		return 0
-	}
-	return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat32Slice()
-	return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
-	return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
-	v := math.Float64bits(*ptr.toFloat64())
-	if v == 0 {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toFloat64Ptr()
-	if p == nil {
-		return 0
-	}
-	return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat64Slice()
-	return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toFloat64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v) + tagsize
-	}
-	return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v)
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v)) + tagsize
-	}
-	return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-	}
-	return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return 0
-	}
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-	}
-	return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return 0
-	}
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
-	}
-	return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
-	return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toBool()
-	if !v {
-		return 0
-	}
-	return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
-	p := *ptr.toBoolPtr()
-	if p == nil {
-		return 0
-	}
-	return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBoolSlice()
-	return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBoolSlice()
-	if len(s) == 0 {
-		return 0
-	}
-	return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
-	v := *ptr.toString()
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
-	v := *ptr.toString()
-	if v == "" {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return 0
-	}
-	v := *p
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toStringSlice()
-	n := 0
-	for _, v := range s {
-		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
-	}
-	return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	if v == nil {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	if len(v) == 0 {
-		return 0
-	}
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
-	v := *ptr.toBytes()
-	return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
-	s := *ptr.toBytesSlice()
-	n := 0
-	for _, v := range s {
-		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
-	}
-	return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
-	b = append(b,
-		byte(v),
-		byte(v>>8),
-		byte(v>>16),
-		byte(v>>24))
-	return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
-	b = append(b,
-		byte(v),
-		byte(v>>8),
-		byte(v>>16),
-		byte(v>>24),
-		byte(v>>32),
-		byte(v>>40),
-		byte(v>>48),
-		byte(v>>56))
-	return b
-}
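Editorial note (not part of the deleted file): the manual byte shifts in appendFixed32 and appendFixed64 are plain little-endian encoding; a standalone sketch with encoding/binary produces the same bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// fixed32: four bytes, least-significant byte first
	fixed32 := make([]byte, 4)
	binary.LittleEndian.PutUint32(fixed32, 0x01020304)
	fmt.Printf("% x\n", fixed32) // 04 03 02 01

	// fixed64: eight bytes, least-significant byte first
	fixed64 := make([]byte, 8)
	binary.LittleEndian.PutUint64(fixed64, 1)
	fmt.Printf("% x\n", fixed64) // 01 00 00 00 00 00 00 00
}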
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
-	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
-	// have non-leaf inliner.
-	switch {
-	case v < 1<<7:
-		b = append(b, byte(v))
-	case v < 1<<14:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte(v>>7))
-	case v < 1<<21:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte(v>>14))
-	case v < 1<<28:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte(v>>21))
-	case v < 1<<35:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte(v>>28))
-	case v < 1<<42:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte(v>>35))
-	case v < 1<<49:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte(v>>42))
-	case v < 1<<56:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte(v>>49))
-	case v < 1<<63:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte((v>>49)&0x7f|0x80),
-			byte(v>>56))
-	default:
-		b = append(b,
-			byte(v&0x7f|0x80),
-			byte((v>>7)&0x7f|0x80),
-			byte((v>>14)&0x7f|0x80),
-			byte((v>>21)&0x7f|0x80),
-			byte((v>>28)&0x7f|0x80),
-			byte((v>>35)&0x7f|0x80),
-			byte((v>>42)&0x7f|0x80),
-			byte((v>>49)&0x7f|0x80),
-			byte((v>>56)&0x7f|0x80),
-			1)
-	}
-	return b
-}
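Editorial note (not part of the deleted file): a standalone sketch of the same base-128 varint layout that appendVarint unrolls above — seven payload bits per byte, least-significant group first, with the 0x80 continuation bit on every byte except the last. encodeVarint is a hypothetical helper, not an API of this package.

package main

import "fmt"

// encodeVarint mirrors the layout above with a loop instead of the unrolled switch.
func encodeVarint(v uint64) []byte {
	var b []byte
	for v >= 0x80 {
		b = append(b, byte(v)|0x80) // low 7 bits plus continuation bit
		v >>= 7
	}
	return append(b, byte(v)) // final byte has no continuation bit
}

func main() {
	fmt.Printf("% x\n", encodeVarint(1))     // 01
	fmt.Printf("% x\n", encodeVarint(300))   // ac 02
	fmt.Printf("% x\n", encodeVarint(1<<56)) // 80 80 80 80 80 80 80 80 01
}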
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, *p)
-	return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, v)
-	}
-	return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, v)
-	}
-	return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(v))
-	return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(v))
-	return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, uint32(*p))
-	return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, uint32(v))
-	}
-	return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, uint32(v))
-	}
-	return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float32bits(*ptr.toFloat32())
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float32bits(*ptr.toFloat32())
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, v)
-	return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toFloat32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed32(b, math.Float32bits(*p))
-	return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed32(b, math.Float32bits(v))
-	}
-	return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(4*len(s)))
-	for _, v := range s {
-		b = appendFixed32(b, math.Float32bits(v))
-	}
-	return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, *p)
-	return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, v)
-	}
-	return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, v)
-	}
-	return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(v))
-	return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(v))
-	return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, uint64(*p))
-	return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, uint64(v))
-	}
-	return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, uint64(v))
-	}
-	return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float64bits(*ptr.toFloat64())
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := math.Float64bits(*ptr.toFloat64())
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, v)
-	return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toFloat64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendFixed64(b, math.Float64bits(*p))
-	return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendFixed64(b, math.Float64bits(v))
-	}
-	return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toFloat64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(8*len(s)))
-	for _, v := range s {
-		b = appendFixed64(b, math.Float64bits(v))
-	}
-	return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
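Editorial note (not part of the deleted file): a standalone sketch of the packed layout these functions emit, assuming a hypothetical repeated uint32 field with number 4 — one length-delimited key, the byte length of the payload, then the element varints back to back with no per-element keys. putVarint and the values are made up for illustration.

package main

import "fmt"

func putVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	const fieldNum = 4
	const wireBytes = 2
	vals := []uint32{3, 5, 7}

	// payload: the element varints back to back, with no per-element keys
	var payload []byte
	for _, v := range vals {
		payload = putVarint(payload, uint64(v))
	}

	// frame: one key (field number 4, wire type 2), the payload length, then the payload
	b := putVarint(nil, uint64(fieldNum<<3|wireBytes))
	b = putVarint(b, uint64(len(payload)))
	b = append(b, payload...)

	fmt.Printf("% x\n", b) // 22 03 03 05 07
}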
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, v)
-	return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toUint64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, v)
-	return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toUint64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, *p)
-	return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, v)
-	}
-	return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toUint64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(v)
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, v)
-	}
-	return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v))
-	return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(*p))
-	return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v))
-	}
-	return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt32()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := ptr.getInt32Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	v := *p
-	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	}
-	return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := ptr.getInt32Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
-	}
-	return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toInt64()
-	if v == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toInt64Ptr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	v := *p
-	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	}
-	return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toInt64Slice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	// compute size
-	n := 0
-	for _, v := range s {
-		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
-	}
-	b = appendVarint(b, uint64(n))
-	for _, v := range s {
-		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
-	}
-	return b, nil
-}
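Editorial note (not part of the deleted file): the zigzag expression used throughout the size and append functions above maps signed values of small magnitude to small unsigned values, so they stay short when varint-encoded. A standalone sketch of the 32-bit mapping:

package main

import "fmt"

// zigzag32 mirrors the (uint32(v)<<1)^uint32(v>>31) expression used above.
func zigzag32(v int32) uint32 {
	return (uint32(v) << 1) ^ uint32(v>>31)
}

func main() {
	for _, v := range []int32{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag32(v))
	}
	// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4: small magnitudes stay small,
	// so they encode to one-byte varints.
}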
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBool()
-	b = appendVarint(b, wiretag)
-	if v {
-		b = append(b, 1)
-	} else {
-		b = append(b, 0)
-	}
-	return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBool()
-	if !v {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = append(b, 1)
-	return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toBoolPtr()
-	if p == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	if *p {
-		b = append(b, 1)
-	} else {
-		b = append(b, 0)
-	}
-	return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBoolSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		if v {
-			b = append(b, 1)
-		} else {
-			b = append(b, 0)
-		}
-	}
-	return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBoolSlice()
-	if len(s) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag&^7|WireBytes)
-	b = appendVarint(b, uint64(len(s)))
-	for _, v := range s {
-		if v {
-			b = append(b, 1)
-		} else {
-			b = append(b, 0)
-		}
-	}
-	return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toString()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toString()
-	if v == "" {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return b, nil
-	}
-	v := *p
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toStringSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	v := *ptr.toString()
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	v := *ptr.toString()
-	if v == "" {
-		return b, nil
-	}
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	p := *ptr.toStringPtr()
-	if p == nil {
-		return b, nil
-	}
-	v := *p
-	if !utf8.ValidString(v) {
-		invalidUTF8 = true
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	var invalidUTF8 bool
-	s := *ptr.toStringSlice()
-	for _, v := range s {
-		if !utf8.ValidString(v) {
-			invalidUTF8 = true
-		}
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	if invalidUTF8 {
-		return b, errInvalidUTF8
-	}
-	return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	if v == nil {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	if len(v) == 0 {
-		return b, nil
-	}
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	v := *ptr.toBytes()
-	b = appendVarint(b, wiretag)
-	b = appendVarint(b, uint64(len(v)))
-	b = append(b, v...)
-	return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
-	s := *ptr.toBytesSlice()
-	for _, v := range s {
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(v)))
-		b = append(b, v...)
-	}
-	return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			return u.size(p) + 2*tagsize
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return b, nil
-			}
-			var err error
-			b = appendVarint(b, wiretag) // start group
-			b, err = u.marshal(b, p, deterministic)
-			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
-			return b, err
-		}
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			s := ptr.getPointerSlice()
-			n := 0
-			for _, v := range s {
-				if v.isNil() {
-					continue
-				}
-				n += u.size(v) + 2*tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			s := ptr.getPointerSlice()
-			var err error
-			var nerr nonFatal
-			for _, v := range s {
-				if v.isNil() {
-					return b, errRepeatedHasNil
-				}
-				b = appendVarint(b, wiretag) // start group
-				b, err = u.marshal(b, v, deterministic)
-				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
-				if !nerr.Merge(err) {
-					if err == ErrNil {
-						err = errRepeatedHasNil
-					}
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			siz := u.size(p)
-			return siz + SizeVarint(uint64(siz)) + tagsize
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getPointer()
-			if p.isNil() {
-				return b, nil
-			}
-			b = appendVarint(b, wiretag)
-			siz := u.cachedsize(p)
-			b = appendVarint(b, uint64(siz))
-			return u.marshal(b, p, deterministic)
-		}
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-			s := ptr.getPointerSlice()
-			n := 0
-			for _, v := range s {
-				if v.isNil() {
-					continue
-				}
-				siz := u.size(v)
-				n += siz + SizeVarint(uint64(siz)) + tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-			s := ptr.getPointerSlice()
-			var err error
-			var nerr nonFatal
-			for _, v := range s {
-				if v.isNil() {
-					return b, errRepeatedHasNil
-				}
-				b = appendVarint(b, wiretag)
-				siz := u.cachedsize(v)
-				b = appendVarint(b, uint64(siz))
-				b, err = u.marshal(b, v, deterministic)
-
-				if !nerr.Merge(err) {
-					if err == ErrNil {
-						err = errRepeatedHasNil
-					}
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
-	// figure out key and value type
-	t := f.Type
-	keyType := t.Key()
-	valType := t.Elem()
-	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
-	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
-	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
-	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
-	keyWireTag := 1<<3 | wiretype(keyTags[0])
-	valWireTag := 2<<3 | wiretype(valTags[0])
-
-	// We create an interface to get the addresses of the map key and value.
-	// If value is pointer-typed, the interface is a direct interface, the
-	// idata itself is the value. Otherwise, the idata is the pointer to the
-	// value.
-	// Key cannot be pointer-typed.
-	valIsPtr := valType.Kind() == reflect.Ptr
-
-	// If value is a message with nested maps, calling
-	// valSizer in marshal may be quadratic. We should use
-	// cached version in marshal (but not in size).
-	// If value is not message type, we don't have size cache,
-	// but it cannot be nested either. Just use valSizer.
-	valCachedSizer := valSizer
-	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
-		u := getMarshalInfo(valType.Elem())
-		valCachedSizer = func(ptr pointer, tagsize int) int {
-			// Same as message sizer, but use cache.
-			p := ptr.getPointer()
-			if p.isNil() {
-				return 0
-			}
-			siz := u.cachedsize(p)
-			return siz + SizeVarint(uint64(siz)) + tagsize
-		}
-	}
-	return func(ptr pointer, tagsize int) int {
-			m := ptr.asPointerTo(t).Elem() // the map
-			n := 0
-			for _, k := range m.MapKeys() {
-				ki := k.Interface()
-				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false)             // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
-				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
-				n += siz + SizeVarint(uint64(siz)) + tagsize
-			}
-			return n
-		},
-		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
-			m := ptr.asPointerTo(t).Elem() // the map
-			var err error
-			keys := m.MapKeys()
-			if len(keys) > 1 && deterministic {
-				sort.Sort(mapKeys(keys))
-			}
-
-			var nerr nonFatal
-			for _, k := range keys {
-				ki := k.Interface()
-				vi := m.MapIndex(k).Interface()
-				kaddr := toAddrPointer(&ki, false)    // pointer to key
-				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
-				b = appendVarint(b, tag)
-				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
-				b = appendVarint(b, uint64(siz))
-				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
-				if !nerr.Merge(err) {
-					return b, err
-				}
-				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
-				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
-					return b, err
-				}
-			}
-			return b, nerr.E
-		}
-}
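Editorial note (not part of the deleted file): a hand-assembled sketch of the entry framing makeMapMarshaler produces, assuming a hypothetical field `map<string, int32> labels = 5;` — each entry is encoded like a nested message whose key uses tag 1 and whose value uses tag 2, matching keyWireTag and valWireTag above. The single-byte lengths assume values under 128.

package main

import "fmt"

func main() {
	const mapField = 5
	key, val := "a", byte(1)

	// inner entry message: key (tag 1, wire type 2 for string), then value (tag 2, wire type 0 for varint)
	entry := []byte{1<<3 | 2, byte(len(key))}
	entry = append(entry, key...)
	entry = append(entry, 2<<3|0, val)

	// outer frame: the map field's key (wire type 2) and the entry length
	b := []byte{mapField<<3 | 2, byte(len(entry))}
	b = append(b, entry...)

	fmt.Printf("% x\n", b) // 2a 05 0a 01 61 10 01
}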
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
-	// Oneof field is an interface. We need to get the actual data type on the fly.
-	t := f.Type
-	return func(ptr pointer, _ int) int {
-			p := ptr.getInterfacePointer()
-			if p.isNil() {
-				return 0
-			}
-			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
-			telem := v.Type()
-			e := fi.oneofElems[telem]
-			return e.sizer(p, e.tagsize)
-		},
-		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
-			p := ptr.getInterfacePointer()
-			if p.isNil() {
-				return b, nil
-			}
-			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
-			telem := v.Type()
-			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
-				return b, errOneofHasNil
-			}
-			e := fi.oneofElems[telem]
-			return e.marshaler(b, p, e.wiretag, deterministic)
-		}
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return 0
-	}
-	mu.Lock()
-
-	n := 0
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		n += ei.sizer(p, ei.tagsize)
-	}
-	mu.Unlock()
-	return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return b, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-
-	var err error
-	var nerr nonFatal
-
-	// Fast-path for common cases: zero or one extensions.
-	// Don't bother sorting the keys.
-	if len(m) <= 1 {
-		for _, e := range m {
-			if e.value == nil || e.desc == nil {
-				// Extension is only in its encoded form.
-				b = append(b, e.enc...)
-				continue
-			}
-
-			// We don't skip extensions that have an encoded form set,
-			// because the extension value may have been mutated after
-			// the last time this function was called.
-
-			ei := u.getExtElemInfo(e.desc)
-			v := e.value
-			p := toAddrPointer(&v, ei.isptr)
-			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-			if !nerr.Merge(err) {
-				return b, err
-			}
-		}
-		return b, nerr.E
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	// Not sure this is required, but the old code does it.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, k := range keys {
-		e := m[int32(k)]
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			b = append(b, e.enc...)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
-
-// message set format is:
-//   message MessageSet {
-//     repeated group Item = 1 {
-//       required int32 type_id = 2;
-//       required string message = 3;
-//     };
-//   }
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return 0
-	}
-	mu.Lock()
-
-	n := 0
-	for id, e := range m {
-		n += 2                          // start group, end group. tag = 1 (size=1)
-		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-			siz := len(msgWithLen)
-			n += siz + 1 // message, tag = 3 (size=1)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
-	}
-	mu.Unlock()
-	return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
-	m, mu := ext.extensionsRead()
-	if m == nil {
-		return b, nil
-	}
-	mu.Lock()
-	defer mu.Unlock()
-
-	var err error
-	var nerr nonFatal
-
-	// Fast-path for common cases: zero or one extensions.
-	// Don't bother sorting the keys.
-	if len(m) <= 1 {
-		for id, e := range m {
-			b = append(b, 1<<3|WireStartGroup)
-			b = append(b, 2<<3|WireVarint)
-			b = appendVarint(b, uint64(id))
-
-			if e.value == nil || e.desc == nil {
-				// Extension is only in its encoded form.
-				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-				b = append(b, 3<<3|WireBytes)
-				b = append(b, msgWithLen...)
-				b = append(b, 1<<3|WireEndGroup)
-				continue
-			}
-
-			// We don't skip extensions that have an encoded form set,
-			// because the extension value may have been mutated after
-			// the last time this function was called.
-
-			ei := u.getExtElemInfo(e.desc)
-			v := e.value
-			p := toAddrPointer(&v, ei.isptr)
-			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
-			if !nerr.Merge(err) {
-				return b, err
-			}
-			b = append(b, 1<<3|WireEndGroup)
-		}
-		return b, nerr.E
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, id := range keys {
-		e := m[int32(id)]
-		b = append(b, 1<<3|WireStartGroup)
-		b = append(b, 2<<3|WireVarint)
-		b = appendVarint(b, uint64(id))
-
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
-			b = append(b, 3<<3|WireBytes)
-			b = append(b, msgWithLen...)
-			b = append(b, 1<<3|WireEndGroup)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
-		b = append(b, 1<<3|WireEndGroup)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
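Editorial note (not part of the deleted file): a sketch of the per-item framing appendMessageSet builds, assuming a hypothetical type_id of 100 and an already-encoded three-byte extension payload (both values are made up for illustration).

package main

import "fmt"

func main() {
	const (
		wireVarint     = 0
		wireBytes      = 2
		wireStartGroup = 3
		wireEndGroup   = 4
	)
	typeID := byte(100)
	msg := []byte{0x08, 0x96, 0x01} // hypothetical payload: field 1 set to the varint 150

	b := []byte{1<<3 | wireStartGroup}            // Item group start (tag 1)
	b = append(b, 2<<3|wireVarint, typeID)        // type_id (tag 2)
	b = append(b, 3<<3|wireBytes, byte(len(msg))) // message (tag 3), length-delimited
	b = append(b, msg...)
	b = append(b, 1<<3|wireEndGroup) // Item group end

	fmt.Printf("% x\n", b) // 0b 10 64 1a 03 08 96 01 0c
}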
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
-	if m == nil {
-		return 0
-	}
-
-	n := 0
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		n += ei.sizer(p, ei.tagsize)
-	}
-	return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
-	if m == nil {
-		return b, nil
-	}
-
-	// Sort the keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	var err error
-	var nerr nonFatal
-	for _, k := range keys {
-		e := m[int32(k)]
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			b = append(b, e.enc...)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		ei := u.getExtElemInfo(e.desc)
-		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
-		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
-		if !nerr.Merge(err) {
-			return b, err
-		}
-	}
-	return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
-	XXX_Size() int
-	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
-	if m, ok := pb.(newMarshaler); ok {
-		return m.XXX_Size()
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		b, _ := m.Marshal()
-		return len(b)
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return 0
-	}
-	var info InternalMessageInfo
-	return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
-	if m, ok := pb.(newMarshaler); ok {
-		siz := m.XXX_Size()
-		b := make([]byte, 0, siz)
-		return m.XXX_Marshal(b, false)
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		return m.Marshal()
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return nil, ErrNil
-	}
-	var info InternalMessageInfo
-	siz := info.Size(pb)
-	b := make([]byte, 0, siz)
-	return info.Marshal(b, pb, false)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
-	var err error
-	if m, ok := pb.(newMarshaler); ok {
-		siz := m.XXX_Size()
-		p.grow(siz) // make sure buf has enough capacity
-		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
-		return err
-	}
-	if m, ok := pb.(Marshaler); ok {
-		// If the message can marshal itself, let it do it, for compatibility.
-		// NOTE: This is not efficient.
-		b, err := m.Marshal()
-		p.buf = append(p.buf, b...)
-		return err
-	}
-	// in case somehow we didn't generate the wrapper
-	if pb == nil {
-		return ErrNil
-	}
-	var info InternalMessageInfo
-	siz := info.Size(pb)
-	p.grow(siz) // make sure buf has enough capacity
-	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
-	return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
-	need := len(p.buf) + n
-	if need <= cap(p.buf) {
-		return
-	}
-	newCap := len(p.buf) * 2
-	if newCap < need {
-		newCap = need
-	}
-	p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
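Editorial note (not part of the deleted file): a standalone sketch of the growth policy above — capacity doubles for amortized appends, but never allocates less than the pending write needs.

package main

import "fmt"

// grow mirrors the strategy of Buffer.grow on a plain slice.
func grow(buf []byte, n int) []byte {
	need := len(buf) + n
	if need <= cap(buf) {
		return buf
	}
	newCap := len(buf) * 2
	if newCap < need {
		newCap = need // doubling is not enough; allocate exactly what is needed
	}
	return append(make([]byte, 0, newCap), buf...)
}

func main() {
	b := make([]byte, 10, 16)
	b = grow(b, 100)            // doubling would give 20, so capacity jumps to 110
	fmt.Println(len(b), cap(b)) // 10 110
}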

+ 0 - 654
vendor/github.com/golang/protobuf/proto/table_merge.go

@@ -1,654 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src are of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
-	mi := atomicLoadMergeInfo(&a.merge)
-	if mi == nil {
-		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
-		atomicStoreMergeInfo(&a.merge, mi)
-	}
-	mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
-	typ reflect.Type
-
-	initialized int32 // 0: only typ is valid, 1: everything is valid
-	lock        sync.Mutex
-
-	fields       []mergeFieldInfo
-	unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
-	field field // Offset of field, guaranteed to be valid
-
-	// isPointer reports whether the value in the field is a pointer.
-	// This is true for the following situations:
-	//	* Pointer to struct
-	//	* Pointer to basic type (proto2 only)
-	//	* Slice (first value in slice header is a pointer)
-	//	* String (first value in string header is a pointer)
-	isPointer bool
-
-	// basicWidth reports the width of the field assuming that it is directly
-	// embedded in the struct (as is the case for basic types in proto3).
-	// The possible values are:
-	// 	0: invalid
-	//	1: bool
-	//	4: int32, uint32, float32
-	//	8: int64, uint64, float64
-	basicWidth int
-
-	// Where dst and src are pointers to the types being merged.
-	merge func(dst, src pointer)
-}
-
-var (
-	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
-	mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
-	mergeInfoLock.Lock()
-	defer mergeInfoLock.Unlock()
-	mi := mergeInfoMap[t]
-	if mi == nil {
-		mi = &mergeInfo{typ: t}
-		mergeInfoMap[t] = mi
-	}
-	return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
-	if dst.isNil() {
-		panic("proto: nil destination")
-	}
-	if src.isNil() {
-		return // Nothing to do.
-	}
-
-	if atomic.LoadInt32(&mi.initialized) == 0 {
-		mi.computeMergeInfo()
-	}
-
-	for _, fi := range mi.fields {
-		sfp := src.offset(fi.field)
-
-		// As an optimization, we can avoid the merge function call cost
-		// if we know for sure that the source will have no effect
-		// by checking if it is the zero value.
-		if unsafeAllowed {
-			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
-				continue
-			}
-			if fi.basicWidth > 0 {
-				switch {
-				case fi.basicWidth == 1 && !*sfp.toBool():
-					continue
-				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
-					continue
-				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
-					continue
-				}
-			}
-		}
-
-		dfp := dst.offset(fi.field)
-		fi.merge(dfp, sfp)
-	}
-
-	// TODO: Make this faster?
-	out := dst.asPointerTo(mi.typ).Elem()
-	in := src.asPointerTo(mi.typ).Elem()
-	if emIn, err := extendable(in.Addr().Interface()); err == nil {
-		emOut, _ := extendable(out.Addr().Interface())
-		mIn, muIn := emIn.extensionsRead()
-		if mIn != nil {
-			mOut := emOut.extensionsWrite()
-			muIn.Lock()
-			mergeExtension(mOut, mIn)
-			muIn.Unlock()
-		}
-	}
-
-	if mi.unrecognized.IsValid() {
-		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
-			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
-		}
-	}
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
-	mi.lock.Lock()
-	defer mi.lock.Unlock()
-	if mi.initialized != 0 {
-		return
-	}
-	t := mi.typ
-	n := t.NumField()
-
-	props := GetProperties(t)
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if strings.HasPrefix(f.Name, "XXX_") {
-			continue
-		}
-
-		mfi := mergeFieldInfo{field: toField(&f)}
-		tf := f.Type
-
-		// As an optimization, we can avoid the merge function call cost
-		// if we know for sure that the source will have no effect
-		// by checking if it is the zero value.
-		if unsafeAllowed {
-			switch tf.Kind() {
-			case reflect.Ptr, reflect.Slice, reflect.String:
-				// As a special case, we assume slices and strings are pointers
-				// since we know that the first field in the SliceHeader or
-				// StringHeader is a data pointer.
-				mfi.isPointer = true
-			case reflect.Bool:
-				mfi.basicWidth = 1
-			case reflect.Int32, reflect.Uint32, reflect.Float32:
-				mfi.basicWidth = 4
-			case reflect.Int64, reflect.Uint64, reflect.Float64:
-				mfi.basicWidth = 8
-			}
-		}
-
-		// Unwrap tf to get at its most basic type.
-		var isPointer, isSlice bool
-		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
-			isSlice = true
-			tf = tf.Elem()
-		}
-		if tf.Kind() == reflect.Ptr {
-			isPointer = true
-			tf = tf.Elem()
-		}
-		if isPointer && isSlice && tf.Kind() != reflect.Struct {
-			panic("both pointer and slice for basic type in " + tf.Name())
-		}
-
-		switch tf.Kind() {
-		case reflect.Int32:
-			switch {
-			case isSlice: // E.g., []int32
-				mfi.merge = func(dst, src pointer) {
-					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
-					/*
-						sfsp := src.toInt32Slice()
-						if *sfsp != nil {
-							dfsp := dst.toInt32Slice()
-							*dfsp = append(*dfsp, *sfsp...)
-							if *dfsp == nil {
-								*dfsp = []int64{}
-							}
-						}
-					*/
-					sfs := src.getInt32Slice()
-					if sfs != nil {
-						dfs := dst.getInt32Slice()
-						dfs = append(dfs, sfs...)
-						if dfs == nil {
-							dfs = []int32{}
-						}
-						dst.setInt32Slice(dfs)
-					}
-				}
-			case isPointer: // E.g., *int32
-				mfi.merge = func(dst, src pointer) {
-					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
-					/*
-						sfpp := src.toInt32Ptr()
-						if *sfpp != nil {
-							dfpp := dst.toInt32Ptr()
-							if *dfpp == nil {
-								*dfpp = Int32(**sfpp)
-							} else {
-								**dfpp = **sfpp
-							}
-						}
-					*/
-					sfp := src.getInt32Ptr()
-					if sfp != nil {
-						dfp := dst.getInt32Ptr()
-						if dfp == nil {
-							dst.setInt32Ptr(*sfp)
-						} else {
-							*dfp = *sfp
-						}
-					}
-				}
-			default: // E.g., int32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toInt32(); v != 0 {
-						*dst.toInt32() = v
-					}
-				}
-			}
-		case reflect.Int64:
-			switch {
-			case isSlice: // E.g., []int64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toInt64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toInt64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []int64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *int64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toInt64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toInt64Ptr()
-						if *dfpp == nil {
-							*dfpp = Int64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., int64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toInt64(); v != 0 {
-						*dst.toInt64() = v
-					}
-				}
-			}
-		case reflect.Uint32:
-			switch {
-			case isSlice: // E.g., []uint32
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toUint32Slice()
-					if *sfsp != nil {
-						dfsp := dst.toUint32Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []uint32{}
-						}
-					}
-				}
-			case isPointer: // E.g., *uint32
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toUint32Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toUint32Ptr()
-						if *dfpp == nil {
-							*dfpp = Uint32(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., uint32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toUint32(); v != 0 {
-						*dst.toUint32() = v
-					}
-				}
-			}
-		case reflect.Uint64:
-			switch {
-			case isSlice: // E.g., []uint64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toUint64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toUint64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []uint64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *uint64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toUint64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toUint64Ptr()
-						if *dfpp == nil {
-							*dfpp = Uint64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., uint64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toUint64(); v != 0 {
-						*dst.toUint64() = v
-					}
-				}
-			}
-		case reflect.Float32:
-			switch {
-			case isSlice: // E.g., []float32
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toFloat32Slice()
-					if *sfsp != nil {
-						dfsp := dst.toFloat32Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []float32{}
-						}
-					}
-				}
-			case isPointer: // E.g., *float32
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toFloat32Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toFloat32Ptr()
-						if *dfpp == nil {
-							*dfpp = Float32(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., float32
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toFloat32(); v != 0 {
-						*dst.toFloat32() = v
-					}
-				}
-			}
-		case reflect.Float64:
-			switch {
-			case isSlice: // E.g., []float64
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toFloat64Slice()
-					if *sfsp != nil {
-						dfsp := dst.toFloat64Slice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []float64{}
-						}
-					}
-				}
-			case isPointer: // E.g., *float64
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toFloat64Ptr()
-					if *sfpp != nil {
-						dfpp := dst.toFloat64Ptr()
-						if *dfpp == nil {
-							*dfpp = Float64(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., float64
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toFloat64(); v != 0 {
-						*dst.toFloat64() = v
-					}
-				}
-			}
-		case reflect.Bool:
-			switch {
-			case isSlice: // E.g., []bool
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toBoolSlice()
-					if *sfsp != nil {
-						dfsp := dst.toBoolSlice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []bool{}
-						}
-					}
-				}
-			case isPointer: // E.g., *bool
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toBoolPtr()
-					if *sfpp != nil {
-						dfpp := dst.toBoolPtr()
-						if *dfpp == nil {
-							*dfpp = Bool(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., bool
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toBool(); v {
-						*dst.toBool() = v
-					}
-				}
-			}
-		case reflect.String:
-			switch {
-			case isSlice: // E.g., []string
-				mfi.merge = func(dst, src pointer) {
-					sfsp := src.toStringSlice()
-					if *sfsp != nil {
-						dfsp := dst.toStringSlice()
-						*dfsp = append(*dfsp, *sfsp...)
-						if *dfsp == nil {
-							*dfsp = []string{}
-						}
-					}
-				}
-			case isPointer: // E.g., *string
-				mfi.merge = func(dst, src pointer) {
-					sfpp := src.toStringPtr()
-					if *sfpp != nil {
-						dfpp := dst.toStringPtr()
-						if *dfpp == nil {
-							*dfpp = String(**sfpp)
-						} else {
-							**dfpp = **sfpp
-						}
-					}
-				}
-			default: // E.g., string
-				mfi.merge = func(dst, src pointer) {
-					if v := *src.toString(); v != "" {
-						*dst.toString() = v
-					}
-				}
-			}
-		case reflect.Slice:
-			isProto3 := props.Prop[i].proto3
-			switch {
-			case isPointer:
-				panic("bad pointer in byte slice case in " + tf.Name())
-			case tf.Elem().Kind() != reflect.Uint8:
-				panic("bad element kind in byte slice case in " + tf.Name())
-			case isSlice: // E.g., [][]byte
-				mfi.merge = func(dst, src pointer) {
-					sbsp := src.toBytesSlice()
-					if *sbsp != nil {
-						dbsp := dst.toBytesSlice()
-						for _, sb := range *sbsp {
-							if sb == nil {
-								*dbsp = append(*dbsp, nil)
-							} else {
-								*dbsp = append(*dbsp, append([]byte{}, sb...))
-							}
-						}
-						if *dbsp == nil {
-							*dbsp = [][]byte{}
-						}
-					}
-				}
-			default: // E.g., []byte
-				mfi.merge = func(dst, src pointer) {
-					sbp := src.toBytes()
-					if *sbp != nil {
-						dbp := dst.toBytes()
-						if !isProto3 || len(*sbp) > 0 {
-							*dbp = append([]byte{}, *sbp...)
-						}
-					}
-				}
-			}
-		case reflect.Struct:
-			switch {
-			case !isPointer:
-				panic(fmt.Sprintf("message field %s without pointer", tf))
-			case isSlice: // E.g., []*pb.T
-				mi := getMergeInfo(tf)
-				mfi.merge = func(dst, src pointer) {
-					sps := src.getPointerSlice()
-					if sps != nil {
-						dps := dst.getPointerSlice()
-						for _, sp := range sps {
-							var dp pointer
-							if !sp.isNil() {
-								dp = valToPointer(reflect.New(tf))
-								mi.merge(dp, sp)
-							}
-							dps = append(dps, dp)
-						}
-						if dps == nil {
-							dps = []pointer{}
-						}
-						dst.setPointerSlice(dps)
-					}
-				}
-			default: // E.g., *pb.T
-				mi := getMergeInfo(tf)
-				mfi.merge = func(dst, src pointer) {
-					sp := src.getPointer()
-					if !sp.isNil() {
-						dp := dst.getPointer()
-						if dp.isNil() {
-							dp = valToPointer(reflect.New(tf))
-							dst.setPointer(dp)
-						}
-						mi.merge(dp, sp)
-					}
-				}
-			}
-		case reflect.Map:
-			switch {
-			case isPointer || isSlice:
-				panic("bad pointer or slice in map case in " + tf.Name())
-			default: // E.g., map[K]V
-				mfi.merge = func(dst, src pointer) {
-					sm := src.asPointerTo(tf).Elem()
-					if sm.Len() == 0 {
-						return
-					}
-					dm := dst.asPointerTo(tf).Elem()
-					if dm.IsNil() {
-						dm.Set(reflect.MakeMap(tf))
-					}
-
-					switch tf.Elem().Kind() {
-					case reflect.Ptr: // Proto struct (e.g., *T)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							val = reflect.ValueOf(Clone(val.Interface().(Message)))
-							dm.SetMapIndex(key, val)
-						}
-					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
-							dm.SetMapIndex(key, val)
-						}
-					default: // Basic type (e.g., string)
-						for _, key := range sm.MapKeys() {
-							val := sm.MapIndex(key)
-							dm.SetMapIndex(key, val)
-						}
-					}
-				}
-			}
-		case reflect.Interface:
-			// Must be oneof field.
-			switch {
-			case isPointer || isSlice:
-				panic("bad pointer or slice in interface case in " + tf.Name())
-			default: // E.g., interface{}
-				// TODO: Make this faster?
-				mfi.merge = func(dst, src pointer) {
-					su := src.asPointerTo(tf).Elem()
-					if !su.IsNil() {
-						du := dst.asPointerTo(tf).Elem()
-						typ := su.Elem().Type()
-						if du.IsNil() || du.Elem().Type() != typ {
-							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
-						}
-						sv := su.Elem().Elem().Field(0)
-						if sv.Kind() == reflect.Ptr && sv.IsNil() {
-							return
-						}
-						dv := du.Elem().Elem().Field(0)
-						if dv.Kind() == reflect.Ptr && dv.IsNil() {
-							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
-						}
-						switch sv.Type().Kind() {
-						case reflect.Ptr: // Proto struct (e.g., *T)
-							Merge(dv.Interface().(Message), sv.Interface().(Message))
-						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
-							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
-						default: // Basic type (e.g., string)
-							dv.Set(sv)
-						}
-					}
-				}
-			}
-		default:
-			panic(fmt.Sprintf("merger not found for type:%s", tf))
-		}
-		mi.fields = append(mi.fields, mfi)
-	}
-
-	mi.unrecognized = invalidField
-	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
-		if f.Type != reflect.TypeOf([]byte{}) {
-			panic("expected XXX_unrecognized to be of type []byte")
-		}
-		mi.unrecognized = toField(&f)
-	}
-
-	atomic.StoreInt32(&mi.initialized, 1)
-}

+ 0 - 2051
vendor/github.com/golang/protobuf/proto/table_unmarshal.go

@@ -1,2051 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
-	// Load the unmarshal information for this message type.
-	// The atomic load ensures memory consistency.
-	u := atomicLoadUnmarshalInfo(&a.unmarshal)
-	if u == nil {
-		// Slow path: find unmarshal info for msg, update a with it.
-		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
-		atomicStoreUnmarshalInfo(&a.unmarshal, u)
-	}
-	// Then do the unmarshaling.
-	err := u.unmarshal(toPointer(&msg), b)
-	return err
-}
-
-type unmarshalInfo struct {
-	typ reflect.Type // type of the protobuf struct
-
-	// 0 = only typ field is initialized
-	// 1 = completely initialized
-	initialized     int32
-	lock            sync.Mutex                    // prevents double initialization
-	dense           []unmarshalFieldInfo          // fields indexed by tag #
-	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
-	reqFields       []string                      // names of required fields
-	reqMask         uint64                        // 1<<len(reqFields)-1
-	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
-	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
-	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
-	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
-	isMessageSet    bool                          // if true, implies extensions field is valid
-}
-
-// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
-// It decodes the field, stores it at f, and returns the unused bytes.
-// w is the wire encoding.
-// b is the data after the tag and wire encoding have been read.
-type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
-
-type unmarshalFieldInfo struct {
-	// location of the field in the proto message structure.
-	field field
-
-	// function to unmarshal the data for the field.
-	unmarshal unmarshaler
-
-	// if a required field, contains a single set bit at this field's index in the required field list.
-	reqMask uint64
-
-	name string // name of the field, for error reporting
-}
-
-var (
-	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
-	unmarshalInfoLock sync.Mutex
-)
-
-// getUnmarshalInfo returns the data structure which can be
-// subsequently used to unmarshal a message of the given type.
-// t is the type of the message (note: not pointer to message).
-func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
-	// It would be correct to return a new unmarshalInfo
-	// unconditionally. We would end up allocating one
-	// per occurrence of that type as a message or submessage.
-	// We use a cache here just to reduce memory usage.
-	unmarshalInfoLock.Lock()
-	defer unmarshalInfoLock.Unlock()
-	u := unmarshalInfoMap[t]
-	if u == nil {
-		u = &unmarshalInfo{typ: t}
-		// Note: we just set the type here. The rest of the fields
-		// will be initialized on first use.
-		unmarshalInfoMap[t] = u
-	}
-	return u
-}
-
-// unmarshal does the main work of unmarshaling a message.
-// u provides type information used to unmarshal the message.
-// m is a pointer to a protocol buffer message.
-// b is a byte stream to unmarshal into m.
-// This is the top routine used when recursively unmarshaling submessages.
-func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
-	if atomic.LoadInt32(&u.initialized) == 0 {
-		u.computeUnmarshalInfo()
-	}
-	if u.isMessageSet {
-		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
-	}
-	var reqMask uint64 // bitmask of required fields we've seen.
-	var errLater error
-	for len(b) > 0 {
-		// Read tag and wire type.
-		// Special case 1 and 2 byte varints.
-		var x uint64
-		if b[0] < 128 {
-			x = uint64(b[0])
-			b = b[1:]
-		} else if len(b) >= 2 && b[1] < 128 {
-			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
-			b = b[2:]
-		} else {
-			var n int
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-		}
-		tag := x >> 3
-		wire := int(x) & 7
-
-		// Dispatch on the tag to one of the unmarshal* functions below.
-		var f unmarshalFieldInfo
-		if tag < uint64(len(u.dense)) {
-			f = u.dense[tag]
-		} else {
-			f = u.sparse[tag]
-		}
-		if fn := f.unmarshal; fn != nil {
-			var err error
-			b, err = fn(b, m.offset(f.field), wire)
-			if err == nil {
-				reqMask |= f.reqMask
-				continue
-			}
-			if r, ok := err.(*RequiredNotSetError); ok {
-				// Remember this error, but keep parsing. We need to produce
-				// a full parse even if a required field is missing.
-				if errLater == nil {
-					errLater = r
-				}
-				reqMask |= f.reqMask
-				continue
-			}
-			if err != errInternalBadWireType {
-				if err == errInvalidUTF8 {
-					if errLater == nil {
-						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
-						errLater = &invalidUTF8Error{fullName}
-					}
-					continue
-				}
-				return err
-			}
-			// Fragments with bad wire type are treated as unknown fields.
-		}
-
-		// Unknown tag.
-		if !u.unrecognized.IsValid() {
-			// Don't keep unrecognized data; just skip it.
-			var err error
-			b, err = skipField(b, wire)
-			if err != nil {
-				return err
-			}
-			continue
-		}
-		// Keep unrecognized data around.
-		// maybe in extensions, maybe in the unrecognized field.
-		z := m.offset(u.unrecognized).toBytes()
-		var emap map[int32]Extension
-		var e Extension
-		for _, r := range u.extensionRanges {
-			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
-				if u.extensions.IsValid() {
-					mp := m.offset(u.extensions).toExtensions()
-					emap = mp.extensionsWrite()
-					e = emap[int32(tag)]
-					z = &e.enc
-					break
-				}
-				if u.oldExtensions.IsValid() {
-					p := m.offset(u.oldExtensions).toOldExtensions()
-					emap = *p
-					if emap == nil {
-						emap = map[int32]Extension{}
-						*p = emap
-					}
-					e = emap[int32(tag)]
-					z = &e.enc
-					break
-				}
-				panic("no extensions field available")
-			}
-		}
-
-		// Use wire type to skip data.
-		var err error
-		b0 := b
-		b, err = skipField(b, wire)
-		if err != nil {
-			return err
-		}
-		*z = encodeVarint(*z, tag<<3|uint64(wire))
-		*z = append(*z, b0[:len(b0)-len(b)]...)
-
-		if emap != nil {
-			emap[int32(tag)] = e
-		}
-	}
-	if reqMask != u.reqMask && errLater == nil {
-		// A required field of this message is missing.
-		for _, n := range u.reqFields {
-			if reqMask&1 == 0 {
-				errLater = &RequiredNotSetError{n}
-			}
-			reqMask >>= 1
-		}
-	}
-	return errLater
-}
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
-	u.lock.Lock()
-	defer u.lock.Unlock()
-	if u.initialized != 0 {
-		return
-	}
-	t := u.typ
-	n := t.NumField()
-
-	// Set up the "not found" value for the unrecognized byte buffer.
-	// This is the default for proto3.
-	u.unrecognized = invalidField
-	u.extensions = invalidField
-	u.oldExtensions = invalidField
-
-	// List of the generated type and offset for each oneof field.
-	type oneofField struct {
-		ityp  reflect.Type // interface type of oneof field
-		field field        // offset in containing message
-	}
-	var oneofFields []oneofField
-
-	for i := 0; i < n; i++ {
-		f := t.Field(i)
-		if f.Name == "XXX_unrecognized" {
-			// The byte slice used to hold unrecognized input is special.
-			if f.Type != reflect.TypeOf(([]byte)(nil)) {
-				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
-			}
-			u.unrecognized = toField(&f)
-			continue
-		}
-		if f.Name == "XXX_InternalExtensions" {
-			// Ditto here.
-			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
-				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
-			}
-			u.extensions = toField(&f)
-			if f.Tag.Get("protobuf_messageset") == "1" {
-				u.isMessageSet = true
-			}
-			continue
-		}
-		if f.Name == "XXX_extensions" {
-			// An older form of the extensions field.
-			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
-				panic("bad type for XXX_extensions field: " + f.Type.Name())
-			}
-			u.oldExtensions = toField(&f)
-			continue
-		}
-		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
-			continue
-		}
-
-		oneof := f.Tag.Get("protobuf_oneof")
-		if oneof != "" {
-			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
-			// The rest of oneof processing happens below.
-			continue
-		}
-
-		tags := f.Tag.Get("protobuf")
-		tagArray := strings.Split(tags, ",")
-		if len(tagArray) < 2 {
-			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
-		}
-		tag, err := strconv.Atoi(tagArray[1])
-		if err != nil {
-			panic("protobuf tag field not an integer: " + tagArray[1])
-		}
-
-		name := ""
-		for _, tag := range tagArray[3:] {
-			if strings.HasPrefix(tag, "name=") {
-				name = tag[5:]
-			}
-		}
-
-		// Extract unmarshaling function from the field (its type and tags).
-		unmarshal := fieldUnmarshaler(&f)
-
-		// Required field?
-		var reqMask uint64
-		if tagArray[2] == "req" {
-			bit := len(u.reqFields)
-			u.reqFields = append(u.reqFields, name)
-			reqMask = uint64(1) << uint(bit)
-			// TODO: if we have more than 64 required fields, we end up
-			// not verifying that all required fields are present.
-			// Fix this, perhaps using a count of required fields?
-		}
-
-		// Store the info in the correct slot in the message.
-		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
-	}
-
-	// Find any types associated with oneof fields.
-	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
-	if fn.IsValid() {
-		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
-		for i := res.Len() - 1; i >= 0; i-- {
-			v := res.Index(i)                             // interface{}
-			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
-			typ := tptr.Elem()                            // Msg_X
-
-			f := typ.Field(0) // oneof implementers have one field
-			baseUnmarshal := fieldUnmarshaler(&f)
-			tags := strings.Split(f.Tag.Get("protobuf"), ",")
-			fieldNum, err := strconv.Atoi(tags[1])
-			if err != nil {
-				panic("protobuf tag field not an integer: " + tags[1])
-			}
-			var name string
-			for _, tag := range tags {
-				if strings.HasPrefix(tag, "name=") {
-					name = strings.TrimPrefix(tag, "name=")
-					break
-				}
-			}
-
-			// Find the oneof field that this struct implements.
-			// Might take O(n^2) to process all of the oneofs, but who cares.
-			for _, of := range oneofFields {
-				if tptr.Implements(of.ityp) {
-					// We have found the corresponding interface for this struct.
-					// That lets us know where this struct should be stored
-					// when we encounter it during unmarshaling.
-					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
-					u.setTag(fieldNum, of.field, unmarshal, 0, name)
-				}
-			}
-		}
-	}
-
-	// Get extension ranges, if any.
-	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
-	if fn.IsValid() {
-		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
-			panic("a message with extensions, but no extensions field in " + t.Name())
-		}
-		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
-	}
-
-	// Explicitly disallow tag 0. This will ensure we flag an error
-	// when decoding a buffer of all zeros. Without this code, we
-	// would decode and skip an all-zero buffer of even length.
-	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
-	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
-		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
-	}, 0, "")
-
-	// Set mask for required field check.
-	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
-	atomic.StoreInt32(&u.initialized, 1)
-}
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
-	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
-	n := u.typ.NumField()
-	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
-		for len(u.dense) <= tag {
-			u.dense = append(u.dense, unmarshalFieldInfo{})
-		}
-		u.dense[tag] = i
-		return
-	}
-	if u.sparse == nil {
-		u.sparse = map[uint64]unmarshalFieldInfo{}
-	}
-	u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
-	if f.Type.Kind() == reflect.Map {
-		return makeUnmarshalMap(f)
-	}
-	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
-	tagArray := strings.Split(tags, ",")
-	encoding := tagArray[0]
-	name := "unknown"
-	proto3 := false
-	validateUTF8 := true
-	for _, tag := range tagArray[3:] {
-		if strings.HasPrefix(tag, "name=") {
-			name = tag[5:]
-		}
-		if tag == "proto3" {
-			proto3 = true
-		}
-	}
-	validateUTF8 = validateUTF8 && proto3
-
-	// Figure out packaging (pointer, slice, or both)
-	slice := false
-	pointer := false
-	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
-		slice = true
-		t = t.Elem()
-	}
-	if t.Kind() == reflect.Ptr {
-		pointer = true
-		t = t.Elem()
-	}
-
-	// We'll never have both pointer and slice for basic types.
-	if pointer && slice && t.Kind() != reflect.Struct {
-		panic("both pointer and slice for basic type in " + t.Name())
-	}
-
-	switch t.Kind() {
-	case reflect.Bool:
-		if pointer {
-			return unmarshalBoolPtr
-		}
-		if slice {
-			return unmarshalBoolSlice
-		}
-		return unmarshalBoolValue
-	case reflect.Int32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return unmarshalFixedS32Ptr
-			}
-			if slice {
-				return unmarshalFixedS32Slice
-			}
-			return unmarshalFixedS32Value
-		case "varint":
-			// this could be int32 or enum
-			if pointer {
-				return unmarshalInt32Ptr
-			}
-			if slice {
-				return unmarshalInt32Slice
-			}
-			return unmarshalInt32Value
-		case "zigzag32":
-			if pointer {
-				return unmarshalSint32Ptr
-			}
-			if slice {
-				return unmarshalSint32Slice
-			}
-			return unmarshalSint32Value
-		}
-	case reflect.Int64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return unmarshalFixedS64Ptr
-			}
-			if slice {
-				return unmarshalFixedS64Slice
-			}
-			return unmarshalFixedS64Value
-		case "varint":
-			if pointer {
-				return unmarshalInt64Ptr
-			}
-			if slice {
-				return unmarshalInt64Slice
-			}
-			return unmarshalInt64Value
-		case "zigzag64":
-			if pointer {
-				return unmarshalSint64Ptr
-			}
-			if slice {
-				return unmarshalSint64Slice
-			}
-			return unmarshalSint64Value
-		}
-	case reflect.Uint32:
-		switch encoding {
-		case "fixed32":
-			if pointer {
-				return unmarshalFixed32Ptr
-			}
-			if slice {
-				return unmarshalFixed32Slice
-			}
-			return unmarshalFixed32Value
-		case "varint":
-			if pointer {
-				return unmarshalUint32Ptr
-			}
-			if slice {
-				return unmarshalUint32Slice
-			}
-			return unmarshalUint32Value
-		}
-	case reflect.Uint64:
-		switch encoding {
-		case "fixed64":
-			if pointer {
-				return unmarshalFixed64Ptr
-			}
-			if slice {
-				return unmarshalFixed64Slice
-			}
-			return unmarshalFixed64Value
-		case "varint":
-			if pointer {
-				return unmarshalUint64Ptr
-			}
-			if slice {
-				return unmarshalUint64Slice
-			}
-			return unmarshalUint64Value
-		}
-	case reflect.Float32:
-		if pointer {
-			return unmarshalFloat32Ptr
-		}
-		if slice {
-			return unmarshalFloat32Slice
-		}
-		return unmarshalFloat32Value
-	case reflect.Float64:
-		if pointer {
-			return unmarshalFloat64Ptr
-		}
-		if slice {
-			return unmarshalFloat64Slice
-		}
-		return unmarshalFloat64Value
-	case reflect.Map:
-		panic("map type in typeUnmarshaler in " + t.Name())
-	case reflect.Slice:
-		if pointer {
-			panic("bad pointer in slice case in " + t.Name())
-		}
-		if slice {
-			return unmarshalBytesSlice
-		}
-		return unmarshalBytesValue
-	case reflect.String:
-		if validateUTF8 {
-			if pointer {
-				return unmarshalUTF8StringPtr
-			}
-			if slice {
-				return unmarshalUTF8StringSlice
-			}
-			return unmarshalUTF8StringValue
-		}
-		if pointer {
-			return unmarshalStringPtr
-		}
-		if slice {
-			return unmarshalStringSlice
-		}
-		return unmarshalStringValue
-	case reflect.Struct:
-		// message or group field
-		if !pointer {
-			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
-		}
-		switch encoding {
-		case "bytes":
-			if slice {
-				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
-			}
-			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
-		case "group":
-			if slice {
-				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
-			}
-			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
-		}
-	}
-	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	*f.toInt64() = v
-	return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	*f.toInt64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int64(x)
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x)
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	*f.toInt64() = v
-	return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	*f.toInt64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int64(x>>1) ^ int64(x)<<63>>63
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int64(x>>1) ^ int64(x)<<63>>63
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	*f.toUint64() = v
-	return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	*f.toUint64Ptr() = &v
-	return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := uint64(x)
-			s := f.toUint64Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint64(x)
-	s := f.toUint64Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	*f.toInt32() = v
-	return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	f.setInt32Ptr(v)
-	return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int32(x)
-			f.appendInt32Slice(v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x)
-	f.appendInt32Slice(v)
-	return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	*f.toInt32() = v
-	return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	f.setInt32Ptr(v)
-	return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := int32(x>>1) ^ int32(x)<<31>>31
-			f.appendInt32Slice(v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := int32(x>>1) ^ int32(x)<<31>>31
-	f.appendInt32Slice(v)
-	return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	*f.toUint32() = v
-	return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	*f.toUint32Ptr() = &v
-	return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			b = b[n:]
-			v := uint32(x)
-			s := f.toUint32Slice()
-			*s = append(*s, v)
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	v := uint32(x)
-	s := f.toUint32Slice()
-	*s = append(*s, v)
-	return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	*f.toUint64() = v
-	return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	*f.toUint64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-			s := f.toUint64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-	s := f.toUint64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	*f.toInt64() = v
-	return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	*f.toInt64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-			s := f.toInt64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
-	s := f.toInt64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	*f.toUint32() = v
-	return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	*f.toUint32Ptr() = &v
-	return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-			s := f.toUint32Slice()
-			*s = append(*s, v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-	s := f.toUint32Slice()
-	*s = append(*s, v)
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	*f.toInt32() = v
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	f.setInt32Ptr(v)
-	return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-			f.appendInt32Slice(v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
-	f.appendInt32Slice(v)
-	return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	// Note: any length varint is allowed, even though any sane
-	// encoder will use one byte.
-	// See https://github.com/golang/protobuf/issues/76
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	// TODO: check if x>1? Tests seem to indicate no.
-	v := x != 0
-	*f.toBool() = v
-	return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := x != 0
-	*f.toBoolPtr() = &v
-	return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			x, n = decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := x != 0
-			s := f.toBoolSlice()
-			*s = append(*s, v)
-			b = b[n:]
-		}
-		return res, nil
-	}
-	if w != WireVarint {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := x != 0
-	s := f.toBoolSlice()
-	*s = append(*s, v)
-	return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	*f.toFloat64() = v
-	return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	*f.toFloat64Ptr() = &v
-	return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 8 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-			s := f.toFloat64Slice()
-			*s = append(*s, v)
-			b = b[8:]
-		}
-		return res, nil
-	}
-	if w != WireFixed64 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 8 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
-	s := f.toFloat64Slice()
-	*s = append(*s, v)
-	return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	*f.toFloat32() = v
-	return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	*f.toFloat32Ptr() = &v
-	return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
-	if w == WireBytes { // packed
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		res := b[x:]
-		b = b[:x]
-		for len(b) > 0 {
-			if len(b) < 4 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-			s := f.toFloat32Slice()
-			*s = append(*s, v)
-			b = b[4:]
-		}
-		return res, nil
-	}
-	if w != WireFixed32 {
-		return b, errInternalBadWireType
-	}
-	if len(b) < 4 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
-	s := f.toFloat32Slice()
-	*s = append(*s, v)
-	return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toString() = v
-	return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toStringPtr() = &v
-	return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	s := f.toStringSlice()
-	*s = append(*s, v)
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toString() = v
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	*f.toStringPtr() = &v
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := string(b[:x])
-	s := f.toStringSlice()
-	*s = append(*s, v)
-	if !utf8.ValidString(v) {
-		return b[x:], errInvalidUTF8
-	}
-	return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	// The use of append here is a trick which avoids the zeroing
-	// that would be required if we used a make/copy pair.
-	// We append to emptyBuf instead of nil because we want
-	// a non-nil result even when the length is 0.
-	v := append(emptyBuf[:], b[:x]...)
-	*f.toBytes() = v
-	return b[x:], nil
-}
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
-	if w != WireBytes {
-		return b, errInternalBadWireType
-	}
-	x, n := decodeVarint(b)
-	if n == 0 {
-		return nil, io.ErrUnexpectedEOF
-	}
-	b = b[n:]
-	if x > uint64(len(b)) {
-		return nil, io.ErrUnexpectedEOF
-	}
-	v := append(emptyBuf[:], b[:x]...)
-	s := f.toBytesSlice()
-	*s = append(*s, v)
-	return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireBytes {
-			return b, errInternalBadWireType
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		// First read the message field to see if something is there.
-		// The semantics of multiple submessages are weird.  Instead of
-		// the last one winning (as it is for all other fields), multiple
-		// submessages are merged.
-		v := f.getPointer()
-		if v.isNil() {
-			v = valToPointer(reflect.New(sub.typ))
-			f.setPointer(v)
-		}
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		return b[x:], err
-	}
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireBytes {
-			return b, errInternalBadWireType
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := valToPointer(reflect.New(sub.typ))
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		f.appendPointer(v)
-		return b[x:], err
-	}
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireStartGroup {
-			return b, errInternalBadWireType
-		}
-		x, y := findEndGroup(b)
-		if x < 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := f.getPointer()
-		if v.isNil() {
-			v = valToPointer(reflect.New(sub.typ))
-			f.setPointer(v)
-		}
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		return b[y:], err
-	}
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		if w != WireStartGroup {
-			return b, errInternalBadWireType
-		}
-		x, y := findEndGroup(b)
-		if x < 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		v := valToPointer(reflect.New(sub.typ))
-		err := sub.unmarshal(v, b[:x])
-		if err != nil {
-			if r, ok := err.(*RequiredNotSetError); ok {
-				r.field = name + "." + r.field
-			} else {
-				return nil, err
-			}
-		}
-		f.appendPointer(v)
-		return b[y:], err
-	}
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
-	t := f.Type
-	kt := t.Key()
-	vt := t.Elem()
-	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
-	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		// The map entry is a submessage. Figure out how big it is.
-		if w != WireBytes {
-			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
-		}
-		x, n := decodeVarint(b)
-		if n == 0 {
-			return nil, io.ErrUnexpectedEOF
-		}
-		b = b[n:]
-		if x > uint64(len(b)) {
-			return nil, io.ErrUnexpectedEOF
-		}
-		r := b[x:] // unused data to return
-		b = b[:x]  // data for map entry
-
-		// Note: we could use #keys * #values ~= 200 functions
-		// to do map decoding without reflection. Probably not worth it.
-		// Maps will be somewhat slow. Oh well.
-
-		// Read key and value from data.
-		var nerr nonFatal
-		k := reflect.New(kt)
-		v := reflect.New(vt)
-		for len(b) > 0 {
-			x, n := decodeVarint(b)
-			if n == 0 {
-				return nil, io.ErrUnexpectedEOF
-			}
-			wire := int(x) & 7
-			b = b[n:]
-
-			var err error
-			switch x >> 3 {
-			case 1:
-				b, err = unmarshalKey(b, valToPointer(k), wire)
-			case 2:
-				b, err = unmarshalVal(b, valToPointer(v), wire)
-			default:
-				err = errInternalBadWireType // skip unknown tag
-			}
-
-			if nerr.Merge(err) {
-				continue
-			}
-			if err != errInternalBadWireType {
-				return nil, err
-			}
-
-			// Skip past unknown fields.
-			b, err = skipField(b, wire)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		// Get map, allocate if needed.
-		m := f.asPointerTo(t).Elem() // an addressable map[K]T
-		if m.IsNil() {
-			m.Set(reflect.MakeMap(t))
-		}
-
-		// Insert into map.
-		m.SetMapIndex(k.Elem(), v.Elem())
-
-		return r, nerr.E
-	}
-}
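
The map entry format the helper above decodes is just a nested message: field 1 holds the key, field 2 holds the value, and the whole entry is length-delimited like any other submessage. A minimal hand-decoded sketch of that layout; the map<string, int32> shape and the single-byte varints are illustrative assumptions, not anything taken from this repository:

package main

import "fmt"

func main() {
	// Hypothetical map<string, int32> entry {"a": 1}: an ordinary
	// length-delimited submessage whose field 1 is the key and field 2 is
	// the value. Every varint here happens to fit in one byte.
	entry := []byte{
		0x0a, 0x01, 'a', // field 1, wire type 2 (bytes), length 1, "a"
		0x10, 0x01, // field 2, wire type 0 (varint), value 1
	}
	var key string
	var val int32
	for len(entry) > 0 {
		tag := entry[0] // field number in the high bits, wire type in the low 3
		entry = entry[1:]
		switch tag >> 3 {
		case 1: // key: length-delimited string
			l := int(entry[0])
			key = string(entry[1 : 1+l])
			entry = entry[1+l:]
		case 2: // value: varint
			val = int32(entry[0])
			entry = entry[1:]
		default:
			entry = nil // unknown field; a real decoder would skip it
		}
	}
	fmt.Printf("key=%q value=%d\n", key, val) // key="a" value=1
}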
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-//   oneof F {
-//     int64 X = 1;
-//     float64 Y = 2;
-//   }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
-	sf := typ.Field(0)
-	field0 := toField(&sf)
-	return func(b []byte, f pointer, w int) ([]byte, error) {
-		// Allocate holder for value.
-		v := reflect.New(typ)
-
-		// Unmarshal data into holder.
-		// We unmarshal into the first field of the holder object.
-		var err error
-		var nerr nonFatal
-		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
-		if !nerr.Merge(err) {
-			return nil, err
-		}
-
-		// Write pointer to holder into target field.
-		f.asPointerTo(ityp).Elem().Set(v)
-
-		return b, nerr.E
-	}
-}
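
The typ/ityp parameters above refer to the wrapper types the protobuf compiler generates for each oneof case: a one-field struct per case, all satisfying a small per-oneof interface. A rough sketch of that generated shape; the names Msg, Msg_X, Msg_Y and isMsg_F follow the usual generator convention but are hand-written assumptions here, not code from this repository:

package main

import "fmt"

// Hand-written stand-ins for what the generator would emit for
//   message Msg { oneof F { int64 X = 1; double Y = 2; } }
type isMsg_F interface{ isMsg_F() }

type Msg_X struct{ X int64 }   // wrapper for oneof case X
type Msg_Y struct{ Y float64 } // wrapper for oneof case Y

func (*Msg_X) isMsg_F() {}
func (*Msg_Y) isMsg_F() {}

type Msg struct {
	F isMsg_F // holds *Msg_X or *Msg_Y, never both
}

func main() {
	m := Msg{F: &Msg_X{X: 42}}
	// A type switch is the usual way to read a oneof after unmarshaling.
	switch v := m.F.(type) {
	case *Msg_X:
		fmt.Println("X =", v.X)
	case *Msg_Y:
		fmt.Println("Y =", v.Y)
	}
}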
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
-	switch wire {
-	case WireVarint:
-		_, k := decodeVarint(b)
-		if k == 0 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[k:]
-	case WireFixed32:
-		if len(b) < 4 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[4:]
-	case WireFixed64:
-		if len(b) < 8 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[8:]
-	case WireBytes:
-		m, k := decodeVarint(b)
-		if k == 0 || uint64(len(b)-k) < m {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[uint64(k)+m:]
-	case WireStartGroup:
-		_, i := findEndGroup(b)
-		if i == -1 {
-			return b, io.ErrUnexpectedEOF
-		}
-		b = b[i:]
-	default:
-		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
-	}
-	return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
-	depth := 1
-	i := 0
-	for {
-		x, n := decodeVarint(b[i:])
-		if n == 0 {
-			return -1, -1
-		}
-		j := i
-		i += n
-		switch x & 7 {
-		case WireVarint:
-			_, k := decodeVarint(b[i:])
-			if k == 0 {
-				return -1, -1
-			}
-			i += k
-		case WireFixed32:
-			if len(b)-4 < i {
-				return -1, -1
-			}
-			i += 4
-		case WireFixed64:
-			if len(b)-8 < i {
-				return -1, -1
-			}
-			i += 8
-		case WireBytes:
-			m, k := decodeVarint(b[i:])
-			if k == 0 {
-				return -1, -1
-			}
-			i += k
-			if uint64(len(b)-i) < m {
-				return -1, -1
-			}
-			i += int(m)
-		case WireStartGroup:
-			depth++
-		case WireEndGroup:
-			depth--
-			if depth == 0 {
-				return j, i
-			}
-		default:
-			return -1, -1
-		}
-	}
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
-	for x >= 1<<7 {
-		b = append(b, byte(x&0x7f|0x80))
-		x >>= 7
-	}
-	return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
-	var x, y uint64
-	if len(b) <= 0 {
-		goto bad
-	}
-	x = uint64(b[0])
-	if x < 0x80 {
-		return x, 1
-	}
-	x -= 0x80
-
-	if len(b) <= 1 {
-		goto bad
-	}
-	y = uint64(b[1])
-	x += y << 7
-	if y < 0x80 {
-		return x, 2
-	}
-	x -= 0x80 << 7
-
-	if len(b) <= 2 {
-		goto bad
-	}
-	y = uint64(b[2])
-	x += y << 14
-	if y < 0x80 {
-		return x, 3
-	}
-	x -= 0x80 << 14
-
-	if len(b) <= 3 {
-		goto bad
-	}
-	y = uint64(b[3])
-	x += y << 21
-	if y < 0x80 {
-		return x, 4
-	}
-	x -= 0x80 << 21
-
-	if len(b) <= 4 {
-		goto bad
-	}
-	y = uint64(b[4])
-	x += y << 28
-	if y < 0x80 {
-		return x, 5
-	}
-	x -= 0x80 << 28
-
-	if len(b) <= 5 {
-		goto bad
-	}
-	y = uint64(b[5])
-	x += y << 35
-	if y < 0x80 {
-		return x, 6
-	}
-	x -= 0x80 << 35
-
-	if len(b) <= 6 {
-		goto bad
-	}
-	y = uint64(b[6])
-	x += y << 42
-	if y < 0x80 {
-		return x, 7
-	}
-	x -= 0x80 << 42
-
-	if len(b) <= 7 {
-		goto bad
-	}
-	y = uint64(b[7])
-	x += y << 49
-	if y < 0x80 {
-		return x, 8
-	}
-	x -= 0x80 << 49
-
-	if len(b) <= 8 {
-		goto bad
-	}
-	y = uint64(b[8])
-	x += y << 56
-	if y < 0x80 {
-		return x, 9
-	}
-	x -= 0x80 << 56
-
-	if len(b) <= 9 {
-		goto bad
-	}
-	y = uint64(b[9])
-	x += y << 63
-	if y < 2 {
-		return x, 10
-	}
-
-bad:
-	return 0, 0
-}
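
encodeVarint and decodeVarint above implement the base-128 varint that underlies every tag, length and integer field on the protobuf wire: seven payload bits per byte, with the high bit signalling that more bytes follow. A standalone round-trip of the same scheme, assuming nothing beyond the standard library:

package main

import "fmt"

// encodeVarint appends x to b, 7 bits per byte, high bit = "more bytes follow".
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeVarint is the inverse; it returns the value and the number of bytes
// read, or (0, 0) if the input is truncated.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	b := encodeVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02  (300 -> low 7 bits 0x2c with continuation, then 0x02)
	v, n := decodeVarint(b)
	fmt.Println(v, n) // 300 2
}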

+ 0 - 843
vendor/github.com/golang/protobuf/proto/text.go

@@ -1,843 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
-	"bufio"
-	"bytes"
-	"encoding"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"math"
-	"reflect"
-	"sort"
-	"strings"
-)
-
-var (
-	newline         = []byte("\n")
-	spaces          = []byte("                                        ")
-	endBraceNewline = []byte("}\n")
-	backslashN      = []byte{'\\', 'n'}
-	backslashR      = []byte{'\\', 'r'}
-	backslashT      = []byte{'\\', 't'}
-	backslashDQ     = []byte{'\\', '"'}
-	backslashBS     = []byte{'\\', '\\'}
-	posInf          = []byte("inf")
-	negInf          = []byte("-inf")
-	nan             = []byte("nan")
-)
-
-type writer interface {
-	io.Writer
-	WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
-	ind      int
-	complete bool // if the current position is a complete line
-	compact  bool // whether to write out as a one-liner
-	w        writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
-	if !strings.Contains(s, "\n") {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		w.complete = false
-		return io.WriteString(w.w, s)
-	}
-	// WriteString is typically called without newlines, so this
-	// codepath and its copy are rare.  We copy to avoid
-	// duplicating all of Write's logic here.
-	return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
-	newlines := bytes.Count(p, newline)
-	if newlines == 0 {
-		if !w.compact && w.complete {
-			w.writeIndent()
-		}
-		n, err = w.w.Write(p)
-		w.complete = false
-		return n, err
-	}
-
-	frags := bytes.SplitN(p, newline, newlines+1)
-	if w.compact {
-		for i, frag := range frags {
-			if i > 0 {
-				if err := w.w.WriteByte(' '); err != nil {
-					return n, err
-				}
-				n++
-			}
-			nn, err := w.w.Write(frag)
-			n += nn
-			if err != nil {
-				return n, err
-			}
-		}
-		return n, nil
-	}
-
-	for i, frag := range frags {
-		if w.complete {
-			w.writeIndent()
-		}
-		nn, err := w.w.Write(frag)
-		n += nn
-		if err != nil {
-			return n, err
-		}
-		if i+1 < len(frags) {
-			if err := w.w.WriteByte('\n'); err != nil {
-				return n, err
-			}
-			n++
-		}
-	}
-	w.complete = len(frags[len(frags)-1]) == 0
-	return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
-	if w.compact && c == '\n' {
-		c = ' '
-	}
-	if !w.compact && w.complete {
-		w.writeIndent()
-	}
-	err := w.w.WriteByte(c)
-	w.complete = c == '\n'
-	return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
-	if w.ind == 0 {
-		log.Print("proto: textWriter unindented too far")
-		return
-	}
-	w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
-	if _, err := w.WriteString(props.OrigName); err != nil {
-		return err
-	}
-	if props.Wire != "group" {
-		return w.WriteByte(':')
-	}
-	return nil
-}
-
-func requiresQuotes(u string) bool {
-	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
-	for _, ch := range u {
-		switch {
-		case ch == '.' || ch == '/' || ch == '_':
-			continue
-		case '0' <= ch && ch <= '9':
-			continue
-		case 'A' <= ch && ch <= 'Z':
-			continue
-		case 'a' <= ch && ch <= 'z':
-			continue
-		default:
-			return true
-		}
-	}
-	return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
-	type wkt interface {
-		XXX_WellKnownType() string
-	}
-	t, ok := sv.Addr().Interface().(wkt)
-	return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
-	turl := sv.FieldByName("TypeUrl")
-	val := sv.FieldByName("Value")
-	if !turl.IsValid() || !val.IsValid() {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	b, ok := val.Interface().([]byte)
-	if !ok {
-		return true, errors.New("proto: invalid google.protobuf.Any message")
-	}
-
-	parts := strings.Split(turl.String(), "/")
-	mt := MessageType(parts[len(parts)-1])
-	if mt == nil {
-		return false, nil
-	}
-	m := reflect.New(mt.Elem())
-	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
-		return false, nil
-	}
-	w.Write([]byte("["))
-	u := turl.String()
-	if requiresQuotes(u) {
-		writeString(w, u)
-	} else {
-		w.Write([]byte(u))
-	}
-	if w.compact {
-		w.Write([]byte("]:<"))
-	} else {
-		w.Write([]byte("]: <\n"))
-		w.ind++
-	}
-	if err := tm.writeStruct(w, m.Elem()); err != nil {
-		return true, err
-	}
-	if w.compact {
-		w.Write([]byte("> "))
-	} else {
-		w.ind--
-		w.Write([]byte(">\n"))
-	}
-	return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
-	if tm.ExpandAny && isAny(sv) {
-		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
-			return err
-		}
-	}
-	st := sv.Type()
-	sprops := GetProperties(st)
-	for i := 0; i < sv.NumField(); i++ {
-		fv := sv.Field(i)
-		props := sprops.Prop[i]
-		name := st.Field(i).Name
-
-		if name == "XXX_NoUnkeyedLiteral" {
-			continue
-		}
-
-		if strings.HasPrefix(name, "XXX_") {
-			// There are two XXX_ fields:
-			//   XXX_unrecognized []byte
-			//   XXX_extensions   map[int32]proto.Extension
-			// The first is handled here;
-			// the second is handled at the bottom of this function.
-			if name == "XXX_unrecognized" && !fv.IsNil() {
-				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if fv.Kind() == reflect.Ptr && fv.IsNil() {
-			// Field not filled in. This could be an optional field or
-			// a required field that wasn't filled in. Either way, there
-			// isn't anything we can show for it.
-			continue
-		}
-		if fv.Kind() == reflect.Slice && fv.IsNil() {
-			// Repeated field that is empty, or a bytes field that is unused.
-			continue
-		}
-
-		if props.Repeated && fv.Kind() == reflect.Slice {
-			// Repeated field.
-			for j := 0; j < fv.Len(); j++ {
-				if err := writeName(w, props); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				v := fv.Index(j)
-				if v.Kind() == reflect.Ptr && v.IsNil() {
-					// A nil message in a repeated field is not valid,
-					// but we can handle that more gracefully than panicking.
-					if _, err := w.Write([]byte("<nil>\n")); err != nil {
-						return err
-					}
-					continue
-				}
-				if err := tm.writeAny(w, v, props); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if fv.Kind() == reflect.Map {
-			// Map fields are rendered as a repeated struct with key/value fields.
-			keys := fv.MapKeys()
-			sort.Sort(mapKeys(keys))
-			for _, key := range keys {
-				val := fv.MapIndex(key)
-				if err := writeName(w, props); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				// open struct
-				if err := w.WriteByte('<'); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte('\n'); err != nil {
-						return err
-					}
-				}
-				w.indent()
-				// key
-				if _, err := w.WriteString("key:"); err != nil {
-					return err
-				}
-				if !w.compact {
-					if err := w.WriteByte(' '); err != nil {
-						return err
-					}
-				}
-				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-				// nil values aren't legal, but we can avoid panicking because of them.
-				if val.Kind() != reflect.Ptr || !val.IsNil() {
-					// value
-					if _, err := w.WriteString("value:"); err != nil {
-						return err
-					}
-					if !w.compact {
-						if err := w.WriteByte(' '); err != nil {
-							return err
-						}
-					}
-					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
-						return err
-					}
-					if err := w.WriteByte('\n'); err != nil {
-						return err
-					}
-				}
-				// close struct
-				w.unindent()
-				if err := w.WriteByte('>'); err != nil {
-					return err
-				}
-				if err := w.WriteByte('\n'); err != nil {
-					return err
-				}
-			}
-			continue
-		}
-		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
-			// empty bytes field
-			continue
-		}
-		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
-			// proto3 non-repeated scalar field; skip if zero value
-			if isProto3Zero(fv) {
-				continue
-			}
-		}
-
-		if fv.Kind() == reflect.Interface {
-			// Check if it is a oneof.
-			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
-				// fv is nil, or holds a pointer to generated struct.
-				// That generated struct has exactly one field,
-				// which has a protobuf struct tag.
-				if fv.IsNil() {
-					continue
-				}
-				inner := fv.Elem().Elem() // interface -> *T -> T
-				tag := inner.Type().Field(0).Tag.Get("protobuf")
-				props = new(Properties) // Overwrite the outer props var, but not its pointee.
-				props.Parse(tag)
-				// Write the value in the oneof, not the oneof itself.
-				fv = inner.Field(0)
-
-				// Special case to cope with malformed messages gracefully:
-				// If the value in the oneof is a nil pointer, don't panic
-				// in writeAny.
-				if fv.Kind() == reflect.Ptr && fv.IsNil() {
-					// Use errors.New so writeAny won't render quotes.
-					msg := errors.New("/* nil */")
-					fv = reflect.ValueOf(&msg).Elem()
-				}
-			}
-		}
-
-		if err := writeName(w, props); err != nil {
-			return err
-		}
-		if !w.compact {
-			if err := w.WriteByte(' '); err != nil {
-				return err
-			}
-		}
-
-		// Enums have a String method, so writeAny will work fine.
-		if err := tm.writeAny(w, fv, props); err != nil {
-			return err
-		}
-
-		if err := w.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-
-	// Extensions (the XXX_extensions field).
-	pv := sv.Addr()
-	if _, err := extendable(pv.Interface()); err == nil {
-		if err := tm.writeExtensions(w, pv); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
-	v = reflect.Indirect(v)
-
-	// Floats have special cases.
-	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
-		x := v.Float()
-		var b []byte
-		switch {
-		case math.IsInf(x, 1):
-			b = posInf
-		case math.IsInf(x, -1):
-			b = negInf
-		case math.IsNaN(x):
-			b = nan
-		}
-		if b != nil {
-			_, err := w.Write(b)
-			return err
-		}
-		// Other values are handled below.
-	}
-
-	// We don't attempt to serialise every possible value type; only those
-	// that can occur in protocol buffers.
-	switch v.Kind() {
-	case reflect.Slice:
-		// Should only be a []byte; repeated fields are handled in writeStruct.
-		if err := writeString(w, string(v.Bytes())); err != nil {
-			return err
-		}
-	case reflect.String:
-		if err := writeString(w, v.String()); err != nil {
-			return err
-		}
-	case reflect.Struct:
-		// Required/optional group/message.
-		var bra, ket byte = '<', '>'
-		if props != nil && props.Wire == "group" {
-			bra, ket = '{', '}'
-		}
-		if err := w.WriteByte(bra); err != nil {
-			return err
-		}
-		if !w.compact {
-			if err := w.WriteByte('\n'); err != nil {
-				return err
-			}
-		}
-		w.indent()
-		if v.CanAddr() {
-			// Calling v.Interface on a struct causes the reflect package to
-			// copy the entire struct. This is racy with the new Marshaler
-			// since we atomically update the XXX_sizecache.
-			//
-			// Thus, we retrieve a pointer to the struct if possible to avoid
-			// a race since v.Interface on the pointer doesn't copy the struct.
-			//
-			// If v is not addressable, then we are not worried about a race
-			// since it implies that the binary Marshaler cannot possibly be
-			// mutating this value.
-			v = v.Addr()
-		}
-		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
-			text, err := etm.MarshalText()
-			if err != nil {
-				return err
-			}
-			if _, err = w.Write(text); err != nil {
-				return err
-			}
-		} else {
-			if v.Kind() == reflect.Ptr {
-				v = v.Elem()
-			}
-			if err := tm.writeStruct(w, v); err != nil {
-				return err
-			}
-		}
-		w.unindent()
-		if err := w.WriteByte(ket); err != nil {
-			return err
-		}
-	default:
-		_, err := fmt.Fprint(w, v.Interface())
-		return err
-	}
-	return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
-	return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
-	// use WriteByte here to get any needed indent
-	if err := w.WriteByte('"'); err != nil {
-		return err
-	}
-	// Loop over the bytes, not the runes.
-	for i := 0; i < len(s); i++ {
-		var err error
-		// Divergence from C++: we don't escape apostrophes.
-		// There's no need to escape them, and the C++ parser
-		// copes with a naked apostrophe.
-		switch c := s[i]; c {
-		case '\n':
-			_, err = w.w.Write(backslashN)
-		case '\r':
-			_, err = w.w.Write(backslashR)
-		case '\t':
-			_, err = w.w.Write(backslashT)
-		case '"':
-			_, err = w.w.Write(backslashDQ)
-		case '\\':
-			_, err = w.w.Write(backslashBS)
-		default:
-			if isprint(c) {
-				err = w.w.WriteByte(c)
-			} else {
-				_, err = fmt.Fprintf(w.w, "\\%03o", c)
-			}
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
-	if !w.compact {
-		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
-			return err
-		}
-	}
-	b := NewBuffer(data)
-	for b.index < len(b.buf) {
-		x, err := b.DecodeVarint()
-		if err != nil {
-			_, err := fmt.Fprintf(w, "/* %v */\n", err)
-			return err
-		}
-		wire, tag := x&7, x>>3
-		if wire == WireEndGroup {
-			w.unindent()
-			if _, err := w.Write(endBraceNewline); err != nil {
-				return err
-			}
-			continue
-		}
-		if _, err := fmt.Fprint(w, tag); err != nil {
-			return err
-		}
-		if wire != WireStartGroup {
-			if err := w.WriteByte(':'); err != nil {
-				return err
-			}
-		}
-		if !w.compact || wire == WireStartGroup {
-			if err := w.WriteByte(' '); err != nil {
-				return err
-			}
-		}
-		switch wire {
-		case WireBytes:
-			buf, e := b.DecodeRawBytes(false)
-			if e == nil {
-				_, err = fmt.Fprintf(w, "%q", buf)
-			} else {
-				_, err = fmt.Fprintf(w, "/* %v */", e)
-			}
-		case WireFixed32:
-			x, err = b.DecodeFixed32()
-			err = writeUnknownInt(w, x, err)
-		case WireFixed64:
-			x, err = b.DecodeFixed64()
-			err = writeUnknownInt(w, x, err)
-		case WireStartGroup:
-			err = w.WriteByte('{')
-			w.indent()
-		case WireVarint:
-			x, err = b.DecodeVarint()
-			err = writeUnknownInt(w, x, err)
-		default:
-			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
-		}
-		if err != nil {
-			return err
-		}
-		if err = w.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
-	if err == nil {
-		_, err = fmt.Fprint(w, x)
-	} else {
-		_, err = fmt.Fprintf(w, "/* %v */", err)
-	}
-	return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int           { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
-	emap := extensionMaps[pv.Type().Elem()]
-	ep, _ := extendable(pv.Interface())
-
-	// Order the extensions by ID.
-	// This isn't strictly necessary, but it will give us
-	// canonical output, which will also make testing easier.
-	m, mu := ep.extensionsRead()
-	if m == nil {
-		return nil
-	}
-	mu.Lock()
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids))
-	mu.Unlock()
-
-	for _, extNum := range ids {
-		ext := m[extNum]
-		var desc *ExtensionDesc
-		if emap != nil {
-			desc = emap[extNum]
-		}
-		if desc == nil {
-			// Unknown extension.
-			if err := writeUnknownStruct(w, ext.enc); err != nil {
-				return err
-			}
-			continue
-		}
-
-		pb, err := GetExtension(ep, desc)
-		if err != nil {
-			return fmt.Errorf("failed getting extension: %v", err)
-		}
-
-		// Repeated extensions will appear as a slice.
-		if !desc.repeated() {
-			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
-				return err
-			}
-		} else {
-			v := reflect.ValueOf(pb)
-			for i := 0; i < v.Len(); i++ {
-				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
-	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
-		return err
-	}
-	if !w.compact {
-		if err := w.WriteByte(' '); err != nil {
-			return err
-		}
-	}
-	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
-		return err
-	}
-	if err := w.WriteByte('\n'); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (w *textWriter) writeIndent() {
-	if !w.complete {
-		return
-	}
-	remain := w.ind * 2
-	for remain > 0 {
-		n := remain
-		if n > len(spaces) {
-			n = len(spaces)
-		}
-		w.w.Write(spaces[:n])
-		remain -= n
-	}
-	w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
-	Compact   bool // use compact text format (one line).
-	ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
-	val := reflect.ValueOf(pb)
-	if pb == nil || val.IsNil() {
-		w.Write([]byte("<nil>"))
-		return nil
-	}
-	var bw *bufio.Writer
-	ww, ok := w.(writer)
-	if !ok {
-		bw = bufio.NewWriter(w)
-		ww = bw
-	}
-	aw := &textWriter{
-		w:        ww,
-		complete: true,
-		compact:  tm.Compact,
-	}
-
-	if etm, ok := pb.(encoding.TextMarshaler); ok {
-		text, err := etm.MarshalText()
-		if err != nil {
-			return err
-		}
-		if _, err = aw.Write(text); err != nil {
-			return err
-		}
-		if bw != nil {
-			return bw.Flush()
-		}
-		return nil
-	}
-	// Dereference the received pointer so we don't have outer < and >.
-	v := reflect.Indirect(val)
-	if err := tm.writeStruct(aw, v); err != nil {
-		return err
-	}
-	if bw != nil {
-		return bw.Flush()
-	}
-	return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
-	var buf bytes.Buffer
-	tm.Marshal(&buf, pb)
-	return buf.String()
-}
-
-var (
-	defaultTextMarshaler = TextMarshaler{}
-	compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
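
The exported entry points above (MarshalText, MarshalTextString, CompactText, CompactTextString) produce the human-readable `name: value` text encoding, with `<...>` around nested messages and octal escapes inside strings. A hedged usage sketch: the Person type below is a hand-written stand-in for protoc-generated code, so its struct tags and field names are assumptions rather than anything from this repository.

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
)

// Person stands in for a generated message
// (message Person { string name = 1; int32 id = 2; }); the protobuf struct
// tags are what give the text marshaler its field names.
type Person struct {
	Name string `protobuf:"bytes,1,opt,name=name"`
	Id   int32  `protobuf:"varint,2,opt,name=id"`
}

func (m *Person) Reset()         { *m = Person{} }
func (m *Person) String() string { return proto.CompactTextString(m) }
func (*Person) ProtoMessage()    {}

func main() {
	p := &Person{Name: "Ada", Id: 7}

	// Multi-line text format written to stdout, roughly:
	//   name: "Ada"
	//   id: 7
	if err := proto.MarshalText(os.Stdout, p); err != nil {
		panic(err)
	}

	// The compact variant is the same encoding on a single line.
	fmt.Println(proto.CompactTextString(p)) // name:"Ada" id:7
}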

+ 0 - 880
vendor/github.com/golang/protobuf/proto/text_parser.go

@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
-	"encoding"
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
-	Message string
-	Line    int // 1-based line number
-	Offset  int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
-	if p.Line == 1 {
-		// show offset only for first line
-		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
-	}
-	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
-	value    string
-	err      *ParseError
-	line     int    // line number
-	offset   int    // byte number from start of input, not start of line
-	unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
-	if t.err == nil {
-		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
-	}
-	return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
-	s            string // remaining input
-	done         bool   // whether the parsing is finished (success or error)
-	backed       bool   // whether back() was called
-	offset, line int
-	cur          token
-}
-
-func newTextParser(s string) *textParser {
-	p := new(textParser)
-	p.s = s
-	p.line = 1
-	p.cur.line = 1
-	return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
-	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
-	p.cur.err = pe
-	p.done = true
-	return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
-	switch {
-	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
-		return true
-	case '0' <= c && c <= '9':
-		return true
-	}
-	switch c {
-	case '-', '+', '.', '_':
-		return true
-	}
-	return false
-}
-
-func isWhitespace(c byte) bool {
-	switch c {
-	case ' ', '\t', '\n', '\r':
-		return true
-	}
-	return false
-}
-
-func isQuote(c byte) bool {
-	switch c {
-	case '"', '\'':
-		return true
-	}
-	return false
-}
-
-func (p *textParser) skipWhitespace() {
-	i := 0
-	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
-		if p.s[i] == '#' {
-			// comment; skip to end of line or input
-			for i < len(p.s) && p.s[i] != '\n' {
-				i++
-			}
-			if i == len(p.s) {
-				break
-			}
-		}
-		if p.s[i] == '\n' {
-			p.line++
-		}
-		i++
-	}
-	p.offset += i
-	p.s = p.s[i:len(p.s)]
-	if len(p.s) == 0 {
-		p.done = true
-	}
-}
-
-func (p *textParser) advance() {
-	// Skip whitespace
-	p.skipWhitespace()
-	if p.done {
-		return
-	}
-
-	// Start of non-whitespace
-	p.cur.err = nil
-	p.cur.offset, p.cur.line = p.offset, p.line
-	p.cur.unquoted = ""
-	switch p.s[0] {
-	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
-		// Single symbol
-		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
-	case '"', '\'':
-		// Quoted string
-		i := 1
-		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
-			if p.s[i] == '\\' && i+1 < len(p.s) {
-				// skip escaped char
-				i++
-			}
-			i++
-		}
-		if i >= len(p.s) || p.s[i] != p.s[0] {
-			p.errorf("unmatched quote")
-			return
-		}
-		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
-		if err != nil {
-			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
-			return
-		}
-		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
-		p.cur.unquoted = unq
-	default:
-		i := 0
-		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
-			i++
-		}
-		if i == 0 {
-			p.errorf("unexpected byte %#x", p.s[0])
-			return
-		}
-		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
-	}
-	p.offset += len(p.cur.value)
-}
-
-var (
-	errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
-	// This is based on C++'s tokenizer.cc.
-	// Despite its name, this is *not* parsing C syntax.
-	// For instance, "\0" is an invalid quoted string.
-
-	// Avoid allocation in trivial cases.
-	simple := true
-	for _, r := range s {
-		if r == '\\' || r == quote {
-			simple = false
-			break
-		}
-	}
-	if simple {
-		return s, nil
-	}
-
-	buf := make([]byte, 0, 3*len(s)/2)
-	for len(s) > 0 {
-		r, n := utf8.DecodeRuneInString(s)
-		if r == utf8.RuneError && n == 1 {
-			return "", errBadUTF8
-		}
-		s = s[n:]
-		if r != '\\' {
-			if r < utf8.RuneSelf {
-				buf = append(buf, byte(r))
-			} else {
-				buf = append(buf, string(r)...)
-			}
-			continue
-		}
-
-		ch, tail, err := unescape(s)
-		if err != nil {
-			return "", err
-		}
-		buf = append(buf, ch...)
-		s = tail
-	}
-	return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
-	r, n := utf8.DecodeRuneInString(s)
-	if r == utf8.RuneError && n == 1 {
-		return "", "", errBadUTF8
-	}
-	s = s[n:]
-	switch r {
-	case 'a':
-		return "\a", s, nil
-	case 'b':
-		return "\b", s, nil
-	case 'f':
-		return "\f", s, nil
-	case 'n':
-		return "\n", s, nil
-	case 'r':
-		return "\r", s, nil
-	case 't':
-		return "\t", s, nil
-	case 'v':
-		return "\v", s, nil
-	case '?':
-		return "?", s, nil // trigraph workaround
-	case '\'', '"', '\\':
-		return string(r), s, nil
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		if len(s) < 2 {
-			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
-		}
-		ss := string(r) + s[:2]
-		s = s[2:]
-		i, err := strconv.ParseUint(ss, 8, 8)
-		if err != nil {
-			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
-		}
-		return string([]byte{byte(i)}), s, nil
-	case 'x', 'X', 'u', 'U':
-		var n int
-		switch r {
-		case 'x', 'X':
-			n = 2
-		case 'u':
-			n = 4
-		case 'U':
-			n = 8
-		}
-		if len(s) < n {
-			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
-		}
-		ss := s[:n]
-		s = s[n:]
-		i, err := strconv.ParseUint(ss, 16, 64)
-		if err != nil {
-			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
-		}
-		if r == 'x' || r == 'X' {
-			return string([]byte{byte(i)}), s, nil
-		}
-		if i > utf8.MaxRune {
-			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
-		}
-		return string(i), s, nil
-	}
-	return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
-	if p.backed || p.done {
-		p.backed = false
-		return &p.cur
-	}
-	p.advance()
-	if p.done {
-		p.cur.value = ""
-	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
-		// Look for multiple quoted strings separated by whitespace,
-		// and concatenate them.
-		cat := p.cur
-		for {
-			p.skipWhitespace()
-			if p.done || !isQuote(p.s[0]) {
-				break
-			}
-			p.advance()
-			if p.cur.err != nil {
-				return &p.cur
-			}
-			cat.value += " " + p.cur.value
-			cat.unquoted += p.cur.unquoted
-		}
-		p.done = false // parser may have seen EOF, but we want to return cat
-		p.cur = cat
-	}
-	return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != s {
-		p.back()
-		return p.errorf("expected %q, found %q", s, tok.value)
-	}
-	return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
-	st := sv.Type()
-	sprops := GetProperties(st)
-	for i := 0; i < st.NumField(); i++ {
-		if !isNil(sv.Field(i)) {
-			continue
-		}
-
-		props := sprops.Prop[i]
-		if props.Required {
-			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
-		}
-	}
-	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
-	i, ok := sprops.decoderOrigNames[name]
-	if ok {
-		return i, sprops.Prop[i], true
-	}
-	return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ":" {
-		// Colon is optional when the field is a group or message.
-		needColon := true
-		switch props.Wire {
-		case "group":
-			needColon = false
-		case "bytes":
-			// A "bytes" field is either a message, a string, or a repeated field;
-			// those three become *T, *string and []T respectively, so we can check for
-			// this field being a pointer to a non-string.
-			if typ.Kind() == reflect.Ptr {
-				// *T or *string
-				if typ.Elem().Kind() == reflect.String {
-					break
-				}
-			} else if typ.Kind() == reflect.Slice {
-				// []T or []*T
-				if typ.Elem().Kind() != reflect.Ptr {
-					break
-				}
-			} else if typ.Kind() == reflect.String {
-				// The proto3 exception is for a string field,
-				// which requires a colon.
-				break
-			}
-			needColon = false
-		}
-		if needColon {
-			return p.errorf("expected ':', found %q", tok.value)
-		}
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
-	st := sv.Type()
-	sprops := GetProperties(st)
-	reqCount := sprops.reqCount
-	var reqFieldErr error
-	fieldSet := make(map[string]bool)
-	// A struct is a sequence of "name: value", terminated by one of
-	// '>' or '}', or the end of the input.  A name may also be
-	// "[extension]" or "[type/url]".
-	//
-	// The whole struct can also be an expanded Any message, like:
-	// [type/url] < ... struct contents ... >
-	for {
-		tok := p.next()
-		if tok.err != nil {
-			return tok.err
-		}
-		if tok.value == terminator {
-			break
-		}
-		if tok.value == "[" {
-			// Looks like an extension or an Any.
-			//
-			// TODO: Check whether we need to handle
-			// namespace rooted names (e.g. ".something.Foo").
-			extName, err := p.consumeExtName()
-			if err != nil {
-				return err
-			}
-
-			if s := strings.LastIndex(extName, "/"); s >= 0 {
-				// If it contains a slash, it's an Any type URL.
-				messageName := extName[s+1:]
-				mt := MessageType(messageName)
-				if mt == nil {
-					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
-				}
-				tok = p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				// consume an optional colon
-				if tok.value == ":" {
-					tok = p.next()
-					if tok.err != nil {
-						return tok.err
-					}
-				}
-				var terminator string
-				switch tok.value {
-				case "<":
-					terminator = ">"
-				case "{":
-					terminator = "}"
-				default:
-					return p.errorf("expected '{' or '<', found %q", tok.value)
-				}
-				v := reflect.New(mt.Elem())
-				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
-					return pe
-				}
-				b, err := Marshal(v.Interface().(Message))
-				if err != nil {
-					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
-				}
-				if fieldSet["type_url"] {
-					return p.errorf(anyRepeatedlyUnpacked, "type_url")
-				}
-				if fieldSet["value"] {
-					return p.errorf(anyRepeatedlyUnpacked, "value")
-				}
-				sv.FieldByName("TypeUrl").SetString(extName)
-				sv.FieldByName("Value").SetBytes(b)
-				fieldSet["type_url"] = true
-				fieldSet["value"] = true
-				continue
-			}
-
-			var desc *ExtensionDesc
-			// This could be faster, but it's functional.
-			// TODO: Do something smarter than a linear scan.
-			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
-				if d.Name == extName {
-					desc = d
-					break
-				}
-			}
-			if desc == nil {
-				return p.errorf("unrecognized extension %q", extName)
-			}
-
-			props := &Properties{}
-			props.Parse(desc.Tag)
-
-			typ := reflect.TypeOf(desc.ExtensionType)
-			if err := p.checkForColon(props, typ); err != nil {
-				return err
-			}
-
-			rep := desc.repeated()
-
-			// Read the extension structure, and set it in
-			// the value we're constructing.
-			var ext reflect.Value
-			if !rep {
-				ext = reflect.New(typ).Elem()
-			} else {
-				ext = reflect.New(typ.Elem()).Elem()
-			}
-			if err := p.readAny(ext, props); err != nil {
-				if _, ok := err.(*RequiredNotSetError); !ok {
-					return err
-				}
-				reqFieldErr = err
-			}
-			ep := sv.Addr().Interface().(Message)
-			if !rep {
-				SetExtension(ep, desc, ext.Interface())
-			} else {
-				old, err := GetExtension(ep, desc)
-				var sl reflect.Value
-				if err == nil {
-					sl = reflect.ValueOf(old) // existing slice
-				} else {
-					sl = reflect.MakeSlice(typ, 0, 1)
-				}
-				sl = reflect.Append(sl, ext)
-				SetExtension(ep, desc, sl.Interface())
-			}
-			if err := p.consumeOptionalSeparator(); err != nil {
-				return err
-			}
-			continue
-		}
-
-		// This is a normal, non-extension field.
-		name := tok.value
-		var dst reflect.Value
-		fi, props, ok := structFieldByName(sprops, name)
-		if ok {
-			dst = sv.Field(fi)
-		} else if oop, ok := sprops.OneofTypes[name]; ok {
-			// It is a oneof.
-			props = oop.Prop
-			nv := reflect.New(oop.Type.Elem())
-			dst = nv.Elem().Field(0)
-			field := sv.Field(oop.Field)
-			if !field.IsNil() {
-				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
-			}
-			field.Set(nv)
-		}
-		if !dst.IsValid() {
-			return p.errorf("unknown field name %q in %v", name, st)
-		}
-
-		if dst.Kind() == reflect.Map {
-			// Consume any colon.
-			if err := p.checkForColon(props, dst.Type()); err != nil {
-				return err
-			}
-
-			// Construct the map if it doesn't already exist.
-			if dst.IsNil() {
-				dst.Set(reflect.MakeMap(dst.Type()))
-			}
-			key := reflect.New(dst.Type().Key()).Elem()
-			val := reflect.New(dst.Type().Elem()).Elem()
-
-			// The map entry should be this sequence of tokens:
-			//	< key : KEY value : VALUE >
-			// However, implementations may omit key or value, and technically
-			// we should support them in any order.  See b/28924776 for a time
-			// this went wrong.
-
-			tok := p.next()
-			var terminator string
-			switch tok.value {
-			case "<":
-				terminator = ">"
-			case "{":
-				terminator = "}"
-			default:
-				return p.errorf("expected '{' or '<', found %q", tok.value)
-			}
-			for {
-				tok := p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				if tok.value == terminator {
-					break
-				}
-				switch tok.value {
-				case "key":
-					if err := p.consumeToken(":"); err != nil {
-						return err
-					}
-					if err := p.readAny(key, props.MapKeyProp); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				case "value":
-					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
-						return err
-					}
-					if err := p.readAny(val, props.MapValProp); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				default:
-					p.back()
-					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
-				}
-			}
-
-			dst.SetMapIndex(key, val)
-			continue
-		}
-
-		// Check that it's not already set if it's not a repeated field.
-		if !props.Repeated && fieldSet[name] {
-			return p.errorf("non-repeated field %q was repeated", name)
-		}
-
-		if err := p.checkForColon(props, dst.Type()); err != nil {
-			return err
-		}
-
-		// Parse into the field.
-		fieldSet[name] = true
-		if err := p.readAny(dst, props); err != nil {
-			if _, ok := err.(*RequiredNotSetError); !ok {
-				return err
-			}
-			reqFieldErr = err
-		}
-		if props.Required {
-			reqCount--
-		}
-
-		if err := p.consumeOptionalSeparator(); err != nil {
-			return err
-		}
-
-	}
-
-	if reqCount > 0 {
-		return p.missingRequiredFieldError(sv)
-	}
-	return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
-	tok := p.next()
-	if tok.err != nil {
-		return "", tok.err
-	}
-
-	// If extension name or type url is quoted, it's a single token.
-	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
-		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
-		if err != nil {
-			return "", err
-		}
-		return name, p.consumeToken("]")
-	}
-
-	// Consume everything up to "]"
-	var parts []string
-	for tok.value != "]" {
-		parts = append(parts, tok.value)
-		tok = p.next()
-		if tok.err != nil {
-			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
-		}
-		if p.done && tok.value != "]" {
-			return "", p.errorf("unclosed type_url or extension name")
-		}
-	}
-	return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ";" && tok.value != "," {
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value == "" {
-		return p.errorf("unexpected EOF")
-	}
-
-	switch fv := v; fv.Kind() {
-	case reflect.Slice:
-		at := v.Type()
-		if at.Elem().Kind() == reflect.Uint8 {
-			// Special case for []byte
-			if tok.value[0] != '"' && tok.value[0] != '\'' {
-				// Deliberately written out here, as the error after
-				// this switch statement would write "invalid []byte: ...",
-				// which is not as user-friendly.
-				return p.errorf("invalid string: %v", tok.value)
-			}
-			bytes := []byte(tok.unquoted)
-			fv.Set(reflect.ValueOf(bytes))
-			return nil
-		}
-		// Repeated field.
-		if tok.value == "[" {
-			// Repeated field with list notation, like [1,2,3].
-			for {
-				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
-				err := p.readAny(fv.Index(fv.Len()-1), props)
-				if err != nil {
-					return err
-				}
-				tok := p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				if tok.value == "]" {
-					break
-				}
-				if tok.value != "," {
-					return p.errorf("Expected ']' or ',' found %q", tok.value)
-				}
-			}
-			return nil
-		}
-		// One value of the repeated field.
-		p.back()
-		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
-		return p.readAny(fv.Index(fv.Len()-1), props)
-	case reflect.Bool:
-		// true/1/t/True or false/f/0/False.
-		switch tok.value {
-		case "true", "1", "t", "True":
-			fv.SetBool(true)
-			return nil
-		case "false", "0", "f", "False":
-			fv.SetBool(false)
-			return nil
-		}
-	case reflect.Float32, reflect.Float64:
-		v := tok.value
-		// Ignore 'f' for compatibility with output generated by C++, but don't
-		// remove 'f' when the value is "-inf" or "inf".
-		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
-			v = v[:len(v)-1]
-		}
-		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
-			fv.SetFloat(f)
-			return nil
-		}
-	case reflect.Int32:
-		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
-			fv.SetInt(x)
-			return nil
-		}
-
-		if len(props.Enum) == 0 {
-			break
-		}
-		m, ok := enumValueMaps[props.Enum]
-		if !ok {
-			break
-		}
-		x, ok := m[tok.value]
-		if !ok {
-			break
-		}
-		fv.SetInt(int64(x))
-		return nil
-	case reflect.Int64:
-		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
-			fv.SetInt(x)
-			return nil
-		}
-
-	case reflect.Ptr:
-		// A basic field (indirected through pointer), or a repeated message/group
-		p.back()
-		fv.Set(reflect.New(fv.Type().Elem()))
-		return p.readAny(fv.Elem(), props)
-	case reflect.String:
-		if tok.value[0] == '"' || tok.value[0] == '\'' {
-			fv.SetString(tok.unquoted)
-			return nil
-		}
-	case reflect.Struct:
-		var terminator string
-		switch tok.value {
-		case "{":
-			terminator = "}"
-		case "<":
-			terminator = ">"
-		default:
-			return p.errorf("expected '{' or '<', found %q", tok.value)
-		}
-		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
-		return p.readStruct(fv, terminator)
-	case reflect.Uint32:
-		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
-			fv.SetUint(uint64(x))
-			return nil
-		}
-	case reflect.Uint64:
-		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
-			fv.SetUint(x)
-			return nil
-		}
-	}
-	return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
-	if um, ok := pb.(encoding.TextUnmarshaler); ok {
-		return um.UnmarshalText([]byte(s))
-	}
-	pb.Reset()
-	v := reflect.ValueOf(pb)
-	return newTextParser(s).readStruct(v.Elem(), "")
-}
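
The hunk above removes the vendored protobuf text-format parser; its public entry point is UnmarshalText, which drives the readStruct/readAny machinery shown above. A minimal sketch of how that entry point is typically exercised, using the descriptor proto that ships with golang/protobuf so the example stays self-contained (the input string and field values are made up for illustration and are not part of this change):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Text-format input; quoted strings and scalar fields are what
	// readAny above is responsible for parsing.
	in := `name: "config.proto" package: "example" syntax: "proto3"`

	var fd descriptorpb.FileDescriptorProto
	if err := proto.UnmarshalText(in, &fd); err != nil {
		log.Fatalf("parse text proto: %v", err)
	}
	fmt.Println(fd.GetName(), fd.GetPackage(), fd.GetSyntax())
}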

+ 0 - 171
vendor/github.com/google/go-github/AUTHORS

@@ -1,171 +0,0 @@
-# This is the official list of go-github authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control or
-# https://github.com/google/go-github/graphs/contributors.
-#
-# Authors who wish to be recognized in this file should add themselves (or
-# their employer, as appropriate).
-
-178inaba <masahiro.furudate@gmail.com>
-Abhinav Gupta <mail@abhinavg.net>
-Ahmed Hagy <a.akram93@gmail.com>
-Ainsley Chong <ainsley.chong@gmail.com>
-Akeda Bagus <akeda@x-team.com>
-Alec Thomas <alec@swapoff.org>
-Aleks Clark <aleks.clark@gmail.com>
-Alex Bramley <a.bramley@gmail.com>
-Alexander Harkness <me@bearbin.net>
-Allen Sun <shlallen1990@gmail.com>
-Amey Sakhadeo <me@ameyms.com>
-Andreas Garnæs <https://github.com/andreas>
-Andrew Ryabchun <aryabchun@mail.ua>
-Andy Hume <andyhume@gmail.com>
-Andy Lindeman <andy@lindeman.io>
-Anshuman Bhartiya <anshuman.bhartiya@gmail.com>
-Antoine Pelisse <apelisse@gmail.com>
-Anubha Kushwaha <anubha_bt2k14@dtu.ac.in>
-Aravind <aravindkp@outlook.in>
-Arıl Bozoluk <arilbozoluk@hotmail.com>
-Austin Dizzy <dizzy@wow.com>
-Beshr Kayali <beshrkayali@gmail.com>
-Beyang Liu <beyang.liu@gmail.com>
-Billy Lynch <wlynch92@gmail.com>
-Björn Häuser <b.haeuser@rebuy.de>
-Brad Harris <bmharris@gmail.com>
-Bradley Falzon <brad@teambrad.net>
-Brian Egizi <brian@mojotech.com>
-Bryan Boreham <bryan@weave.works>
-Cami Diez <diezcami@gmail.com>
-Carlos Alexandro Becker <caarlos0@gmail.com>
-chandresh-pancholi <chandreshpancholi007@gmail.com>
-Charlie Yan <charlieyan08@gmail.com>
-Chris King <chriskingnet@gmail.com>
-Chris Roche <chris@vsco.co>
-Chris Schaefer <chris@dtzq.com>
-Christoph Sassenberg <defsprite@gmail.com>
-Colin Misare <github.com/cmisare>
-Craig Peterson <cpeterson@stackoverflow.com>
-Cristian Maglie <c.maglie@bug.st>
-Daehyeok Mun <daehyeok@gmail.com>
-Daniel Leavitt <daniel.leavitt@gmail.com>
-Dave Du Cros <davidducros@gmail.com>
-Dave Henderson <dhenderson@gmail.com>
-David Deng <daviddengcn@gmail.com>
-Dennis Webb <dennis@bluesentryit.com>
-Diego Lapiduz <diego.lapiduz@cfpb.gov>
-Dmitri Shuralyov <shurcooL@gmail.com>
-dmnlk <seikima2demon@gmail.com>
-Don Petersen <don@donpetersen.net>
-Doug Turner <doug.turner@gmail.com>
-Drew Fradette <drew.fradette@gmail.com>
-Eli Uriegas <seemethere101@gmail.com>
-Elliott Beach <elliott2.71828@gmail.com>
-erwinvaneyk <erwinvaneyk@gmail.com>
-Fabrice <fabrice.vaillant@student.ecp.fr>
-Filippo Valsorda <hi@filippo.io>
-Florian Forster <ff@octo.it>
-Francesc Gil <xescugil@gmail.com>
-Francis <hello@francismakes.com>
-Fredrik Jönsson <fredrik.jonsson@izettle.com>
-Garrett Squire <garrettsquire@gmail.com>
-Georgy Buranov <gburanov@gmail.com>
-Gnahz <p@oath.pl>
-Google Inc.
-griffin_stewie <panterathefamilyguy@gmail.com>
-Guz Alexander <kalimatas@gmail.com>
-Hanno Hecker <hanno.hecker@zalando.de>
-Hari haran <hariharan.uno@gmail.com>
-haya14busa <hayabusa1419@gmail.com>
-Huy Tr <kingbazoka@gmail.com>
-huydx <doxuanhuy@gmail.com>
-i2bskn <i2bskn@gmail.com>
-Isao Jonas <isao.jonas@gmail.com>
-isqua <isqua@isqua.ru>
-Jameel Haffejee <RC1140@republiccommandos.co.za>
-Jan Kosecki <jan.kosecki91@gmail.com>
-Jeremy Morris <jeremylevanmorris@gmail.com>
-Jihoon Chung <j.c@navercorp.com>
-Jimmi Dyson <jimmidyson@gmail.com>
-Joe Tsai <joetsai@digital-static.net>
-John Barton <jrbarton@gmail.com>
-John Engelman <john.r.engelman@gmail.com>
-jpbelanger-mtl <jp.belanger@gmail.com>
-Juan Basso <jrbasso@gmail.com>
-Julien Rostand <jrostand@users.noreply.github.com>
-Justin Abrahms <justin@abrah.ms>
-jzhoucliqr <jzhou@cliqr.com>
-Katrina Owen <kytrinyx@github.com>
-Keita Urashima <ursm@ursm.jp>
-Kevin Burke <kev@inburke.com>
-Konrad Malawski <konrad.malawski@project13.pl>
-Kookheon Kwon <kucuny@gmail.com>
-Krzysztof Kowalczyk <kkowalczyk@gmail.com>
-Kshitij Saraogi <KshitijSaraogi@gmail.com>
-kyokomi <kyoko1220adword@gmail.com>
-Lucas Alcantara <lucasalcantaraf@gmail.com>
-Luke Evers <me@lukevers.com>
-Luke Kysow <lkysow@gmail.com>
-Luke Roberts <email@luke-roberts.co.uk>
-Luke Young <luke@hydrantlabs.org>
-Maksim Zhylinski <uzzable@gmail.com>
-Martin-Louis Bright <mlbright@gmail.com>
-Mat Geist <matgeist@gmail.com>
-Matt Brender <mjbrender@gmail.com>
-Matt Landis <landis.matt@gmail.com>
-Maxime Bury <maxime.bury@gmail.com>
-Michael Tiller <michael.tiller@gmail.com>
-Michał Glapa <michal.glapa@gmail.com>
-Nathan VanBenschoten <nvanbenschoten@gmail.com>
-Neil O'Toole <neilotoole@apache.org>
-Nick Miyake <nmiyake@palantir.com>
-Nick Spragg <nick.spragg@bbc.co.uk>
-Nikhita Raghunath <nikitaraghunath@gmail.com>
-Noah Zoschke <noah+sso2@convox.com>
-ns-cweber <cweber@narrativescience.com>
-Ondřej Kupka <ondra.cap@gmail.com>
-Panagiotis Moustafellos <pmoust@gmail.com>
-Parker Moore <parkrmoore@gmail.com>
-Pavel Shtanko <pavel.shtanko@gmail.com>
-Petr Shevtsov <petr.shevtsov@gmail.com>
-Pierre Carrier <pierre@meteor.com>
-Piotr Zurek <p.zurek@gmail.com>
-Quinn Slack <qslack@qslack.com>
-Rackspace US, Inc.
-RaviTeja Pothana <ravi-teja@live.com>
-rc1140 <jameel@republiccommandos.co.za>
-Red Hat, Inc.
-Rob Figueiredo <robfig@yext.com>
-Ronak Jain <ronakjain@outlook.in>
-Ruben Vereecken <rubenvereecken@gmail.com>
-Ryan Lower <rpjlower@gmail.com>
-Sahil Dua <sahildua2305@gmail.com>
-saisi <saisi@users.noreply.github.com>
-Sam Minnée <sam@silverstripe.com>
-Sander van Harmelen <svanharmelen@schubergphilis.com>
-Sarasa Kisaragi <lingsamuelgrace@gmail.com>
-Sean Wang <sean@decrypted.org>
-Sebastian Mandrean <sebastian.mandrean@gmail.com>
-Sebastian Mæland Pedersen <sem.pedersen@stud.uis.no>
-Sevki <s@sevki.org>
-Shawn Catanzarite <me@shawncatz.com>
-Shawn Smith <shawnpsmith@gmail.com>
-sona-tar <sona.zip@gmail.com>
-SoundCloud, Ltd.
-Stian Eikeland <stian@eikeland.se>
-Thomas Bruyelle <thomas.bruyelle@gmail.com>
-Timothée Peignier <timothee.peignier@tryphon.org>
-Trey Tacon <ttacon@gmail.com>
-ttacon <ttacon@gmail.com>
-Varadarajan Aravamudhan <varadaraajan@gmail.com>
-Victor Castell <victor@victorcastell.com>
-Victor Vrantchan <vrancean+github@gmail.com>
-Vlad Ungureanu <vladu@palantir.com>
-Will Maier <wcmaier@gmail.com>
-William Bailey <mail@williambailey.org.uk>
-Yann Malet <yann.malet@gmail.com>
-Yannick Utard <yannickutard@gmail.com>
-Yicheng Qin <qycqycqycqycqyc@gmail.com>
-Yumikiyo Osanai <yumios.art@gmail.com>
-Zach Latta <zach@zachlatta.com>

+ 0 - 341
vendor/github.com/google/go-github/LICENSE

@@ -1,341 +0,0 @@
-Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-----------
-
-Some documentation is taken from the GitHub Developer site
-<https://developer.github.com/>, which is available under the following Creative
-Commons Attribution 3.0 License. This applies only to the go-github source
-code and would not apply to any compiled binaries.
-
-THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
-COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
-COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
-AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
-
-BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
-TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
-BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
-CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
-CONDITIONS.
-
-1. Definitions
-
- a. "Adaptation" means a work based upon the Work, or upon the Work and
-    other pre-existing works, such as a translation, adaptation,
-    derivative work, arrangement of music or other alterations of a
-    literary or artistic work, or phonogram or performance and includes
-    cinematographic adaptations or any other form in which the Work may be
-    recast, transformed, or adapted including in any form recognizably
-    derived from the original, except that a work that constitutes a
-    Collection will not be considered an Adaptation for the purpose of
-    this License. For the avoidance of doubt, where the Work is a musical
-    work, performance or phonogram, the synchronization of the Work in
-    timed-relation with a moving image ("synching") will be considered an
-    Adaptation for the purpose of this License.
- b. "Collection" means a collection of literary or artistic works, such as
-    encyclopedias and anthologies, or performances, phonograms or
-    broadcasts, or other works or subject matter other than works listed
-    in Section 1(f) below, which, by reason of the selection and
-    arrangement of their contents, constitute intellectual creations, in
-    which the Work is included in its entirety in unmodified form along
-    with one or more other contributions, each constituting separate and
-    independent works in themselves, which together are assembled into a
-    collective whole. A work that constitutes a Collection will not be
-    considered an Adaptation (as defined above) for the purposes of this
-    License.
- c. "Distribute" means to make available to the public the original and
-    copies of the Work or Adaptation, as appropriate, through sale or
-    other transfer of ownership.
- d. "Licensor" means the individual, individuals, entity or entities that
-    offer(s) the Work under the terms of this License.
- e. "Original Author" means, in the case of a literary or artistic work,
-    the individual, individuals, entity or entities who created the Work
-    or if no individual or entity can be identified, the publisher; and in
-    addition (i) in the case of a performance the actors, singers,
-    musicians, dancers, and other persons who act, sing, deliver, declaim,
-    play in, interpret or otherwise perform literary or artistic works or
-    expressions of folklore; (ii) in the case of a phonogram the producer
-    being the person or legal entity who first fixes the sounds of a
-    performance or other sounds; and, (iii) in the case of broadcasts, the
-    organization that transmits the broadcast.
- f. "Work" means the literary and/or artistic work offered under the terms
-    of this License including without limitation any production in the
-    literary, scientific and artistic domain, whatever may be the mode or
-    form of its expression including digital form, such as a book,
-    pamphlet and other writing; a lecture, address, sermon or other work
-    of the same nature; a dramatic or dramatico-musical work; a
-    choreographic work or entertainment in dumb show; a musical
-    composition with or without words; a cinematographic work to which are
-    assimilated works expressed by a process analogous to cinematography;
-    a work of drawing, painting, architecture, sculpture, engraving or
-    lithography; a photographic work to which are assimilated works
-    expressed by a process analogous to photography; a work of applied
-    art; an illustration, map, plan, sketch or three-dimensional work
-    relative to geography, topography, architecture or science; a
-    performance; a broadcast; a phonogram; a compilation of data to the
-    extent it is protected as a copyrightable work; or a work performed by
-    a variety or circus performer to the extent it is not otherwise
-    considered a literary or artistic work.
- g. "You" means an individual or entity exercising rights under this
-    License who has not previously violated the terms of this License with
-    respect to the Work, or who has received express permission from the
-    Licensor to exercise rights under this License despite a previous
-    violation.
- h. "Publicly Perform" means to perform public recitations of the Work and
-    to communicate to the public those public recitations, by any means or
-    process, including by wire or wireless means or public digital
-    performances; to make available to the public Works in such a way that
-    members of the public may access these Works from a place and at a
-    place individually chosen by them; to perform the Work to the public
-    by any means or process and the communication to the public of the
-    performances of the Work, including by public digital performance; to
-    broadcast and rebroadcast the Work by any means including signs,
-    sounds or images.
- i. "Reproduce" means to make copies of the Work by any means including
-    without limitation by sound or visual recordings and the right of
-    fixation and reproducing fixations of the Work, including storage of a
-    protected performance or phonogram in digital form or other electronic
-    medium.
-
-2. Fair Dealing Rights. Nothing in this License is intended to reduce,
-limit, or restrict any uses free from copyright or rights arising from
-limitations or exceptions that are provided for in connection with the
-copyright protection under copyright law or other applicable laws.
-
-3. License Grant. Subject to the terms and conditions of this License,
-Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
-perpetual (for the duration of the applicable copyright) license to
-exercise the rights in the Work as stated below:
-
- a. to Reproduce the Work, to incorporate the Work into one or more
-    Collections, and to Reproduce the Work as incorporated in the
-    Collections;
- b. to create and Reproduce Adaptations provided that any such Adaptation,
-    including any translation in any medium, takes reasonable steps to
-    clearly label, demarcate or otherwise identify that changes were made
-    to the original Work. For example, a translation could be marked "The
-    original work was translated from English to Spanish," or a
-    modification could indicate "The original work has been modified.";
- c. to Distribute and Publicly Perform the Work including as incorporated
-    in Collections; and,
- d. to Distribute and Publicly Perform Adaptations.
- e. For the avoidance of doubt:
-
-     i. Non-waivable Compulsory License Schemes. In those jurisdictions in
-        which the right to collect royalties through any statutory or
-        compulsory licensing scheme cannot be waived, the Licensor
-        reserves the exclusive right to collect such royalties for any
-        exercise by You of the rights granted under this License;
-    ii. Waivable Compulsory License Schemes. In those jurisdictions in
-        which the right to collect royalties through any statutory or
-        compulsory licensing scheme can be waived, the Licensor waives the
-        exclusive right to collect such royalties for any exercise by You
-        of the rights granted under this License; and,
-   iii. Voluntary License Schemes. The Licensor waives the right to
-        collect royalties, whether individually or, in the event that the
-        Licensor is a member of a collecting society that administers
-        voluntary licensing schemes, via that society, from any exercise
-        by You of the rights granted under this License.
-
-The above rights may be exercised in all media and formats whether now
-known or hereafter devised. The above rights include the right to make
-such modifications as are technically necessary to exercise the rights in
-other media and formats. Subject to Section 8(f), all rights not expressly
-granted by Licensor are hereby reserved.
-
-4. Restrictions. The license granted in Section 3 above is expressly made
-subject to and limited by the following restrictions:
-
- a. You may Distribute or Publicly Perform the Work only under the terms
-    of this License. You must include a copy of, or the Uniform Resource
-    Identifier (URI) for, this License with every copy of the Work You
-    Distribute or Publicly Perform. You may not offer or impose any terms
-    on the Work that restrict the terms of this License or the ability of
-    the recipient of the Work to exercise the rights granted to that
-    recipient under the terms of the License. You may not sublicense the
-    Work. You must keep intact all notices that refer to this License and
-    to the disclaimer of warranties with every copy of the Work You
-    Distribute or Publicly Perform. When You Distribute or Publicly
-    Perform the Work, You may not impose any effective technological
-    measures on the Work that restrict the ability of a recipient of the
-    Work from You to exercise the rights granted to that recipient under
-    the terms of the License. This Section 4(a) applies to the Work as
-    incorporated in a Collection, but this does not require the Collection
-    apart from the Work itself to be made subject to the terms of this
-    License. If You create a Collection, upon notice from any Licensor You
-    must, to the extent practicable, remove from the Collection any credit
-    as required by Section 4(b), as requested. If You create an
-    Adaptation, upon notice from any Licensor You must, to the extent
-    practicable, remove from the Adaptation any credit as required by
-    Section 4(b), as requested.
- b. If You Distribute, or Publicly Perform the Work or any Adaptations or
-    Collections, You must, unless a request has been made pursuant to
-    Section 4(a), keep intact all copyright notices for the Work and
-    provide, reasonable to the medium or means You are utilizing: (i) the
-    name of the Original Author (or pseudonym, if applicable) if supplied,
-    and/or if the Original Author and/or Licensor designate another party
-    or parties (e.g., a sponsor institute, publishing entity, journal) for
-    attribution ("Attribution Parties") in Licensor's copyright notice,
-    terms of service or by other reasonable means, the name of such party
-    or parties; (ii) the title of the Work if supplied; (iii) to the
-    extent reasonably practicable, the URI, if any, that Licensor
-    specifies to be associated with the Work, unless such URI does not
-    refer to the copyright notice or licensing information for the Work;
-    and (iv) , consistent with Section 3(b), in the case of an Adaptation,
-    a credit identifying the use of the Work in the Adaptation (e.g.,
-    "French translation of the Work by Original Author," or "Screenplay
-    based on original Work by Original Author"). The credit required by
-    this Section 4 (b) may be implemented in any reasonable manner;
-    provided, however, that in the case of a Adaptation or Collection, at
-    a minimum such credit will appear, if a credit for all contributing
-    authors of the Adaptation or Collection appears, then as part of these
-    credits and in a manner at least as prominent as the credits for the
-    other contributing authors. For the avoidance of doubt, You may only
-    use the credit required by this Section for the purpose of attribution
-    in the manner set out above and, by exercising Your rights under this
-    License, You may not implicitly or explicitly assert or imply any
-    connection with, sponsorship or endorsement by the Original Author,
-    Licensor and/or Attribution Parties, as appropriate, of You or Your
-    use of the Work, without the separate, express prior written
-    permission of the Original Author, Licensor and/or Attribution
-    Parties.
- c. Except as otherwise agreed in writing by the Licensor or as may be
-    otherwise permitted by applicable law, if You Reproduce, Distribute or
-    Publicly Perform the Work either by itself or as part of any
-    Adaptations or Collections, You must not distort, mutilate, modify or
-    take other derogatory action in relation to the Work which would be
-    prejudicial to the Original Author's honor or reputation. Licensor
-    agrees that in those jurisdictions (e.g. Japan), in which any exercise
-    of the right granted in Section 3(b) of this License (the right to
-    make Adaptations) would be deemed to be a distortion, mutilation,
-    modification or other derogatory action prejudicial to the Original
-    Author's honor and reputation, the Licensor will waive or not assert,
-    as appropriate, this Section, to the fullest extent permitted by the
-    applicable national law, to enable You to reasonably exercise Your
-    right under Section 3(b) of this License (right to make Adaptations)
-    but not otherwise.
-
-5. Representations, Warranties and Disclaimer
-
-UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
-OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
-KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
-INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
-FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
-LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
-WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
-OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
-
-6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
-LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
-ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
-ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
-BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-7. Termination
-
- a. This License and the rights granted hereunder will terminate
-    automatically upon any breach by You of the terms of this License.
-    Individuals or entities who have received Adaptations or Collections
-    from You under this License, however, will not have their licenses
-    terminated provided such individuals or entities remain in full
-    compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
-    survive any termination of this License.
- b. Subject to the above terms and conditions, the license granted here is
-    perpetual (for the duration of the applicable copyright in the Work).
-    Notwithstanding the above, Licensor reserves the right to release the
-    Work under different license terms or to stop distributing the Work at
-    any time; provided, however that any such election will not serve to
-    withdraw this License (or any other license that has been, or is
-    required to be, granted under the terms of this License), and this
-    License will continue in full force and effect unless terminated as
-    stated above.
-
-8. Miscellaneous
-
- a. Each time You Distribute or Publicly Perform the Work or a Collection,
-    the Licensor offers to the recipient a license to the Work on the same
-    terms and conditions as the license granted to You under this License.
- b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
-    offers to the recipient a license to the original Work on the same
-    terms and conditions as the license granted to You under this License.
- c. If any provision of this License is invalid or unenforceable under
-    applicable law, it shall not affect the validity or enforceability of
-    the remainder of the terms of this License, and without further action
-    by the parties to this agreement, such provision shall be reformed to
-    the minimum extent necessary to make such provision valid and
-    enforceable.
- d. No term or provision of this License shall be deemed waived and no
-    breach consented to unless such waiver or consent shall be in writing
-    and signed by the party to be charged with such waiver or consent.
- e. This License constitutes the entire agreement between the parties with
-    respect to the Work licensed here. There are no understandings,
-    agreements or representations with respect to the Work not specified
-    here. Licensor shall not be bound by any additional provisions that
-    may appear in any communication from You. This License may not be
-    modified without the mutual written agreement of the Licensor and You.
- f. The rights granted under, and the subject matter referenced, in this
-    License were drafted utilizing the terminology of the Berne Convention
-    for the Protection of Literary and Artistic Works (as amended on
-    September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
-    Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
-    and the Universal Copyright Convention (as revised on July 24, 1971).
-    These rights and subject matter take effect in the relevant
-    jurisdiction in which the License terms are sought to be enforced
-    according to the corresponding provisions of the implementation of
-    those treaty provisions in the applicable national law. If the
-    standard suite of rights granted under applicable copyright law
-    includes additional rights not granted under this License, such
-    additional rights are deemed to be included in the License; this
-    License is not intended to restrict the license of any rights under
-    applicable law.
-
-
-Creative Commons Notice
-
-    Creative Commons is not a party to this License, and makes no warranty
-    whatsoever in connection with the Work. Creative Commons will not be
-    liable to You or any party on any legal theory for any damages
-    whatsoever, including without limitation any general, special,
-    incidental or consequential damages arising in connection to this
-    license. Notwithstanding the foregoing two (2) sentences, if Creative
-    Commons has expressly identified itself as the Licensor hereunder, it
-    shall have all rights and obligations of Licensor.
-
-    Except for the limited purpose of indicating to the public that the
-    Work is licensed under the CCPL, Creative Commons does not authorize
-    the use by either party of the trademark "Creative Commons" or any
-    related trademark or logo of Creative Commons without the prior
-    written consent of Creative Commons. Any permitted use will be in
-    compliance with Creative Commons' then-current trademark usage
-    guidelines, as may be published on its website or otherwise made
-    available upon request from time to time. For the avoidance of doubt,
-    this trademark restriction does not form part of this License.
-
-    Creative Commons may be contacted at http://creativecommons.org/.

+ 0 - 69
vendor/github.com/google/go-github/github/activity.go

@@ -1,69 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "context"
-
-// ActivityService handles communication with the activity related
-// methods of the GitHub API.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/
-type ActivityService service
-
-// FeedLink represents a link to a related resource.
-type FeedLink struct {
-	HRef *string `json:"href,omitempty"`
-	Type *string `json:"type,omitempty"`
-}
-
-// Feeds represents timeline resources in Atom format.
-type Feeds struct {
-	TimelineURL                 *string  `json:"timeline_url,omitempty"`
-	UserURL                     *string  `json:"user_url,omitempty"`
-	CurrentUserPublicURL        *string  `json:"current_user_public_url,omitempty"`
-	CurrentUserURL              *string  `json:"current_user_url,omitempty"`
-	CurrentUserActorURL         *string  `json:"current_user_actor_url,omitempty"`
-	CurrentUserOrganizationURL  *string  `json:"current_user_organization_url,omitempty"`
-	CurrentUserOrganizationURLs []string `json:"current_user_organization_urls,omitempty"`
-	Links                       *struct {
-		Timeline                 *FeedLink  `json:"timeline,omitempty"`
-		User                     *FeedLink  `json:"user,omitempty"`
-		CurrentUserPublic        *FeedLink  `json:"current_user_public,omitempty"`
-		CurrentUser              *FeedLink  `json:"current_user,omitempty"`
-		CurrentUserActor         *FeedLink  `json:"current_user_actor,omitempty"`
-		CurrentUserOrganization  *FeedLink  `json:"current_user_organization,omitempty"`
-		CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"`
-	} `json:"_links,omitempty"`
-}
-
-// ListFeeds lists all the feeds available to the authenticated user.
-//
-// GitHub provides several timeline resources in Atom format:
-//     Timeline: The GitHub global public timeline
-//     User: The public timeline for any user, using URI template
-//     Current user public: The public timeline for the authenticated user
-//     Current user: The private timeline for the authenticated user
-//     Current user actor: The private timeline for activity created by the
-//         authenticated user
-//     Current user organizations: The private timeline for the organizations
-//         the authenticated user is a member of.
-//
-// Note: Private feeds are only returned when authenticating via Basic Auth
-// since current feed URIs use the older, non revocable auth tokens.
-func (s *ActivityService) ListFeeds(ctx context.Context) (*Feeds, *Response, error) {
-	req, err := s.client.NewRequest("GET", "feeds", nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	f := &Feeds{}
-	resp, err := s.client.Do(ctx, req, f)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return f, resp, nil
-}
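
The removed activity.go defines ActivityService.ListFeeds on the vendored go-github client being dropped here. A hedged sketch of how that method is normally reached through github.NewClient; unauthenticated, so per the note above only the public feed URLs come back (nothing in the example is specific to this repository):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated; private feeds need Basic Auth per the note above

	feeds, _, err := client.Activity.ListFeeds(ctx)
	if err != nil {
		log.Fatalf("list feeds: %v", err)
	}
	if feeds.TimelineURL != nil {
		fmt.Println("timeline:", *feeds.TimelineURL)
	}
}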

+ 0 - 324
vendor/github.com/google/go-github/github/activity_events.go

@@ -1,324 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-// Event represents a GitHub event.
-type Event struct {
-	Type       *string          `json:"type,omitempty"`
-	Public     *bool            `json:"public,omitempty"`
-	RawPayload *json.RawMessage `json:"payload,omitempty"`
-	Repo       *Repository      `json:"repo,omitempty"`
-	Actor      *User            `json:"actor,omitempty"`
-	Org        *Organization    `json:"org,omitempty"`
-	CreatedAt  *time.Time       `json:"created_at,omitempty"`
-	ID         *string          `json:"id,omitempty"`
-}
-
-func (e Event) String() string {
-	return Stringify(e)
-}
-
-// ParsePayload parses the event payload. For recognized event types,
-// a value of the corresponding struct type will be returned.
-func (e *Event) ParsePayload() (payload interface{}, err error) {
-	switch *e.Type {
-	case "CommitCommentEvent":
-		payload = &CommitCommentEvent{}
-	case "CreateEvent":
-		payload = &CreateEvent{}
-	case "DeleteEvent":
-		payload = &DeleteEvent{}
-	case "DeploymentEvent":
-		payload = &DeploymentEvent{}
-	case "DeploymentStatusEvent":
-		payload = &DeploymentStatusEvent{}
-	case "ForkEvent":
-		payload = &ForkEvent{}
-	case "GollumEvent":
-		payload = &GollumEvent{}
-	case "InstallationEvent":
-		payload = &InstallationEvent{}
-	case "InstallationRepositoriesEvent":
-		payload = &InstallationRepositoriesEvent{}
-	case "IssueCommentEvent":
-		payload = &IssueCommentEvent{}
-	case "IssuesEvent":
-		payload = &IssuesEvent{}
-	case "LabelEvent":
-		payload = &LabelEvent{}
-	case "MarketplacePurchaseEvent":
-		payload = &MarketplacePurchaseEvent{}
-	case "MemberEvent":
-		payload = &MemberEvent{}
-	case "MembershipEvent":
-		payload = &MembershipEvent{}
-	case "MilestoneEvent":
-		payload = &MilestoneEvent{}
-	case "OrganizationEvent":
-		payload = &OrganizationEvent{}
-	case "OrgBlockEvent":
-		payload = &OrgBlockEvent{}
-	case "PageBuildEvent":
-		payload = &PageBuildEvent{}
-	case "PingEvent":
-		payload = &PingEvent{}
-	case "ProjectEvent":
-		payload = &ProjectEvent{}
-	case "ProjectCardEvent":
-		payload = &ProjectCardEvent{}
-	case "ProjectColumnEvent":
-		payload = &ProjectColumnEvent{}
-	case "PublicEvent":
-		payload = &PublicEvent{}
-	case "PullRequestEvent":
-		payload = &PullRequestEvent{}
-	case "PullRequestReviewEvent":
-		payload = &PullRequestReviewEvent{}
-	case "PullRequestReviewCommentEvent":
-		payload = &PullRequestReviewCommentEvent{}
-	case "PushEvent":
-		payload = &PushEvent{}
-	case "ReleaseEvent":
-		payload = &ReleaseEvent{}
-	case "RepositoryEvent":
-		payload = &RepositoryEvent{}
-	case "StatusEvent":
-		payload = &StatusEvent{}
-	case "TeamEvent":
-		payload = &TeamEvent{}
-	case "TeamAddEvent":
-		payload = &TeamAddEvent{}
-	case "WatchEvent":
-		payload = &WatchEvent{}
-	}
-	err = json.Unmarshal(*e.RawPayload, &payload)
-	return payload, err
-}
-
-// Payload returns the parsed event payload. For recognized event types,
-// a value of the corresponding struct type will be returned.
-//
-// Deprecated: Use ParsePayload instead, which returns an error
-// rather than panics if JSON unmarshaling raw payload fails.
-func (e *Event) Payload() (payload interface{}) {
-	var err error
-	payload, err = e.ParsePayload()
-	if err != nil {
-		panic(err)
-	}
-	return payload
-}
-
-// ListEvents drinks from the firehose of all public events across GitHub.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events
-func (s *ActivityService) ListEvents(ctx context.Context, opt *ListOptions) ([]*Event, *Response, error) {
-	u, err := addOptions("events", opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListRepositoryEvents lists events for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-repository-events
-func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/events", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListIssueEventsForRepository lists issue events for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository
-func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*IssueEvent
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListEventsForRepoNetwork lists public events for a network of repositories.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories
-func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
-	u := fmt.Sprintf("networks/%v/%v/events", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListEventsForOrganization lists public events for an organization.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-an-organization
-func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org string, opt *ListOptions) ([]*Event, *Response, error) {
-	u := fmt.Sprintf("orgs/%v/events", org)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is
-// true, only public events will be returned.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-performed-by-a-user
-func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
-	var u string
-	if publicOnly {
-		u = fmt.Sprintf("users/%v/events/public", user)
-	} else {
-		u = fmt.Sprintf("users/%v/events", user)
-	}
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListEventsReceivedByUser lists the events received by a user. If publicOnly is
-// true, only public events will be returned.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received
-func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
-	var u string
-	if publicOnly {
-		u = fmt.Sprintf("users/%v/received_events/public", user)
-	} else {
-		u = fmt.Sprintf("users/%v/received_events", user)
-	}
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
-
-// ListUserEventsForOrganization provides the user’s organization dashboard. You
-// must be authenticated as the user to view this.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-for-an-organization
-func (s *ActivityService) ListUserEventsForOrganization(ctx context.Context, org, user string, opt *ListOptions) ([]*Event, *Response, error) {
-	u := fmt.Sprintf("users/%v/events/orgs/%v", user, org)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var events []*Event
-	resp, err := s.client.Do(ctx, req, &events)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return events, resp, nil
-}
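
The removed activity_events.go pairs ListEvents with ParsePayload, which type-switches on Event.Type as shown above. A minimal sketch of consuming one page of the public event stream and inspecting two of the payload types from that switch (the page size and the handled event types are arbitrary choices for illustration):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// First page of the public event firehose.
	events, _, err := client.Activity.ListEvents(ctx, &github.ListOptions{PerPage: 10})
	if err != nil {
		log.Fatalf("list events: %v", err)
	}
	for _, ev := range events {
		payload, err := ev.ParsePayload() // preferred over the deprecated Payload()
		if err != nil {
			continue // unrecognized or malformed payload
		}
		switch p := payload.(type) {
		case *github.PushEvent:
			fmt.Printf("push with %d commit(s)\n", len(p.Commits))
		case *github.WatchEvent:
			fmt.Println("repository starred")
		}
	}
}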

+ 0 - 223
vendor/github.com/google/go-github/github/activity_notifications.go

@@ -1,223 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-	"time"
-)
-
-// Notification identifies a GitHub notification for a user.
-type Notification struct {
-	ID         *string              `json:"id,omitempty"`
-	Repository *Repository          `json:"repository,omitempty"`
-	Subject    *NotificationSubject `json:"subject,omitempty"`
-
-	// Reason identifies the event that triggered the notification.
-	//
-	// GitHub API docs: https://developer.github.com/v3/activity/notifications/#notification-reasons
-	Reason *string `json:"reason,omitempty"`
-
-	Unread     *bool      `json:"unread,omitempty"`
-	UpdatedAt  *time.Time `json:"updated_at,omitempty"`
-	LastReadAt *time.Time `json:"last_read_at,omitempty"`
-	URL        *string    `json:"url,omitempty"`
-}
-
-// NotificationSubject identifies the subject of a notification.
-type NotificationSubject struct {
-	Title            *string `json:"title,omitempty"`
-	URL              *string `json:"url,omitempty"`
-	LatestCommentURL *string `json:"latest_comment_url,omitempty"`
-	Type             *string `json:"type,omitempty"`
-}
-
-// NotificationListOptions specifies the optional parameters to the
-// ActivityService.ListNotifications method.
-type NotificationListOptions struct {
-	All           bool      `url:"all,omitempty"`
-	Participating bool      `url:"participating,omitempty"`
-	Since         time.Time `url:"since,omitempty"`
-	Before        time.Time `url:"before,omitempty"`
-
-	ListOptions
-}
-
-// ListNotifications lists all notifications for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications
-func (s *ActivityService) ListNotifications(ctx context.Context, opt *NotificationListOptions) ([]*Notification, *Response, error) {
-	u := fmt.Sprintf("notifications")
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var notifications []*Notification
-	resp, err := s.client.Do(ctx, req, &notifications)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return notifications, resp, nil
-}
-
-// ListRepositoryNotifications lists all notifications in a given repository
-// for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository
-func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) {
-	u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var notifications []*Notification
-	resp, err := s.client.Do(ctx, req, &notifications)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return notifications, resp, nil
-}
-
-type markReadOptions struct {
-	LastReadAt time.Time `json:"last_read_at,omitempty"`
-}
-
-// MarkNotificationsRead marks all notifications up to lastRead as read.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
-func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead time.Time) (*Response, error) {
-	opts := &markReadOptions{
-		LastReadAt: lastRead,
-	}
-	req, err := s.client.NewRequest("PUT", "notifications", opts)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// MarkRepositoryNotificationsRead marks all notifications up to lastRead in
-// the specified repository as read.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
-func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead time.Time) (*Response, error) {
-	opts := &markReadOptions{
-		LastReadAt: lastRead,
-	}
-	u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
-	req, err := s.client.NewRequest("PUT", u, opts)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// GetThread gets the specified notification thread.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread
-func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notification, *Response, error) {
-	u := fmt.Sprintf("notifications/threads/%v", id)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	notification := new(Notification)
-	resp, err := s.client.Do(ctx, req, notification)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return notification, resp, nil
-}
-
-// MarkThreadRead marks the specified thread as read.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read
-func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Response, error) {
-	u := fmt.Sprintf("notifications/threads/%v", id)
-
-	req, err := s.client.NewRequest("PATCH", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
-
-// GetThreadSubscription checks to see if the authenticated user is subscribed
-// to a thread.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
-func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string) (*Subscription, *Response, error) {
-	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	sub := new(Subscription)
-	resp, err := s.client.Do(ctx, req, sub)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return sub, resp, nil
-}
-
-// SetThreadSubscription sets the subscription for the specified thread for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription
-func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string, subscription *Subscription) (*Subscription, *Response, error) {
-	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
-
-	req, err := s.client.NewRequest("PUT", u, subscription)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	sub := new(Subscription)
-	resp, err := s.client.Do(ctx, req, sub)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return sub, resp, nil
-}
-
-// DeleteThreadSubscription deletes the subscription for the specified thread
-// for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription
-func (s *ActivityService) DeleteThreadSubscription(ctx context.Context, id string) (*Response, error) {
-	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
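
The removed activity_notifications.go exposes ListNotifications and MarkNotificationsRead, both of which require an authenticated client. A hedged sketch using golang.org/x/oauth2 for the token (the token value is a placeholder, not part of this change):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	// Assumed token source; notifications are only visible to an authenticated user.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "YOUR_TOKEN"})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	opt := &github.NotificationListOptions{All: true}
	notifications, _, err := client.Activity.ListNotifications(ctx, opt)
	if err != nil {
		log.Fatalf("list notifications: %v", err)
	}
	for _, n := range notifications {
		fmt.Println(n.GetReason(), n.GetSubject().GetTitle())
	}

	// Mark everything up to "now" as read.
	if _, err := client.Activity.MarkNotificationsRead(ctx, time.Now()); err != nil {
		log.Fatalf("mark read: %v", err)
	}
}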

+ 0 - 135
vendor/github.com/google/go-github/github/activity_star.go

@@ -1,135 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// StarredRepository is returned by ListStarred.
-type StarredRepository struct {
-	StarredAt  *Timestamp  `json:"starred_at,omitempty"`
-	Repository *Repository `json:"repo,omitempty"`
-}
-
-// Stargazer represents a user that has starred a repository.
-type Stargazer struct {
-	StarredAt *Timestamp `json:"starred_at,omitempty"`
-	User      *User      `json:"user,omitempty"`
-}
-
-// ListStargazers lists people who have starred the specified repo.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-stargazers
-func (s *ActivityService) ListStargazers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches
-	req.Header.Set("Accept", mediaTypeStarringPreview)
-
-	var stargazers []*Stargazer
-	resp, err := s.client.Do(ctx, req, &stargazers)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return stargazers, resp, nil
-}
-
-// ActivityListStarredOptions specifies the optional parameters to the
-// ActivityService.ListStarred method.
-type ActivityListStarredOptions struct {
-	// How to sort the repository list. Possible values are: created, updated,
-	// pushed, full_name. Default is "full_name".
-	Sort string `url:"sort,omitempty"`
-
-	// Direction in which to sort repositories. Possible values are: asc, desc.
-	// Default is "asc" when sort is "full_name", otherwise default is "desc".
-	Direction string `url:"direction,omitempty"`
-
-	ListOptions
-}
-
-// ListStarred lists all the repos starred by a user. Passing the empty string
-// will list the starred repositories for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-repositories-being-starred
-func (s *ActivityService) ListStarred(ctx context.Context, user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/starred", user)
-	} else {
-		u = "user/starred"
-	}
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches
-	req.Header.Set("Accept", mediaTypeStarringPreview)
-
-	var repos []*StarredRepository
-	resp, err := s.client.Do(ctx, req, &repos)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return repos, resp, nil
-}
-
-// IsStarred checks if a repository is starred by authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository
-func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bool, *Response, error) {
-	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return false, nil, err
-	}
-	resp, err := s.client.Do(ctx, req, nil)
-	starred, err := parseBoolResponse(err)
-	return starred, resp, err
-}
-
-// Star a repository as the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository
-func (s *ActivityService) Star(ctx context.Context, owner, repo string) (*Response, error) {
-	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("PUT", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(ctx, req, nil)
-}
-
-// Unstar a repository as the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository
-func (s *ActivityService) Unstar(ctx context.Context, owner, repo string) (*Response, error) {
-	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(ctx, req, nil)
-}
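
The removed activity_star.go covers the starring endpoints; ListStargazers is the only one that works without authentication. A minimal sketch (the owner/repo pair is GitHub's canonical example repository, chosen purely for illustration):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	gazers, _, err := client.Activity.ListStargazers(ctx, "octocat", "Hello-World", &github.ListOptions{PerPage: 5})
	if err != nil {
		log.Fatalf("list stargazers: %v", err)
	}
	for _, g := range gazers {
		// StarredAt is only populated because the method sets the preview Accept header above.
		fmt.Println(g.GetUser().GetLogin(), g.GetStarredAt())
	}
}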

+ 0 - 146
vendor/github.com/google/go-github/github/activity_watching.go

@@ -1,146 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// Subscription identifies a repository or thread subscription.
-type Subscription struct {
-	Subscribed *bool      `json:"subscribed,omitempty"`
-	Ignored    *bool      `json:"ignored,omitempty"`
-	Reason     *string    `json:"reason,omitempty"`
-	CreatedAt  *Timestamp `json:"created_at,omitempty"`
-	URL        *string    `json:"url,omitempty"`
-
-	// only populated for repository subscriptions
-	RepositoryURL *string `json:"repository_url,omitempty"`
-
-	// only populated for thread subscriptions
-	ThreadURL *string `json:"thread_url,omitempty"`
-}
-
-// ListWatchers lists watchers of a particular repo.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-watchers
-func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo)
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var watchers []*User
-	resp, err := s.client.Do(ctx, req, &watchers)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return watchers, resp, nil
-}
-
-// ListWatched lists the repositories the specified user is watching. Passing
-// the empty string will fetch watched repos for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
-func (s *ActivityService) ListWatched(ctx context.Context, user string, opt *ListOptions) ([]*Repository, *Response, error) {
-	var u string
-	if user != "" {
-		u = fmt.Sprintf("users/%v/subscriptions", user)
-	} else {
-		u = "user/subscriptions"
-	}
-	u, err := addOptions(u, opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var watched []*Repository
-	resp, err := s.client.Do(ctx, req, &watched)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return watched, resp, nil
-}
-
-// GetRepositorySubscription returns the subscription for the specified
-// repository for the authenticated user. If the authenticated user is not
-// watching the repository, a nil Subscription is returned.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription
-func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, repo string) (*Subscription, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	sub := new(Subscription)
-	resp, err := s.client.Do(ctx, req, sub)
-	if err != nil {
-		// if it's just a 404, don't return that as an error
-		_, err = parseBoolResponse(err)
-		return nil, resp, err
-	}
-
-	return sub, resp, nil
-}
-
-// SetRepositorySubscription sets the subscription for the specified repository
-// for the authenticated user.
-//
-// To watch a repository, set subscription.Subscribed to true.
-// To ignore notifications made within a repository, set subscription.Ignored to true.
-// To stop watching a repository, use DeleteRepositorySubscription.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
-func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
-
-	req, err := s.client.NewRequest("PUT", u, subscription)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	sub := new(Subscription)
-	resp, err := s.client.Do(ctx, req, sub)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return sub, resp, nil
-}
-
-// DeleteRepositorySubscription deletes the subscription for the specified
-// repository for the authenticated user.
-//
-// This is used to stop watching a repository. To control whether or not to
-// receive notifications from a repository, use SetRepositorySubscription.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
-func (s *ActivityService) DeleteRepositorySubscription(ctx context.Context, owner, repo string) (*Response, error) {
-	u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
-	req, err := s.client.NewRequest("DELETE", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(ctx, req, nil)
-}
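
The file removed above is go-github's wrapper around the repository watching/subscription endpoints (ListWatchers, ListWatched, Get/Set/DeleteRepositorySubscription); after this change the package is consumed through go modules instead of vendor/. A minimal usage sketch follows. The GITHUB_TOKEN variable, the zricethezav/gitleaks repository and the page size are illustrative placeholders only, not part of this diff.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Authenticate with a personal access token; GITHUB_TOKEN is a placeholder.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// Watch a repository by setting Subscribed to true, as the
	// SetRepositorySubscription doc comment above describes.
	sub, _, err := client.Activity.SetRepositorySubscription(ctx, "zricethezav", "gitleaks", &github.Subscription{
		Subscribed: github.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subscribed:", sub.GetSubscribed())

	// Page through the repositories the authenticated user is watching
	// (the empty user string means "the authenticated user").
	opt := &github.ListOptions{PerPage: 50}
	for {
		watched, resp, err := client.Activity.ListWatched(ctx, "", opt)
		if err != nil {
			log.Fatal(err)
		}
		for _, r := range watched {
			fmt.Println(r.GetFullName())
		}
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
}

The pagination loop reflects how go-github surfaces Link-header paging through Response.NextPage.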

+ 0 - 101
vendor/github.com/google/go-github/github/admin.go

@@ -1,101 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// AdminService handles communication with the admin related methods of the
-// GitHub API. These API routes are normally only accessible for GitHub
-// Enterprise installations.
-//
-// GitHub API docs: https://developer.github.com/v3/enterprise/
-type AdminService service
-
-// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group.
-type TeamLDAPMapping struct {
-	ID          *int64  `json:"id,omitempty"`
-	LDAPDN      *string `json:"ldap_dn,omitempty"`
-	URL         *string `json:"url,omitempty"`
-	Name        *string `json:"name,omitempty"`
-	Slug        *string `json:"slug,omitempty"`
-	Description *string `json:"description,omitempty"`
-	Privacy     *string `json:"privacy,omitempty"`
-	Permission  *string `json:"permission,omitempty"`
-
-	MembersURL      *string `json:"members_url,omitempty"`
-	RepositoriesURL *string `json:"repositories_url,omitempty"`
-}
-
-func (m TeamLDAPMapping) String() string {
-	return Stringify(m)
-}
-
-// UserLDAPMapping represents the mapping between a GitHub user and an LDAP user.
-type UserLDAPMapping struct {
-	ID         *int64  `json:"id,omitempty"`
-	LDAPDN     *string `json:"ldap_dn,omitempty"`
-	Login      *string `json:"login,omitempty"`
-	AvatarURL  *string `json:"avatar_url,omitempty"`
-	GravatarID *string `json:"gravatar_id,omitempty"`
-	Type       *string `json:"type,omitempty"`
-	SiteAdmin  *bool   `json:"site_admin,omitempty"`
-
-	URL               *string `json:"url,omitempty"`
-	EventsURL         *string `json:"events_url,omitempty"`
-	FollowingURL      *string `json:"following_url,omitempty"`
-	FollowersURL      *string `json:"followers_url,omitempty"`
-	GistsURL          *string `json:"gists_url,omitempty"`
-	OrganizationsURL  *string `json:"organizations_url,omitempty"`
-	ReceivedEventsURL *string `json:"received_events_url,omitempty"`
-	ReposURL          *string `json:"repos_url,omitempty"`
-	StarredURL        *string `json:"starred_url,omitempty"`
-	SubscriptionsURL  *string `json:"subscriptions_url,omitempty"`
-}
-
-func (m UserLDAPMapping) String() string {
-	return Stringify(m)
-}
-
-// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user.
-//
-// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user
-func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) {
-	u := fmt.Sprintf("admin/ldap/users/%v/mapping", user)
-	req, err := s.client.NewRequest("PATCH", u, mapping)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	m := new(UserLDAPMapping)
-	resp, err := s.client.Do(ctx, req, m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
-
-// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group.
-//
-// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team
-func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int64, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) {
-	u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team)
-	req, err := s.client.NewRequest("PATCH", u, mapping)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	m := new(TeamLDAPMapping)
-	resp, err := s.client.Do(ctx, req, m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
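
The AdminService methods deleted above only exist on GitHub Enterprise installations. A hedged sketch of calling UpdateUserLDAPMapping is below; the ghe.example.com URLs, the GHE_TOKEN variable, the octocat login and the LDAP DN are all placeholders, and the construction assumes the NewEnterpriseClient helper shipped with this version of go-github.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GHE_TOKEN")})

	// These endpoints only exist on GitHub Enterprise, so the client is
	// pointed at an Enterprise instance; both URLs are placeholders.
	client, err := github.NewEnterpriseClient(
		"https://ghe.example.com/api/v3/",
		"https://ghe.example.com/api/uploads/",
		oauth2.NewClient(ctx, ts),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Map an existing GitHub user onto an LDAP distinguished name.
	mapping, _, err := client.Admin.UpdateUserLDAPMapping(ctx, "octocat", &github.UserLDAPMapping{
		LDAPDN: github.String("uid=octocat,ou=users,dc=example,dc=com"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mapped", mapping.GetLogin(), "to", mapping.GetLDAPDN())
}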

+ 0 - 171
vendor/github.com/google/go-github/github/admin_stats.go

@@ -1,171 +0,0 @@
-// Copyright 2017 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-)
-
-// AdminStats represents a variety of stats of a GitHub Enterprise
-// installation.
-type AdminStats struct {
-	Issues     *IssueStats     `json:"issues,omitempty"`
-	Hooks      *HookStats      `json:"hooks,omitempty"`
-	Milestones *MilestoneStats `json:"milestones,omitempty"`
-	Orgs       *OrgStats       `json:"orgs,omitempty"`
-	Comments   *CommentStats   `json:"comments,omitempty"`
-	Pages      *PageStats      `json:"pages,omitempty"`
-	Users      *UserStats      `json:"users,omitempty"`
-	Gists      *GistStats      `json:"gists,omitempty"`
-	Pulls      *PullStats      `json:"pulls,omitempty"`
-	Repos      *RepoStats      `json:"repos,omitempty"`
-}
-
-func (s AdminStats) String() string {
-	return Stringify(s)
-}
-
-// IssueStats represents the number of total, open and closed issues.
-type IssueStats struct {
-	TotalIssues  *int `json:"total_issues,omitempty"`
-	OpenIssues   *int `json:"open_issues,omitempty"`
-	ClosedIssues *int `json:"closed_issues,omitempty"`
-}
-
-func (s IssueStats) String() string {
-	return Stringify(s)
-}
-
-// HookStats represents the number of total, active and inactive hooks.
-type HookStats struct {
-	TotalHooks    *int `json:"total_hooks,omitempty"`
-	ActiveHooks   *int `json:"active_hooks,omitempty"`
-	InactiveHooks *int `json:"inactive_hooks,omitempty"`
-}
-
-func (s HookStats) String() string {
-	return Stringify(s)
-}
-
-// MilestoneStats represents the number of total, open and closed milestones.
-type MilestoneStats struct {
-	TotalMilestones  *int `json:"total_milestones,omitempty"`
-	OpenMilestones   *int `json:"open_milestones,omitempty"`
-	ClosedMilestones *int `json:"closed_milestones,omitempty"`
-}
-
-func (s MilestoneStats) String() string {
-	return Stringify(s)
-}
-
-// OrgStats represents the number of total and disabled organizations, together
-// with the team and team member counts.
-type OrgStats struct {
-	TotalOrgs        *int `json:"total_orgs,omitempty"`
-	DisabledOrgs     *int `json:"disabled_orgs,omitempty"`
-	TotalTeams       *int `json:"total_teams,omitempty"`
-	TotalTeamMembers *int `json:"total_team_members,omitempty"`
-}
-
-func (s OrgStats) String() string {
-	return Stringify(s)
-}
-
-// CommentStats represents the number of total comments on commits, gists, issues
-// and pull requests.
-type CommentStats struct {
-	TotalCommitComments      *int `json:"total_commit_comments,omitempty"`
-	TotalGistComments        *int `json:"total_gist_comments,omitempty"`
-	TotalIssueComments       *int `json:"total_issue_comments,omitempty"`
-	TotalPullRequestComments *int `json:"total_pull_request_comments,omitempty"`
-}
-
-func (s CommentStats) String() string {
-	return Stringify(s)
-}
-
-// PageStats represents the total number of GitHub Pages.
-type PageStats struct {
-	TotalPages *int `json:"total_pages,omitempty"`
-}
-
-func (s PageStats) String() string {
-	return Stringify(s)
-}
-
-// UserStats represents the number of total, admin and suspended users.
-type UserStats struct {
-	TotalUsers     *int `json:"total_users,omitempty"`
-	AdminUsers     *int `json:"admin_users,omitempty"`
-	SuspendedUsers *int `json:"suspended_users,omitempty"`
-}
-
-func (s UserStats) String() string {
-	return Stringify(s)
-}
-
-// GistStats represents the number of total, private and public gists.
-type GistStats struct {
-	TotalGists   *int `json:"total_gists,omitempty"`
-	PrivateGists *int `json:"private_gists,omitempty"`
-	PublicGists  *int `json:"public_gists,omitempty"`
-}
-
-func (s GistStats) String() string {
-	return Stringify(s)
-}
-
-// PullStats represents the number of total, merged, mergeable and unmergeable
-// pull requests.
-type PullStats struct {
-	TotalPulls      *int `json:"total_pulls,omitempty"`
-	MergedPulls     *int `json:"merged_pulls,omitempty"`
-	MergablePulls   *int `json:"mergeable_pulls,omitempty"`
-	UnmergablePulls *int `json:"unmergeable_pulls,omitempty"`
-}
-
-func (s PullStats) String() string {
-	return Stringify(s)
-}
-
-// RepoStats represents the number of total, root, fork and organization repositories,
-// together with the total number of pushes and wikis.
-type RepoStats struct {
-	TotalRepos  *int `json:"total_repos,omitempty"`
-	RootRepos   *int `json:"root_repos,omitempty"`
-	ForkRepos   *int `json:"fork_repos,omitempty"`
-	OrgRepos    *int `json:"org_repos,omitempty"`
-	TotalPushes *int `json:"total_pushes,omitempty"`
-	TotalWikis  *int `json:"total_wikis,omitempty"`
-}
-
-func (s RepoStats) String() string {
-	return Stringify(s)
-}
-
-// GetAdminStats returns a variety of metrics about a GitHub Enterprise
-// installation.
-//
-// Please note that this is only available to site administrators,
-// otherwise it will error with a 404 not found (instead of 401 or 403).
-//
-// GitHub API docs: https://developer.github.com/v3/enterprise-admin/admin_stats/
-func (s *AdminService) GetAdminStats(ctx context.Context) (*AdminStats, *Response, error) {
-	u := fmt.Sprintf("enterprise/stats/all")
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	m := new(AdminStats)
-	resp, err := s.client.Do(ctx, req, m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
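
GetAdminStats above wraps the enterprise/stats/all endpoint. A small sketch of reading a few of the returned counters, assuming a hypothetical printAdminStats helper and a client already authenticated as a site administrator against an Enterprise instance:

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// printAdminStats prints a few of the counters returned by the
// enterprise/stats/all endpoint that GetAdminStats wraps. The caller is
// expected to pass a client built for a GitHub Enterprise instance with
// site-admin credentials; per the doc comment above, anything else
// surfaces as a 404.
func printAdminStats(ctx context.Context, client *github.Client) error {
	stats, _, err := client.Admin.GetAdminStats(ctx)
	if err != nil {
		return err
	}
	fmt.Println("repos:", stats.GetRepos().GetTotalRepos())
	fmt.Println("users:", stats.GetUsers().GetTotalUsers())
	fmt.Println("open issues:", stats.GetIssues().GetOpenIssues())
	return nil
}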

+ 0 - 169
vendor/github.com/google/go-github/github/apps.go

@@ -1,169 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
-	"context"
-	"fmt"
-	"time"
-)
-
-// AppsService provides access to the installation related functions
-// in the GitHub API.
-//
-// GitHub API docs: https://developer.github.com/v3/apps/
-type AppsService service
-
-// App represents a GitHub App.
-type App struct {
-	ID          *int64     `json:"id,omitempty"`
-	Owner       *User      `json:"owner,omitempty"`
-	Name        *string    `json:"name,omitempty"`
-	Description *string    `json:"description,omitempty"`
-	ExternalURL *string    `json:"external_url,omitempty"`
-	HTMLURL     *string    `json:"html_url,omitempty"`
-	CreatedAt   *time.Time `json:"created_at,omitempty"`
-	UpdatedAt   *time.Time `json:"updated_at,omitempty"`
-}
-
-// InstallationToken represents an installation token.
-type InstallationToken struct {
-	Token     *string    `json:"token,omitempty"`
-	ExpiresAt *time.Time `json:"expires_at,omitempty"`
-}
-
-// Get a single GitHub App. Passing the empty string will get
-// the authenticated GitHub App.
-//
-// Note: appSlug is just the URL-friendly name of your GitHub App.
-// You can find this on the settings page for your GitHub App
-// (e.g., https://github.com/settings/apps/:app_slug).
-//
-// GitHub API docs: https://developer.github.com/v3/apps/#get-a-single-github-app
-func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, error) {
-	var u string
-	if appSlug != "" {
-		u = fmt.Sprintf("apps/%v", appSlug)
-	} else {
-		u = "app"
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
-	app := new(App)
-	resp, err := s.client.Do(ctx, req, app)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return app, resp, nil
-}
-
-// ListInstallations lists the installations that the current GitHub App has.
-//
-// GitHub API docs: https://developer.github.com/v3/apps/#find-installations
-func (s *AppsService) ListInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) {
-	u, err := addOptions("app/installations", opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
-	var i []*Installation
-	resp, err := s.client.Do(ctx, req, &i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// GetInstallation returns the specified installation.
-//
-// GitHub API docs: https://developer.github.com/v3/apps/#get-a-single-installation
-func (s *AppsService) GetInstallation(ctx context.Context, id int64) (*Installation, *Response, error) {
-	u := fmt.Sprintf("app/installations/%v", id)
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
-	i := new(Installation)
-	resp, err := s.client.Do(ctx, req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// ListUserInstallations lists installations that are accessible to the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/apps/#list-installations-for-user
-func (s *AppsService) ListUserInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) {
-	u, err := addOptions("user/installations", opt)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
-	var i struct {
-		Installations []*Installation `json:"installations"`
-	}
-	resp, err := s.client.Do(ctx, req, &i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i.Installations, resp, nil
-}
-
-// CreateInstallationToken creates a new installation token.
-//
-// GitHub API docs: https://developer.github.com/v3/apps/#create-a-new-installation-token
-func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64) (*InstallationToken, *Response, error) {
-	u := fmt.Sprintf("installations/%v/access_tokens", id)
-
-	req, err := s.client.NewRequest("POST", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// TODO: remove custom Accept header when this API fully launches.
-	req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
-	t := new(InstallationToken)
-	resp, err := s.client.Do(ctx, req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
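
The AppsService methods above assume the underlying HTTP client authenticates as a GitHub App (a signed App JWT); that wiring is omitted here. A sketch of listing installations and minting installation tokens, with the listInstallations helper name and pagination values chosen purely for illustration:

package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/google/go-github/github"
)

// listInstallations walks the installations visible to a GitHub App and
// mints a short-lived installation token for each one. The http.Client
// passed in is assumed to already authenticate as the App itself (i.e. it
// attaches the App's signed JWT to every request); that setup is outside
// this sketch.
func listInstallations(ctx context.Context, appClient *http.Client) error {
	client := github.NewClient(appClient)

	opt := &github.ListOptions{PerPage: 100}
	for {
		installs, resp, err := client.Apps.ListInstallations(ctx, opt)
		if err != nil {
			return err
		}
		for _, inst := range installs {
			tok, _, err := client.Apps.CreateInstallationToken(ctx, inst.GetID())
			if err != nil {
				return err
			}
			fmt.Printf("installation %d: token expires %v\n", inst.GetID(), tok.GetExpiresAt())
		}
		if resp.NextPage == 0 {
			return nil
		}
		opt.Page = resp.NextPage
	}
}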

Some files were not shown because too many files changed in this diff